// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <cinttypes>
#include <iterator>
#include <mutex>
#include <vector>

#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/fiber.h"
#include "common/logging/log.h"
#include "common/microprofile.h"
#include "common/scope_exit.h"
#include "common/string_util.h"
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/core_timing_util.h"
#include "core/cpu_manager.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_address_arbiter.h"
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_readable_event.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_writable_event.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/svc.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/svc_types.h"
#include "core/hle/kernel/svc_wrap.h"
#include "core/hle/kernel/time_manager.h"
#include "core/hle/kernel/transfer_memory.h"
#include "core/hle/lock.h"
#include "core/hle/result.h"
#include "core/hle/service/service.h"
#include "core/memory.h"
#include "core/reporter.h"

namespace Kernel::Svc {
namespace {

// Checks if address + size is greater than the given address
// This can return false if the size causes an overflow of a 64-bit type
// or if the given size is zero.
constexpr bool IsValidAddressRange(VAddr address, u64 size) {
    return address + size > address;
}

// Helper function that performs the common sanity checks for svcMapMemory
// and svcUnmapMemory. This is doable, as both functions perform their sanitizing
// in the same order.
ResultCode MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr, VAddr src_addr,
                                      u64 size) {
    if (!Common::Is4KBAligned(dst_addr)) {
        LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr);
        return ResultInvalidAddress;
    }

    if (!Common::Is4KBAligned(src_addr)) {
        LOG_ERROR(Kernel_SVC, "Source address is not aligned to 4KB, 0x{:016X}", src_addr);
        return ResultInvalidSize;
    }

    if (size == 0) {
        LOG_ERROR(Kernel_SVC, "Size is 0");
        return ResultInvalidSize;
    }

    if (!Common::Is4KBAligned(size)) {
        LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:016X}", size);
        return ResultInvalidSize;
    }

    if (!IsValidAddressRange(dst_addr, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Destination is not a valid address range, addr=0x{:016X}, size=0x{:016X}",
                  dst_addr, size);
        return ResultInvalidCurrentMemory;
    }

    if (!IsValidAddressRange(src_addr, size)) {
        LOG_ERROR(Kernel_SVC, "Source is not a valid address range, addr=0x{:016X}, size=0x{:016X}",
                  src_addr, size);
        return ResultInvalidCurrentMemory;
    }

    if (!manager.IsInsideAddressSpace(src_addr, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Source is not within the address space, addr=0x{:016X}, size=0x{:016X}",
                  src_addr, size);
        return ResultInvalidCurrentMemory;
    }

    if (manager.IsOutsideStackRegion(dst_addr, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Destination is not within the stack region, addr=0x{:016X}, size=0x{:016X}",
                  dst_addr, size);
        return ResultInvalidMemoryRange;
    }

    if (manager.IsInsideHeapRegion(dst_addr, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Destination does not fit within the heap region, addr=0x{:016X}, "
                  "size=0x{:016X}",
                  dst_addr, size);
        return ResultInvalidMemoryRange;
    }

    if (manager.IsInsideAliasRegion(dst_addr, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Destination does not fit within the map region, addr=0x{:016X}, "
                  "size=0x{:016X}",
                  dst_addr, size);
        return ResultInvalidMemoryRange;
    }

    return RESULT_SUCCESS;
}

enum class ResourceLimitValueType {
    CurrentValue,
    LimitValue,
    PeakValue,
};
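
/// Retrieves the current value, the limit, or the peak value of the given resource type
/// from a resource limit handle owned by the current process.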
ResultVal<s64> RetrieveResourceLimitValue(Core::System& system, Handle resource_limit,
                                          u32 resource_type, ResourceLimitValueType value_type) {
    std::lock_guard lock{HLE::g_hle_lock};
    const auto type = static_cast<LimitableResource>(resource_type);
    if (!IsValidResourceType(type)) {
        LOG_ERROR(Kernel_SVC, "Invalid resource limit type: '{}'", resource_type);
        return ResultInvalidEnumValue;
    }

    const auto* const current_process = system.Kernel().CurrentProcess();
    ASSERT(current_process != nullptr);

    const auto resource_limit_object =
        current_process->GetHandleTable().Get<KResourceLimit>(resource_limit);
    if (!resource_limit_object) {
        LOG_ERROR(Kernel_SVC, "Handle to non-existent resource limit instance used. Handle={:08X}",
                  resource_limit);
        return ResultInvalidHandle;
    }

    switch (value_type) {
    case ResourceLimitValueType::CurrentValue:
        return MakeResult(resource_limit_object->GetCurrentValue(type));
    case ResourceLimitValueType::LimitValue:
        return MakeResult(resource_limit_object->GetLimitValue(type));
    case ResourceLimitValueType::PeakValue:
        return MakeResult(resource_limit_object->GetPeakValue(type));
    default:
        LOG_ERROR(Kernel_SVC, "Invalid resource value_type: '{}'", value_type);
        return ResultInvalidEnumValue;
    }
}

} // Anonymous namespace

/// Set the process heap to a given size. It can both extend and shrink the heap.
static ResultCode SetHeapSize(Core::System& system, VAddr* heap_addr, u64 heap_size) {
    std::lock_guard lock{HLE::g_hle_lock};
    LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", heap_size);

    // Size must be a multiple of 0x200000 (2MB) and be equal to or less than 8GB.
    if ((heap_size % 0x200000) != 0) {
        LOG_ERROR(Kernel_SVC, "The heap size is not a multiple of 2MB, heap_size=0x{:016X}",
                  heap_size);
        return ResultInvalidSize;
    }

    if (heap_size >= 0x200000000) {
        LOG_ERROR(Kernel_SVC, "The heap size is not less than 8GB, heap_size=0x{:016X}", heap_size);
        return ResultInvalidSize;
    }

    auto& page_table{system.Kernel().CurrentProcess()->PageTable()};

    CASCADE_RESULT(*heap_addr, page_table.SetHeapSize(heap_size));

    return RESULT_SUCCESS;
}
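
/// 32-bit wrapper for SetHeapSize; the returned heap address is truncated to 32 bits.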
static ResultCode SetHeapSize32(Core::System& system, u32* heap_addr, u32 heap_size) {
    VAddr temp_heap_addr{};
    const ResultCode result{SetHeapSize(system, &temp_heap_addr, heap_size)};
    *heap_addr = static_cast<u32>(temp_heap_addr);
    return result;
}
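
/// Sets the memory attributes on a range of memory; only the uncached attribute may be changed.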
static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask,
                                     u32 attribute) {
    std::lock_guard lock{HLE::g_hle_lock};
    LOG_DEBUG(Kernel_SVC,
              "called, address=0x{:016X}, size=0x{:X}, mask=0x{:08X}, attribute=0x{:08X}", address,
              size, mask, attribute);

    if (!Common::Is4KBAligned(address)) {
        LOG_ERROR(Kernel_SVC, "Address not page aligned (0x{:016X})", address);
        return ResultInvalidAddress;
    }

    if (size == 0 || !Common::Is4KBAligned(size)) {
        LOG_ERROR(Kernel_SVC, "Invalid size (0x{:X}). Size must be non-zero and page aligned.",
                  size);
        return ResultInvalidAddress;
    }

    if (!IsValidAddressRange(address, size)) {
        LOG_ERROR(Kernel_SVC, "Address range overflowed (Address: 0x{:016X}, Size: 0x{:016X})",
                  address, size);
        return ResultInvalidCurrentMemory;
    }

    const auto attributes{static_cast<MemoryAttribute>(mask | attribute)};
    if (attributes != static_cast<MemoryAttribute>(mask) ||
        (attributes | MemoryAttribute::Uncached) != MemoryAttribute::Uncached) {
        LOG_ERROR(Kernel_SVC,
                  "Memory attribute doesn't match the given mask (Attribute: 0x{:X}, Mask: {:X})",
                  attribute, mask);
        return ResultInvalidCombination;
    }

    auto& page_table{system.Kernel().CurrentProcess()->PageTable()};

    return page_table.SetMemoryAttribute(address, size, static_cast<KMemoryAttribute>(mask),
                                         static_cast<KMemoryAttribute>(attribute));
}

static ResultCode SetMemoryAttribute32(Core::System& system, u32 address, u32 size, u32 mask,
                                       u32 attribute) {
    return SetMemoryAttribute(system, address, size, mask, attribute);
}

/// Maps a memory range into a different range.
static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
    std::lock_guard lock{HLE::g_hle_lock};
    LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
              src_addr, size);

    auto& page_table{system.Kernel().CurrentProcess()->PageTable()};

    if (const ResultCode result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)};
        result.IsError()) {
        return result;
    }

    return page_table.Map(dst_addr, src_addr, size);
}

static ResultCode MapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
    return MapMemory(system, dst_addr, src_addr, size);
}

/// Unmaps a region that was previously mapped with svcMapMemory
static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
    std::lock_guard lock{HLE::g_hle_lock};
    LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
              src_addr, size);

    auto& page_table{system.Kernel().CurrentProcess()->PageTable()};

    if (const ResultCode result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)};
        result.IsError()) {
        return result;
    }

    return page_table.Unmap(dst_addr, src_addr, size);
}

static ResultCode UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
    return UnmapMemory(system, dst_addr, src_addr, size);
}

/// Connect to an OS service given the port name, returning a client session handle in out_handle
static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle,
                                     VAddr port_name_address) {
    std::lock_guard lock{HLE::g_hle_lock};
    auto& memory = system.Memory();

    if (!memory.IsValidVirtualAddress(port_name_address)) {
        LOG_ERROR(Kernel_SVC,
                  "Port Name Address is not a valid virtual address, port_name_address=0x{:016X}",
                  port_name_address);
        return ResultNotFound;
    }

    static constexpr std::size_t PortNameMaxLength = 11;
    // Read 1 char beyond the max allowed port name to detect names that are too long.
    const std::string port_name = memory.ReadCString(port_name_address, PortNameMaxLength + 1);
    if (port_name.size() > PortNameMaxLength) {
        LOG_ERROR(Kernel_SVC, "Port name is too long, expected {} but got {}", PortNameMaxLength,
                  port_name.size());
        return ResultOutOfRange;
    }

    LOG_TRACE(Kernel_SVC, "called port_name={}", port_name);

    auto& kernel = system.Kernel();
    const auto it = kernel.FindNamedPort(port_name);
    if (!kernel.IsValidNamedPort(it)) {
        LOG_WARNING(Kernel_SVC, "tried to connect to unknown port: {}", port_name);
        return ResultNotFound;
    }

    auto client_port = it->second;

    std::shared_ptr<ClientSession> client_session;
    CASCADE_RESULT(client_session, client_port->Connect());

    // Return the client session
    auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
    CASCADE_RESULT(*out_handle, handle_table.Create(client_session));
    return RESULT_SUCCESS;
}

static ResultCode ConnectToNamedPort32(Core::System& system, Handle* out_handle,
                                       u32 port_name_address) {
    return ConnectToNamedPort(system, out_handle, port_name_address);
}

/// Makes a blocking IPC call to an OS service.
static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
    auto& kernel = system.Kernel();
    const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
    std::shared_ptr<ClientSession> session = handle_table.Get<ClientSession>(handle);
    if (!session) {
        LOG_ERROR(Kernel_SVC, "called with invalid handle=0x{:08X}", handle);
        return ResultInvalidHandle;
    }

    LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());

    auto thread = kernel.CurrentScheduler()->GetCurrentThread();
    {
        KScopedSchedulerLock lock(kernel);
        thread->SetState(ThreadState::Waiting);
        thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
        session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming());
    }

    KSynchronizationObject* dummy{};
    return thread->GetWaitResult(std::addressof(dummy));
}

static ResultCode SendSyncRequest32(Core::System& system, Handle handle) {
    return SendSyncRequest(system, handle);
}

/// Get the ID for the specified thread.
static ResultCode GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handle) {
    LOG_TRACE(Kernel_SVC, "called thread=0x{:08X}", thread_handle);

    // Get the thread from its handle.
    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
    const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
    if (!thread) {
        LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", thread_handle);
        return ResultInvalidHandle;
    }

    // Get the thread's id.
    *out_thread_id = thread->GetThreadID();
    return RESULT_SUCCESS;
}
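
/// 32-bit wrapper for GetThreadId; the 64-bit thread ID is split across the two output words.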
static ResultCode GetThreadId32(Core::System& system, u32* out_thread_id_low,
                                u32* out_thread_id_high, Handle thread_handle) {
    u64 out_thread_id{};
    const ResultCode result{GetThreadId(system, &out_thread_id, thread_handle)};

    *out_thread_id_low = static_cast<u32>(out_thread_id >> 32);
    *out_thread_id_high = static_cast<u32>(out_thread_id & std::numeric_limits<u32>::max());

    return result;
}

/// Gets the ID of the specified process or a specified thread's owning process.
static ResultCode GetProcessId(Core::System& system, u64* process_id, Handle handle) {
    LOG_DEBUG(Kernel_SVC, "called handle=0x{:08X}", handle);

    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
    const std::shared_ptr<Process> process = handle_table.Get<Process>(handle);
    if (process) {
        *process_id = process->GetProcessID();
        return RESULT_SUCCESS;
    }

    const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(handle);
    if (thread) {
        const Process* const owner_process = thread->GetOwnerProcess();
        if (!owner_process) {
            LOG_ERROR(Kernel_SVC, "Non-existent owning process encountered.");
            return ResultInvalidHandle;
        }

        *process_id = owner_process->GetProcessID();
        return RESULT_SUCCESS;
    }

    // NOTE: This should also handle debug objects before returning.

    LOG_ERROR(Kernel_SVC, "Handle does not exist, handle=0x{:08X}", handle);
    return ResultInvalidHandle;
}
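
/// 32-bit wrapper for GetProcessId; the 64-bit process ID is split into low and high output words.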
static ResultCode GetProcessId32(Core::System& system, u32* process_id_low, u32* process_id_high,
                                 Handle handle) {
    u64 process_id{};
    const auto result = GetProcessId(system, &process_id, handle);
    *process_id_low = static_cast<u32>(process_id);
    *process_id_high = static_cast<u32>(process_id >> 32);
    return result;
}

/// Wait for the given handles to synchronize, timeout after the specified nanoseconds
static ResultCode WaitSynchronization(Core::System& system, s32* index, VAddr handles_address,
                                      u64 handle_count, s64 nano_seconds) {
    LOG_TRACE(Kernel_SVC, "called handles_address=0x{:X}, handle_count={}, nano_seconds={}",
              handles_address, handle_count, nano_seconds);

    auto& memory = system.Memory();
    if (!memory.IsValidVirtualAddress(handles_address)) {
        LOG_ERROR(Kernel_SVC,
                  "Handle address is not a valid virtual address, handle_address=0x{:016X}",
                  handles_address);
        return ResultInvalidPointer;
    }

    static constexpr u64 MaxHandles = 0x40;

    if (handle_count > MaxHandles) {
        LOG_ERROR(Kernel_SVC, "Handle count specified is too large, expected {} but got {}",
                  MaxHandles, handle_count);
        return ResultOutOfRange;
    }

    auto& kernel = system.Kernel();
    std::vector<KSynchronizationObject*> objects(handle_count);
    const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();

    for (u64 i = 0; i < handle_count; ++i) {
        const Handle handle = memory.Read32(handles_address + i * sizeof(Handle));
        const auto object = handle_table.Get<KSynchronizationObject>(handle);

        if (object == nullptr) {
            LOG_ERROR(Kernel_SVC, "Object is a nullptr");
            return ResultInvalidHandle;
        }

        objects[i] = object.get();
    }
    return KSynchronizationObject::Wait(kernel, index, objects.data(),
                                        static_cast<s32>(objects.size()), nano_seconds);
}
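
/// 32-bit wrapper for WaitSynchronization; the 64-bit timeout is reassembled from its two halves.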
static ResultCode WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address,
                                        s32 handle_count, u32 timeout_high, s32* index) {
    const s64 nano_seconds{(static_cast<s64>(timeout_high) << 32) | static_cast<s64>(timeout_low)};
    return WaitSynchronization(system, index, handles_address, handle_count, nano_seconds);
}

/// Resumes a thread waiting on WaitSynchronization
static ResultCode CancelSynchronization(Core::System& system, Handle thread_handle) {
    LOG_TRACE(Kernel_SVC, "called thread=0x{:X}", thread_handle);

    // Get the thread from its handle.
    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
    std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);

    if (!thread) {
        LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", thread_handle);
        return ResultInvalidHandle;
    }

    // Cancel the thread's wait.
    thread->WaitCancel();
    return RESULT_SUCCESS;
}

static ResultCode CancelSynchronization32(Core::System& system, Handle thread_handle) {
    return CancelSynchronization(system, thread_handle);
}

/// Attempts to lock a mutex
static ResultCode ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address,
                                u32 tag) {
    LOG_TRACE(Kernel_SVC, "called thread_handle=0x{:08X}, address=0x{:X}, tag=0x{:08X}",
              thread_handle, address, tag);

    // Validate the input address.
    if (IsKernelAddress(address)) {
        LOG_ERROR(Kernel_SVC, "Attempting to arbitrate a lock on a kernel address (address={:08X})",
                  address);
        return ResultInvalidCurrentMemory;
    }
    if (!Common::IsAligned(address, sizeof(u32))) {
        LOG_ERROR(Kernel_SVC, "Input address must be 4 byte aligned (address: {:08X})", address);
        return ResultInvalidAddress;
    }

    return system.Kernel().CurrentProcess()->WaitForAddress(thread_handle, address, tag);
}

static ResultCode ArbitrateLock32(Core::System& system, Handle thread_handle, u32 address,
                                  u32 tag) {
    return ArbitrateLock(system, thread_handle, address, tag);
}

/// Unlock a mutex
static ResultCode ArbitrateUnlock(Core::System& system, VAddr address) {
    LOG_TRACE(Kernel_SVC, "called address=0x{:X}", address);

    // Validate the input address.
    if (IsKernelAddress(address)) {
        LOG_ERROR(Kernel_SVC,
                  "Attempting to arbitrate an unlock on a kernel address (address={:08X})",
                  address);
        return ResultInvalidCurrentMemory;
    }
    if (!Common::IsAligned(address, sizeof(u32))) {
        LOG_ERROR(Kernel_SVC, "Input address must be 4 byte aligned (address: {:08X})", address);
        return ResultInvalidAddress;
    }

    return system.Kernel().CurrentProcess()->SignalToAddress(address);
}

static ResultCode ArbitrateUnlock32(Core::System& system, u32 address) {
    return ArbitrateUnlock(system, address);
}

enum class BreakType : u32 {
    Panic = 0,
    AssertionFailed = 1,
    PreNROLoad = 3,
    PostNROLoad = 4,
    PreNROUnload = 5,
    PostNROUnload = 6,
    CppException = 7,
};
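
// The break reason passed to svcBreak packs the break type in bits 0-30 and a "signal debugger"
// flag in bit 31.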
struct BreakReason {
    union {
        u32 raw;
        BitField<0, 30, BreakType> break_type;
        BitField<31, 1, u32> signal_debugger;
    };
};

/// Break program execution
static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
    BreakReason break_reason{reason};
    bool has_dumped_buffer{};
    std::vector<u8> debug_buffer;

    const auto handle_debug_buffer = [&](VAddr addr, u64 sz) {
        if (sz == 0 || addr == 0 || has_dumped_buffer) {
            return;
        }

        auto& memory = system.Memory();

        // This typically is an error code so we're going to assume this is the case
        if (sz == sizeof(u32)) {
            LOG_CRITICAL(Debug_Emulated, "debug_buffer_err_code={:X}", memory.Read32(addr));
        } else {
            // We don't know what's in here so we'll hexdump it
            debug_buffer.resize(sz);
            memory.ReadBlock(addr, debug_buffer.data(), sz);
            std::string hexdump;
            for (std::size_t i = 0; i < debug_buffer.size(); i++) {
                hexdump += fmt::format("{:02X} ", debug_buffer[i]);
                if (i != 0 && i % 16 == 0) {
                    hexdump += '\n';
                }
            }
            LOG_CRITICAL(Debug_Emulated, "debug_buffer=\n{}", hexdump);
        }
        has_dumped_buffer = true;
    };

    switch (break_reason.break_type) {
    case BreakType::Panic:
        LOG_CRITICAL(Debug_Emulated, "Signalling debugger, PANIC! info1=0x{:016X}, info2=0x{:016X}",
                     info1, info2);
        handle_debug_buffer(info1, info2);
        break;
    case BreakType::AssertionFailed:
        LOG_CRITICAL(Debug_Emulated,
                     "Signalling debugger, Assertion failed! info1=0x{:016X}, info2=0x{:016X}",
                     info1, info2);
        handle_debug_buffer(info1, info2);
        break;
    case BreakType::PreNROLoad:
        LOG_WARNING(
            Debug_Emulated,
            "Signalling debugger, Attempting to load an NRO at 0x{:016X} with size 0x{:016X}",
            info1, info2);
        break;
    case BreakType::PostNROLoad:
        LOG_WARNING(Debug_Emulated,
                    "Signalling debugger, Loaded an NRO at 0x{:016X} with size 0x{:016X}", info1,
                    info2);
        break;
    case BreakType::PreNROUnload:
        LOG_WARNING(
            Debug_Emulated,
            "Signalling debugger, Attempting to unload an NRO at 0x{:016X} with size 0x{:016X}",
            info1, info2);
        break;
    case BreakType::PostNROUnload:
        LOG_WARNING(Debug_Emulated,
                    "Signalling debugger, Unloaded an NRO at 0x{:016X} with size 0x{:016X}", info1,
                    info2);
        break;
    case BreakType::CppException:
        LOG_CRITICAL(Debug_Emulated, "Signalling debugger. Uncaught C++ exception encountered.");
        break;
    default:
        LOG_WARNING(
            Debug_Emulated,
            "Signalling debugger, Unknown break reason {}, info1=0x{:016X}, info2=0x{:016X}",
            static_cast<u32>(break_reason.break_type.Value()), info1, info2);
        handle_debug_buffer(info1, info2);
        break;
    }

    system.GetReporter().SaveSvcBreakReport(
        static_cast<u32>(break_reason.break_type.Value()), break_reason.signal_debugger, info1,
        info2, has_dumped_buffer ? std::make_optional(debug_buffer) : std::nullopt);

    if (!break_reason.signal_debugger) {
        LOG_CRITICAL(
            Debug_Emulated,
            "Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}",
            reason, info1, info2);

        handle_debug_buffer(info1, info2);

        auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
        const auto thread_processor_id = current_thread->GetActiveCore();
        system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace();
    }
}

static void Break32(Core::System& system, u32 reason, u32 info1, u32 info2) {
    Break(system, reason, info1, info2);
}

/// Used to output a message on a debug hardware unit - does nothing on a retail unit
static void OutputDebugString(Core::System& system, VAddr address, u64 len) {
    if (len == 0) {
        return;
    }

    std::string str(len, '\0');
    system.Memory().ReadBlock(address, str.data(), str.size());
    LOG_DEBUG(Debug_Emulated, "{}", str);
}

/// Gets system/memory information for the current process
static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 handle,
                          u64 info_sub_id) {
    std::lock_guard lock{HLE::g_hle_lock};
    LOG_TRACE(Kernel_SVC, "called info_id=0x{:X}, info_sub_id=0x{:X}, handle=0x{:08X}", info_id,
              info_sub_id, handle);

    enum class GetInfoType : u64 {
        // 1.0.0+
        AllowedCPUCoreMask = 0,
        AllowedThreadPriorityMask = 1,
        MapRegionBaseAddr = 2,
        MapRegionSize = 3,
        HeapRegionBaseAddr = 4,
        HeapRegionSize = 5,
        TotalPhysicalMemoryAvailable = 6,
        TotalPhysicalMemoryUsed = 7,
        IsCurrentProcessBeingDebugged = 8,
        RegisterResourceLimit = 9,
        IdleTickCount = 10,
        RandomEntropy = 11,
        ThreadTickCount = 0xF0000002,
        // 2.0.0+
        ASLRRegionBaseAddr = 12,
        ASLRRegionSize = 13,
        StackRegionBaseAddr = 14,
        StackRegionSize = 15,
        // 3.0.0+
        SystemResourceSize = 16,
        SystemResourceUsage = 17,
        TitleId = 18,
        // 4.0.0+
        PrivilegedProcessId = 19,
        // 5.0.0+
        UserExceptionContextAddr = 20,
        // 6.0.0+
        TotalPhysicalMemoryAvailableWithoutSystemResource = 21,
        TotalPhysicalMemoryUsedWithoutSystemResource = 22,
    };

    const auto info_id_type = static_cast<GetInfoType>(info_id);

    switch (info_id_type) {
    case GetInfoType::AllowedCPUCoreMask:
    case GetInfoType::AllowedThreadPriorityMask:
    case GetInfoType::MapRegionBaseAddr:
    case GetInfoType::MapRegionSize:
    case GetInfoType::HeapRegionBaseAddr:
    case GetInfoType::HeapRegionSize:
    case GetInfoType::ASLRRegionBaseAddr:
    case GetInfoType::ASLRRegionSize:
    case GetInfoType::StackRegionBaseAddr:
    case GetInfoType::StackRegionSize:
    case GetInfoType::TotalPhysicalMemoryAvailable:
    case GetInfoType::TotalPhysicalMemoryUsed:
    case GetInfoType::SystemResourceSize:
    case GetInfoType::SystemResourceUsage:
    case GetInfoType::TitleId:
    case GetInfoType::UserExceptionContextAddr:
    case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource:
    case GetInfoType::TotalPhysicalMemoryUsedWithoutSystemResource: {
        if (info_sub_id != 0) {
            LOG_ERROR(Kernel_SVC, "Info sub id is non zero! info_id={}, info_sub_id={}", info_id,
                      info_sub_id);
            return ResultInvalidEnumValue;
        }

        const auto& current_process_handle_table =
            system.Kernel().CurrentProcess()->GetHandleTable();
        // The current process' own handle table is used to find the target process. Note that
        // pseudo-handles (0xFFFF8001 for the current process, 0xFFFF8000 for the current thread)
        // still resolve here, so the current process can be queried through this path as well.
        const auto process = current_process_handle_table.Get<Process>(static_cast<Handle>(handle));
        if (!process) {
            LOG_ERROR(Kernel_SVC, "Process is not valid! info_id={}, info_sub_id={}, handle={:08X}",
                      info_id, info_sub_id, handle);
            return ResultInvalidHandle;
        }

        switch (info_id_type) {
        case GetInfoType::AllowedCPUCoreMask:
            *result = process->GetCoreMask();
            return RESULT_SUCCESS;

        case GetInfoType::AllowedThreadPriorityMask:
            *result = process->GetPriorityMask();
            return RESULT_SUCCESS;

        case GetInfoType::MapRegionBaseAddr:
            *result = process->PageTable().GetAliasRegionStart();
            return RESULT_SUCCESS;

        case GetInfoType::MapRegionSize:
            *result = process->PageTable().GetAliasRegionSize();
            return RESULT_SUCCESS;

        case GetInfoType::HeapRegionBaseAddr:
            *result = process->PageTable().GetHeapRegionStart();
            return RESULT_SUCCESS;

        case GetInfoType::HeapRegionSize:
            *result = process->PageTable().GetHeapRegionSize();
            return RESULT_SUCCESS;

        case GetInfoType::ASLRRegionBaseAddr:
            *result = process->PageTable().GetAliasCodeRegionStart();
            return RESULT_SUCCESS;

        case GetInfoType::ASLRRegionSize:
            *result = process->PageTable().GetAliasCodeRegionSize();
            return RESULT_SUCCESS;

        case GetInfoType::StackRegionBaseAddr:
            *result = process->PageTable().GetStackRegionStart();
            return RESULT_SUCCESS;

        case GetInfoType::StackRegionSize:
            *result = process->PageTable().GetStackRegionSize();
            return RESULT_SUCCESS;

        case GetInfoType::TotalPhysicalMemoryAvailable:
            *result = process->GetTotalPhysicalMemoryAvailable();
            return RESULT_SUCCESS;

        case GetInfoType::TotalPhysicalMemoryUsed:
            *result = process->GetTotalPhysicalMemoryUsed();
            return RESULT_SUCCESS;

        case GetInfoType::SystemResourceSize:
            *result = process->GetSystemResourceSize();
            return RESULT_SUCCESS;

        case GetInfoType::SystemResourceUsage:
            LOG_WARNING(Kernel_SVC, "(STUBBED) Attempted to query system resource usage");
            *result = process->GetSystemResourceUsage();
            return RESULT_SUCCESS;

        case GetInfoType::TitleId:
            *result = process->GetTitleID();
            return RESULT_SUCCESS;

        case GetInfoType::UserExceptionContextAddr:
            *result = process->GetTLSRegionAddress();
            return RESULT_SUCCESS;

        case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource:
            *result = process->GetTotalPhysicalMemoryAvailableWithoutSystemResource();
            return RESULT_SUCCESS;

        case GetInfoType::TotalPhysicalMemoryUsedWithoutSystemResource:
            *result = process->GetTotalPhysicalMemoryUsedWithoutSystemResource();
            return RESULT_SUCCESS;

        default:
            break;
        }

        LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id);
        return ResultInvalidEnumValue;
    }

    case GetInfoType::IsCurrentProcessBeingDebugged:
        *result = 0;
        return RESULT_SUCCESS;

    case GetInfoType::RegisterResourceLimit: {
        if (handle != 0) {
            LOG_ERROR(Kernel, "Handle is non zero! handle={:08X}", handle);
            return ResultInvalidHandle;
        }

        if (info_sub_id != 0) {
            LOG_ERROR(Kernel, "Info sub id is non zero! info_id={}, info_sub_id={}", info_id,
                      info_sub_id);
            return ResultInvalidCombination;
        }

        Process* const current_process = system.Kernel().CurrentProcess();
        HandleTable& handle_table = current_process->GetHandleTable();
        const auto resource_limit = current_process->GetResourceLimit();
        if (!resource_limit) {
            *result = KernelHandle::InvalidHandle;
            // Yes, the kernel considers this a successful operation.
            return RESULT_SUCCESS;
        }

        const auto table_result = handle_table.Create(resource_limit);
        if (table_result.Failed()) {
            return table_result.Code();
        }

        *result = *table_result;
        return RESULT_SUCCESS;
    }

    case GetInfoType::RandomEntropy:
        if (handle != 0) {
            LOG_ERROR(Kernel_SVC, "Process Handle is non zero, expected 0 result but got {:016X}",
                      handle);
            return ResultInvalidHandle;
        }

        if (info_sub_id >= Process::RANDOM_ENTROPY_SIZE) {
            LOG_ERROR(Kernel_SVC, "Entropy size is out of range, expected {} but got {}",
                      Process::RANDOM_ENTROPY_SIZE, info_sub_id);
            return ResultInvalidCombination;
        }

        *result = system.Kernel().CurrentProcess()->GetRandomEntropy(info_sub_id);
        return RESULT_SUCCESS;

    case GetInfoType::PrivilegedProcessId:
        LOG_WARNING(Kernel_SVC,
                    "(STUBBED) Attempted to query privileged process id bounds, returned 0");
        *result = 0;
        return RESULT_SUCCESS;

    case GetInfoType::ThreadTickCount: {
        constexpr u64 num_cpus = 4;
        if (info_sub_id != 0xFFFFFFFFFFFFFFFF && info_sub_id >= num_cpus) {
            LOG_ERROR(Kernel_SVC, "Core count is out of range, expected {} but got {}", num_cpus,
                      info_sub_id);
            return ResultInvalidCombination;
        }

        const auto thread = system.Kernel().CurrentProcess()->GetHandleTable().Get<KThread>(
            static_cast<Handle>(handle));
        if (!thread) {
            LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}",
                      static_cast<Handle>(handle));
            return ResultInvalidHandle;
        }

        const auto& core_timing = system.CoreTiming();
        const auto& scheduler = *system.Kernel().CurrentScheduler();
        const auto* const current_thread = scheduler.GetCurrentThread();
        const bool same_thread = current_thread == thread.get();

        const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTicks();
        u64 out_ticks = 0;
        if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) {
            const u64 thread_ticks = current_thread->GetCpuTime();

            out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks);
        } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) {
            out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks;
        }

        *result = out_ticks;
        return RESULT_SUCCESS;
    }

    default:
        LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id);
        return ResultInvalidEnumValue;
    }
}
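
/// 32-bit wrapper for GetInfo; the 64-bit sub ID and result value are split across register pairs.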
static ResultCode GetInfo32(Core::System& system, u32* result_low, u32* result_high, u32 sub_id_low,
                            u32 info_id, u32 handle, u32 sub_id_high) {
    const u64 sub_id{u64{sub_id_low} | (u64{sub_id_high} << 32)};
    u64 res_value{};

    const ResultCode result{GetInfo(system, &res_value, info_id, handle, sub_id)};
    *result_high = static_cast<u32>(res_value >> 32);
    *result_low = static_cast<u32>(res_value & std::numeric_limits<u32>::max());

    return result;
}

/// Maps memory at a desired address
static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
    std::lock_guard lock{HLE::g_hle_lock};
    LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);

    if (!Common::Is4KBAligned(addr)) {
        LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr);
        return ResultInvalidAddress;
    }

    if (!Common::Is4KBAligned(size)) {
        LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:X}", size);
        return ResultInvalidSize;
    }

    if (size == 0) {
        LOG_ERROR(Kernel_SVC, "Size is zero");
        return ResultInvalidSize;
    }

    if (!(addr < addr + size)) {
        LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address");
        return ResultInvalidMemoryRange;
    }

    Process* const current_process{system.Kernel().CurrentProcess()};
    auto& page_table{current_process->PageTable()};

    if (current_process->GetSystemResourceSize() == 0) {
        LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
        return ResultInvalidState;
    }

    if (!page_table.IsInsideAddressSpace(addr, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr,
                  size);
        return ResultInvalidMemoryRange;
    }

    if (page_table.IsOutsideAliasRegion(addr, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Address is not within the alias region, addr=0x{:016X}, size=0x{:016X}", addr,
                  size);
        return ResultInvalidMemoryRange;
    }

    return page_table.MapPhysicalMemory(addr, size);
}

static ResultCode MapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
    return MapPhysicalMemory(system, addr, size);
}

/// Unmaps memory previously mapped via MapPhysicalMemory
static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
    std::lock_guard lock{HLE::g_hle_lock};
    LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);

    if (!Common::Is4KBAligned(addr)) {
        LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr);
        return ResultInvalidAddress;
    }

    if (!Common::Is4KBAligned(size)) {
        LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:X}", size);
        return ResultInvalidSize;
    }

    if (size == 0) {
        LOG_ERROR(Kernel_SVC, "Size is zero");
        return ResultInvalidSize;
    }

    if (!(addr < addr + size)) {
        LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address");
        return ResultInvalidMemoryRange;
    }

    Process* const current_process{system.Kernel().CurrentProcess()};
    auto& page_table{current_process->PageTable()};

    if (current_process->GetSystemResourceSize() == 0) {
        LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
        return ResultInvalidState;
    }

    if (!page_table.IsInsideAddressSpace(addr, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr,
                  size);
        return ResultInvalidMemoryRange;
    }

    if (page_table.IsOutsideAliasRegion(addr, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Address is not within the alias region, addr=0x{:016X}, size=0x{:016X}", addr,
                  size);
        return ResultInvalidMemoryRange;
    }

    return page_table.UnmapPhysicalMemory(addr, size);
}

static ResultCode UnmapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
    return UnmapPhysicalMemory(system, addr, size);
}

/// Sets the thread activity
static ResultCode SetThreadActivity(Core::System& system, Handle thread_handle,
                                    ThreadActivity thread_activity) {
    LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", thread_handle,
              thread_activity);

    // Validate the activity.
    constexpr auto IsValidThreadActivity = [](ThreadActivity activity) {
        return activity == ThreadActivity::Runnable || activity == ThreadActivity::Paused;
    };
    if (!IsValidThreadActivity(thread_activity)) {
        LOG_ERROR(Kernel_SVC, "Invalid thread activity value provided (activity={})",
                  thread_activity);
        return ResultInvalidEnumValue;
    }

    // Get the thread from its handle.
    auto& kernel = system.Kernel();
    const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
    const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
    if (!thread) {
        LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", thread_handle);
        return ResultInvalidHandle;
    }

    // Check that the activity is being set on a non-current thread for the current process.
    if (thread->GetOwnerProcess() != kernel.CurrentProcess()) {
        LOG_ERROR(Kernel_SVC, "Invalid owning process for the created thread.");
        return ResultInvalidHandle;
    }
    if (thread.get() == GetCurrentThreadPointer(kernel)) {
        LOG_ERROR(Kernel_SVC, "Thread is busy");
        return ResultBusy;
    }

    // Set the activity.
    const auto set_result = thread->SetActivity(thread_activity);
    if (set_result.IsError()) {
        LOG_ERROR(Kernel_SVC, "Failed to set thread activity.");
        return set_result;
    }

    return RESULT_SUCCESS;
}

static ResultCode SetThreadActivity32(Core::System& system, Handle thread_handle,
                                      Svc::ThreadActivity thread_activity) {
    return SetThreadActivity(system, thread_handle, thread_activity);
}

/// Gets the thread context
static ResultCode GetThreadContext(Core::System& system, VAddr out_context, Handle thread_handle) {
    LOG_DEBUG(Kernel_SVC, "called, out_context=0x{:08X}, thread_handle=0x{:X}", out_context,
              thread_handle);

    // Get the thread from its handle.
    const auto* current_process = system.Kernel().CurrentProcess();
    const std::shared_ptr<KThread> thread =
        current_process->GetHandleTable().Get<KThread>(thread_handle);
    if (!thread) {
        LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={})", thread_handle);
        return ResultInvalidHandle;
    }

    // Require the handle be to a non-current thread in the current process.
    if (thread->GetOwnerProcess() != current_process) {
        LOG_ERROR(Kernel_SVC, "Thread owning process is not the current process.");
        return ResultInvalidHandle;
    }
    if (thread.get() == system.Kernel().CurrentScheduler()->GetCurrentThread()) {
        LOG_ERROR(Kernel_SVC, "Current thread is busy.");
        return ResultBusy;
    }

    // Get the thread context.
    std::vector<u8> context;
    const auto context_result = thread->GetThreadContext3(context);
    if (context_result.IsError()) {
        LOG_ERROR(Kernel_SVC, "Unable to successfully retrieve thread context (result: {})",
                  context_result.raw);
        return context_result;
    }

    // Copy the thread context to user space.
    system.Memory().WriteBlock(out_context, context.data(), context.size());

    return RESULT_SUCCESS;
}

static ResultCode GetThreadContext32(Core::System& system, u32 out_context, Handle thread_handle) {
    return GetThreadContext(system, out_context, thread_handle);
}

/// Gets the priority for the specified thread
static ResultCode GetThreadPriority(Core::System& system, u32* out_priority, Handle handle) {
    LOG_TRACE(Kernel_SVC, "called");

    // Get the thread from its handle.
    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
    const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(handle);
    if (!thread) {
        LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", handle);
        return ResultInvalidHandle;
    }

    // Get the thread's priority.
    *out_priority = thread->GetPriority();
    return RESULT_SUCCESS;
}

static ResultCode GetThreadPriority32(Core::System& system, u32* out_priority, Handle handle) {
    return GetThreadPriority(system, out_priority, handle);
}

/// Sets the priority for the specified thread
static ResultCode SetThreadPriority(Core::System& system, Handle handle, u32 priority) {
    LOG_TRACE(Kernel_SVC, "called");

    // Validate the priority.
    if (HighestThreadPriority > priority || priority > LowestThreadPriority) {
        LOG_ERROR(Kernel_SVC, "Invalid thread priority specified (priority={})", priority);
        return ResultInvalidPriority;
    }

    // Get the thread from its handle.
    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
    const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(handle);
    if (!thread) {
        LOG_ERROR(Kernel_SVC, "Invalid handle provided (handle={:08X})", handle);
        return ResultInvalidHandle;
    }

    // Set the thread priority.
    thread->SetBasePriority(priority);
    return RESULT_SUCCESS;
}

static ResultCode SetThreadPriority32(Core::System& system, Handle handle, u32 priority) {
    return SetThreadPriority(system, handle, priority);
}

/// Get which CPU core is executing the current thread
static u32 GetCurrentProcessorNumber(Core::System& system) {
    LOG_TRACE(Kernel_SVC, "called");
    return static_cast<u32>(system.CurrentPhysicalCore().CoreIndex());
}

static u32 GetCurrentProcessorNumber32(Core::System& system) {
    return GetCurrentProcessorNumber(system);
}

static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_handle, VAddr addr,
                                  u64 size, u32 permissions) {
    std::lock_guard lock{HLE::g_hle_lock};
    LOG_TRACE(Kernel_SVC,
              "called, shared_memory_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
              shared_memory_handle, addr, size, permissions);

    if (!Common::Is4KBAligned(addr)) {
        LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, addr=0x{:016X}", addr);
        return ResultInvalidAddress;
    }

    if (size == 0) {
        LOG_ERROR(Kernel_SVC, "Size is 0");
        return ResultInvalidSize;
    }

    if (!Common::Is4KBAligned(size)) {
        LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, size=0x{:016X}", size);
        return ResultInvalidSize;
    }

    if (!IsValidAddressRange(addr, size)) {
        LOG_ERROR(Kernel_SVC, "Region is not a valid address range, addr=0x{:016X}, size=0x{:016X}",
                  addr, size);
        return ResultInvalidCurrentMemory;
    }

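    // Only Read and ReadWrite mappings are accepted: assuming the usual Read/Write bit layout,
    // OR-ing the Write bit into the requested permission yields ReadWrite exactly when Read was
    // requested with no extra bits set.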
    const auto permission_type = static_cast<MemoryPermission>(permissions);
    if ((permission_type | MemoryPermission::Write) != MemoryPermission::ReadWrite) {
        LOG_ERROR(Kernel_SVC, "Expected Read or ReadWrite permission but got permissions=0x{:08X}",
                  permissions);
        return ResultInvalidMemoryPermissions;
    }

    auto* const current_process{system.Kernel().CurrentProcess()};
    auto& page_table{current_process->PageTable()};

    if (page_table.IsInvalidRegion(addr, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Address does not fit within the valid region, addr=0x{:016X}, "
                  "size=0x{:016X}",
                  addr, size);
        return ResultInvalidMemoryRange;
    }

    if (page_table.IsInsideHeapRegion(addr, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Address must not fall within the heap region, addr=0x{:016X}, "
                  "size=0x{:016X}",
                  addr, size);
        return ResultInvalidMemoryRange;
    }

    if (page_table.IsInsideAliasRegion(addr, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Address must not fall within the map region, addr=0x{:016X}, "
                  "size=0x{:016X}",
                  addr, size);
        return ResultInvalidMemoryRange;
    }

    auto shared_memory{current_process->GetHandleTable().Get<KSharedMemory>(shared_memory_handle)};
    if (!shared_memory) {
        LOG_ERROR(Kernel_SVC, "Shared memory does not exist, shared_memory_handle=0x{:08X}",
                  shared_memory_handle);
        return ResultInvalidHandle;
    }

    return shared_memory->Map(*current_process, addr, size,
                              static_cast<KMemoryPermission>(permission_type));
}

static ResultCode MapSharedMemory32(Core::System& system, Handle shared_memory_handle, u32 addr,
                                    u32 size, u32 permissions) {
    return MapSharedMemory(system, shared_memory_handle, addr, size, permissions);
}

static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address,
                                     VAddr page_info_address, Handle process_handle,
                                     VAddr address) {
    std::lock_guard lock{HLE::g_hle_lock};
    LOG_TRACE(Kernel_SVC, "called process=0x{:08X} address={:X}", process_handle, address);
    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
    std::shared_ptr<Process> process = handle_table.Get<Process>(process_handle);
    if (!process) {
        LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}",
                  process_handle);
        return ResultInvalidHandle;
    }

    auto& memory{system.Memory()};
    const auto memory_info{process->PageTable().QueryInfo(address).GetSvcMemoryInfo()};

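    // Serialize the queried MemoryInfo to guest memory field by field; the offsets below are
    // addr (0x00), size (0x08), state (0x10), attr (0x14), perm (0x18), ipc_refcount (0x1c),
    // device_refcount (0x20), and 4 bytes of padding (0x24).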
    memory.Write64(memory_info_address + 0x00, memory_info.addr);
    memory.Write64(memory_info_address + 0x08, memory_info.size);
    memory.Write32(memory_info_address + 0x10, static_cast<u32>(memory_info.state) & 0xff);
    memory.Write32(memory_info_address + 0x14, static_cast<u32>(memory_info.attr));
    memory.Write32(memory_info_address + 0x18, static_cast<u32>(memory_info.perm));
    memory.Write32(memory_info_address + 0x1c, memory_info.ipc_refcount);
    memory.Write32(memory_info_address + 0x20, memory_info.device_refcount);
    memory.Write32(memory_info_address + 0x24, 0);

    // Page info appears to be currently unused by the kernel and is always set to zero.
    memory.Write32(page_info_address, 0);

    return RESULT_SUCCESS;
}

static ResultCode QueryMemory(Core::System& system, VAddr memory_info_address,
                              VAddr page_info_address, VAddr query_address) {
    LOG_TRACE(Kernel_SVC,
              "called, memory_info_address=0x{:016X}, page_info_address=0x{:016X}, "
              "query_address=0x{:016X}",
              memory_info_address, page_info_address, query_address);

    return QueryProcessMemory(system, memory_info_address, page_info_address, CurrentProcess,
                              query_address);
}

static ResultCode QueryMemory32(Core::System& system, u32 memory_info_address,
                                u32 page_info_address, u32 query_address) {
    return QueryMemory(system, memory_info_address, page_info_address, query_address);
}

static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
                                       u64 src_address, u64 size) {
    LOG_DEBUG(Kernel_SVC,
              "called. process_handle=0x{:08X}, dst_address=0x{:016X}, "
              "src_address=0x{:016X}, size=0x{:016X}",
              process_handle, dst_address, src_address, size);

    if (!Common::Is4KBAligned(src_address)) {
        LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).",
                  src_address);
        return ResultInvalidAddress;
    }

    if (!Common::Is4KBAligned(dst_address)) {
        LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).",
                  dst_address);
        return ResultInvalidAddress;
    }

    if (size == 0 || !Common::Is4KBAligned(size)) {
        LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X})", size);
        return ResultInvalidSize;
    }

    if (!IsValidAddressRange(dst_address, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Destination address range overflows the address space (dst_address=0x{:016X}, "
                  "size=0x{:016X}).",
                  dst_address, size);
        return ResultInvalidCurrentMemory;
    }

    if (!IsValidAddressRange(src_address, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Source address range overflows the address space (src_address=0x{:016X}, "
                  "size=0x{:016X}).",
                  src_address, size);
        return ResultInvalidCurrentMemory;
    }

    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
    auto process = handle_table.Get<Process>(process_handle);
    if (!process) {
        LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).",
                  process_handle);
        return ResultInvalidHandle;
    }

    auto& page_table = process->PageTable();
    if (!page_table.IsInsideAddressSpace(src_address, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Source address range is not within the address space (src_address=0x{:016X}, "
                  "size=0x{:016X}).",
                  src_address, size);
        return ResultInvalidCurrentMemory;
    }

    if (!page_table.IsInsideASLRRegion(dst_address, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Destination address range is not within the ASLR region (dst_address=0x{:016X}, "
                  "size=0x{:016X}).",
                  dst_address, size);
        return ResultInvalidMemoryRange;
    }

    return page_table.MapProcessCodeMemory(dst_address, src_address, size);
}

static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_handle,
                                         u64 dst_address, u64 src_address, u64 size) {
    LOG_DEBUG(Kernel_SVC,
              "called. process_handle=0x{:08X}, dst_address=0x{:016X}, src_address=0x{:016X}, "
              "size=0x{:016X}",
              process_handle, dst_address, src_address, size);

    if (!Common::Is4KBAligned(dst_address)) {
        LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).",
                  dst_address);
        return ResultInvalidAddress;
    }

    if (!Common::Is4KBAligned(src_address)) {
        LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).",
                  src_address);
        return ResultInvalidAddress;
    }

    if (size == 0 || !Common::Is4KBAligned(size)) {
        LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X}).", size);
        return ResultInvalidSize;
    }

    if (!IsValidAddressRange(dst_address, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Destination address range overflows the address space (dst_address=0x{:016X}, "
                  "size=0x{:016X}).",
                  dst_address, size);
        return ResultInvalidCurrentMemory;
    }

    if (!IsValidAddressRange(src_address, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Source address range overflows the address space (src_address=0x{:016X}, "
                  "size=0x{:016X}).",
                  src_address, size);
        return ResultInvalidCurrentMemory;
    }

    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
    auto process = handle_table.Get<Process>(process_handle);
    if (!process) {
        LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).",
                  process_handle);
        return ResultInvalidHandle;
    }

    auto& page_table = process->PageTable();
    if (!page_table.IsInsideAddressSpace(src_address, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Source address range is not within the address space (src_address=0x{:016X}, "
                  "size=0x{:016X}).",
                  src_address, size);
        return ResultInvalidCurrentMemory;
    }

    if (!page_table.IsInsideASLRRegion(dst_address, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Destination address range is not within the ASLR region (dst_address=0x{:016X}, "
                  "size=0x{:016X}).",
                  dst_address, size);
        return ResultInvalidMemoryRange;
    }

    return page_table.UnmapProcessCodeMemory(dst_address, src_address, size);
}

/// Exits the current process
static void ExitProcess(Core::System& system) {
    auto* current_process = system.Kernel().CurrentProcess();
    UNIMPLEMENTED();

    LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID());
    ASSERT_MSG(current_process->GetStatus() == ProcessStatus::Running,
               "Process has already exited");

    current_process->PrepareForTermination();

    // Kill the current thread
    system.Kernel().CurrentScheduler()->GetCurrentThread()->Exit();
}

static void ExitProcess32(Core::System& system) {
    ExitProcess(system);
}

static constexpr bool IsValidCoreId(int32_t core_id) {
    return (0 <= core_id && core_id < static_cast<int32_t>(Core::Hardware::NUM_CPU_CORES));
}

/// Creates a new thread
static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr entry_point, u64 arg,
                               VAddr stack_bottom, u32 priority, s32 core_id) {
    LOG_DEBUG(Kernel_SVC,
              "called entry_point=0x{:08X}, arg=0x{:08X}, stack_bottom=0x{:08X}, "
              "priority=0x{:08X}, core_id=0x{:08X}",
              entry_point, arg, stack_bottom, priority, core_id);

    // Adjust core id, if it's the default magic.
    auto& kernel = system.Kernel();
    auto& process = *kernel.CurrentProcess();
    if (core_id == IdealCoreUseProcessValue) {
        core_id = process.GetIdealCoreId();
    }

    // Validate arguments.
    if (!IsValidCoreId(core_id)) {
        LOG_ERROR(Kernel_SVC, "Invalid Core ID specified (id={})", core_id);
        return ResultInvalidCoreId;
    }
    if (((1ULL << core_id) & process.GetCoreMask()) == 0) {
        LOG_ERROR(Kernel_SVC, "Core ID doesn't fall within allowable cores (id={})", core_id);
        return ResultInvalidCoreId;
    }

    if (HighestThreadPriority > priority || priority > LowestThreadPriority) {
        LOG_ERROR(Kernel_SVC, "Invalid priority specified (priority={})", priority);
        return ResultInvalidPriority;
    }
    if (!process.CheckThreadPriority(priority)) {
        LOG_ERROR(Kernel_SVC, "Invalid allowable thread priority (priority={})", priority);
        return ResultInvalidPriority;
    }

    KScopedResourceReservation thread_reservation(
        kernel.CurrentProcess(), LimitableResource::Threads, 1,
        system.CoreTiming().GetGlobalTimeNs().count() + 100000000);
    if (!thread_reservation.Succeeded()) {
        LOG_ERROR(Kernel_SVC, "Could not reserve a new thread");
        return ResultResourceLimitedExceeded;
    }

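    // NOTE: the reservation above is scoped; it only becomes permanent once Commit() is called
    // after the thread and its handle have been created, and it is presumably released
    // automatically if any step below fails and we return early.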
    std::shared_ptr<KThread> thread;
    {
        KScopedLightLock lk{process.GetStateLock()};
        CASCADE_RESULT(thread, KThread::Create(system, ThreadType::User, "", entry_point, priority,
                                               arg, core_id, stack_bottom, &process));
    }

    const auto new_thread_handle = process.GetHandleTable().Create(thread);
    if (new_thread_handle.Failed()) {
        LOG_ERROR(Kernel_SVC, "Failed to create handle with error=0x{:X}",
                  new_thread_handle.Code().raw);
        return new_thread_handle.Code();
    }
    *out_handle = *new_thread_handle;

    // Set the thread name for debugging purposes.
    thread->SetName(
        fmt::format("thread[entry_point={:X}, handle={:X}]", entry_point, *new_thread_handle));
    thread_reservation.Commit();

    return RESULT_SUCCESS;
}

static ResultCode CreateThread32(Core::System& system, Handle* out_handle, u32 priority,
                                 u32 entry_point, u32 arg, u32 stack_top, s32 processor_id) {
    return CreateThread(system, out_handle, entry_point, arg, stack_top, priority, processor_id);
}

/// Starts the thread for the provided handle
static ResultCode StartThread(Core::System& system, Handle thread_handle) {
    LOG_DEBUG(Kernel_SVC, "called thread=0x{:08X}", thread_handle);

    // Get the thread from its handle.
    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
    const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
    if (!thread) {
        LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", thread_handle);
        return ResultInvalidHandle;
    }

    // Try to start the thread.
    const auto run_result = thread->Run();
    if (run_result.IsError()) {
        LOG_ERROR(Kernel_SVC,
                  "Unable to successfully start thread (thread handle={:08X}, result={})",
                  thread_handle, run_result.raw);
        return run_result;
    }

    return RESULT_SUCCESS;
}

static ResultCode StartThread32(Core::System& system, Handle thread_handle) {
    return StartThread(system, thread_handle);
}

/// Called when a thread exits
static void ExitThread(Core::System& system) {
    LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC());

    auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
    system.GlobalSchedulerContext().RemoveThread(SharedFrom(current_thread));
    current_thread->Exit();
}

static void ExitThread32(Core::System& system) {
    ExitThread(system);
}

/// Sleep the current thread
static void SleepThread(Core::System& system, s64 nanoseconds) {
    auto& kernel = system.Kernel();
    const auto yield_type = static_cast<Svc::YieldType>(nanoseconds);

    LOG_TRACE(Kernel_SVC, "called nanoseconds={}", nanoseconds);

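    // Non-positive values are not durations: they are reinterpreted as Svc::YieldType hints and
    // dispatched to the matching scheduler yield variant below.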
    // When the input tick is positive, sleep.
    if (nanoseconds > 0) {
        // Convert the timeout from nanoseconds to ticks.
        // NOTE: Nintendo does not use this conversion logic in WaitSynchronization...

        // Sleep.
        // NOTE: Nintendo does not check the result of this sleep.
        static_cast<void>(GetCurrentThread(kernel).Sleep(nanoseconds));
    } else if (yield_type == Svc::YieldType::WithoutCoreMigration) {
        KScheduler::YieldWithoutCoreMigration(kernel);
    } else if (yield_type == Svc::YieldType::WithCoreMigration) {
        KScheduler::YieldWithCoreMigration(kernel);
    } else if (yield_type == Svc::YieldType::ToAnyThread) {
        KScheduler::YieldToAnyThread(kernel);
    } else {
        // Nintendo does nothing at all if an otherwise invalid value is passed.
        UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
    }
}

static void SleepThread32(Core::System& system, u32 nanoseconds_low, u32 nanoseconds_high) {
    const auto nanoseconds = static_cast<s64>(u64{nanoseconds_low} | (u64{nanoseconds_high} << 32));
    SleepThread(system, nanoseconds);
}

/// Wait process wide key atomic
static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_key,
                                           u32 tag, s64 timeout_ns) {
    LOG_TRACE(Kernel_SVC, "called address={:X}, cv_key={:X}, tag=0x{:08X}, timeout_ns={}", address,
              cv_key, tag, timeout_ns);

    // Validate input.
    if (IsKernelAddress(address)) {
        LOG_ERROR(Kernel_SVC, "Attempted to wait on kernel address (address={:08X})", address);
        return ResultInvalidCurrentMemory;
    }
    if (!Common::IsAligned(address, sizeof(s32))) {
        LOG_ERROR(Kernel_SVC, "Address must be 4 byte aligned (address={:08X})", address);
        return ResultInvalidAddress;
    }

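    // A positive timeout is padded by two ticks below, saturating to the maximum s64 value on
    // overflow, so the wait cannot expire early; non-positive values are passed through
    // unchanged (a negative timeout conventionally requests an infinite wait).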
    // Convert timeout from nanoseconds to ticks.
    s64 timeout{};
    if (timeout_ns > 0) {
        const s64 offset_tick(timeout_ns);
        if (offset_tick > 0) {
            timeout = offset_tick + 2;
            if (timeout <= 0) {
                timeout = std::numeric_limits<s64>::max();
            }
        } else {
            timeout = std::numeric_limits<s64>::max();
        }
    } else {
        timeout = timeout_ns;
    }

    // Wait on the condition variable.
    return system.Kernel().CurrentProcess()->WaitConditionVariable(
        address, Common::AlignDown(cv_key, sizeof(u32)), tag, timeout);
}

static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 address, u32 cv_key, u32 tag,
                                             u32 timeout_ns_low, u32 timeout_ns_high) {
    const auto timeout_ns = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
    return WaitProcessWideKeyAtomic(system, address, cv_key, tag, timeout_ns);
}

/// Signal process wide key
static void SignalProcessWideKey(Core::System& system, VAddr cv_key, s32 count) {
    LOG_TRACE(Kernel_SVC, "called, cv_key=0x{:X}, count=0x{:08X}", cv_key, count);

    // Signal the condition variable.
    return system.Kernel().CurrentProcess()->SignalConditionVariable(
        Common::AlignDown(cv_key, sizeof(u32)), count);
}

static void SignalProcessWideKey32(Core::System& system, u32 cv_key, s32 count) {
    SignalProcessWideKey(system, cv_key, count);
}

namespace {

constexpr bool IsValidSignalType(Svc::SignalType type) {
    switch (type) {
    case Svc::SignalType::Signal:
    case Svc::SignalType::SignalAndIncrementIfEqual:
    case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
        return true;
    default:
        return false;
    }
}

constexpr bool IsValidArbitrationType(Svc::ArbitrationType type) {
    switch (type) {
    case Svc::ArbitrationType::WaitIfLessThan:
    case Svc::ArbitrationType::DecrementAndWaitIfLessThan:
    case Svc::ArbitrationType::WaitIfEqual:
        return true;
    default:
        return false;
    }
}

} // namespace

// Wait for an address (via Address Arbiter)
static ResultCode WaitForAddress(Core::System& system, VAddr address, Svc::ArbitrationType arb_type,
                                 s32 value, s64 timeout_ns) {
    LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, arb_type=0x{:X}, value=0x{:X}, timeout_ns={}",
              address, arb_type, value, timeout_ns);

    // Validate input.
    if (IsKernelAddress(address)) {
        LOG_ERROR(Kernel_SVC, "Attempting to wait on kernel address (address={:08X})", address);
        return ResultInvalidCurrentMemory;
    }
    if (!Common::IsAligned(address, sizeof(s32))) {
        LOG_ERROR(Kernel_SVC, "Wait address must be 4 byte aligned (address={:08X})", address);
        return ResultInvalidAddress;
    }
    if (!IsValidArbitrationType(arb_type)) {
        LOG_ERROR(Kernel_SVC, "Invalid arbitration type specified (type={})", arb_type);
        return ResultInvalidEnumValue;
    }

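    // Same timeout handling as WaitProcessWideKeyAtomic above: pad positive values by two ticks
    // with saturation, and pass non-positive values through unchanged.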
    // Convert timeout from nanoseconds to ticks.
    s64 timeout{};
    if (timeout_ns > 0) {
        const s64 offset_tick(timeout_ns);
        if (offset_tick > 0) {
            timeout = offset_tick + 2;
            if (timeout <= 0) {
                timeout = std::numeric_limits<s64>::max();
            }
        } else {
            timeout = std::numeric_limits<s64>::max();
        }
    } else {
        timeout = timeout_ns;
    }

    return system.Kernel().CurrentProcess()->WaitAddressArbiter(address, arb_type, value, timeout);
}

static ResultCode WaitForAddress32(Core::System& system, u32 address, Svc::ArbitrationType arb_type,
                                   s32 value, u32 timeout_ns_low, u32 timeout_ns_high) {
    const auto timeout = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
    return WaitForAddress(system, address, arb_type, value, timeout);
}

// Signals to an address (via Address Arbiter)
static ResultCode SignalToAddress(Core::System& system, VAddr address, Svc::SignalType signal_type,
                                  s32 value, s32 count) {
    LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, signal_type=0x{:X}, value=0x{:X}, count=0x{:X}",
              address, signal_type, value, count);

    // Validate input.
    if (IsKernelAddress(address)) {
        LOG_ERROR(Kernel_SVC, "Attempting to signal to a kernel address (address={:08X})", address);
        return ResultInvalidCurrentMemory;
    }
    if (!Common::IsAligned(address, sizeof(s32))) {
        LOG_ERROR(Kernel_SVC, "Signaled address must be 4 byte aligned (address={:08X})", address);
        return ResultInvalidAddress;
    }
    if (!IsValidSignalType(signal_type)) {
        LOG_ERROR(Kernel_SVC, "Invalid signal type specified (type={})", signal_type);
        return ResultInvalidEnumValue;
    }

    return system.Kernel().CurrentProcess()->SignalAddressArbiter(address, signal_type, value,
                                                                  count);
}

static ResultCode SignalToAddress32(Core::System& system, u32 address, Svc::SignalType signal_type,
                                    s32 value, s32 count) {
    return SignalToAddress(system, address, signal_type, value, count);
}

static void KernelDebug([[maybe_unused]] Core::System& system,
                        [[maybe_unused]] u32 kernel_debug_type, [[maybe_unused]] u64 param1,
                        [[maybe_unused]] u64 param2, [[maybe_unused]] u64 param3) {
    // Intentionally do nothing, as this does nothing in released kernel binaries.
}

static void ChangeKernelTraceState([[maybe_unused]] Core::System& system,
                                   [[maybe_unused]] u32 trace_state) {
    // Intentionally do nothing, as this does nothing in released kernel binaries.
}

/// This returns the total CPU ticks elapsed since the CPU was powered-on
static u64 GetSystemTick(Core::System& system) {
    LOG_TRACE(Kernel_SVC, "called");

    auto& core_timing = system.CoreTiming();

    // Returns the value of cntpct_el0 (https://switchbrew.org/wiki/SVC#svcGetSystemTick)
    const u64 result{system.CoreTiming().GetClockTicks()};

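    // In single-core mode, advance the emulated clock by a small fixed amount, presumably to
    // model the cost of the SVC itself so that guests polling the system tick still observe
    // time moving forward.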
    if (!system.Kernel().IsMulticore()) {
        core_timing.AddTicks(400U);
    }

    return result;
}

static void GetSystemTick32(Core::System& system, u32* time_low, u32* time_high) {
    const auto time = GetSystemTick(system);
    *time_low = static_cast<u32>(time);
    *time_high = static_cast<u32>(time >> 32);
}

/// Close a handle
static ResultCode CloseHandle(Core::System& system, Handle handle) {
    LOG_TRACE(Kernel_SVC, "Closing handle 0x{:08X}", handle);

    auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
    return handle_table.Close(handle);
}

static ResultCode CloseHandle32(Core::System& system, Handle handle) {
    return CloseHandle(system, handle);
}

/// Clears the signaled state of an event or process.
static ResultCode ResetSignal(Core::System& system, Handle handle) {
    LOG_DEBUG(Kernel_SVC, "called handle 0x{:08X}", handle);

    // Get the current handle table.
    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();

    // Try to reset as readable event.
    {
        auto readable_event = handle_table.Get<KReadableEvent>(handle);
        if (readable_event) {
            return readable_event->Reset();
        }
    }

    // Try to reset as process.
    {
        auto process = handle_table.Get<Process>(handle);
        if (process) {
            return process->Reset();
        }
    }

    LOG_ERROR(Kernel_SVC, "invalid handle (0x{:08X})", handle);

    return ResultInvalidHandle;
}

static ResultCode ResetSignal32(Core::System& system, Handle handle) {
    return ResetSignal(system, handle);
}

/// Creates a TransferMemory object
static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAddr addr, u64 size,
                                       u32 permissions) {
    std::lock_guard lock{HLE::g_hle_lock};
    LOG_DEBUG(Kernel_SVC, "called addr=0x{:X}, size=0x{:X}, perms=0x{:08X}", addr, size,
              permissions);

    if (!Common::Is4KBAligned(addr)) {
        LOG_ERROR(Kernel_SVC, "Address ({:016X}) is not page aligned!", addr);
        return ResultInvalidAddress;
    }

    if (!Common::Is4KBAligned(size) || size == 0) {
        LOG_ERROR(Kernel_SVC, "Size ({:016X}) is not page aligned or equal to zero!", size);
        return ResultInvalidAddress;
    }

    if (!IsValidAddressRange(addr, size)) {
        LOG_ERROR(Kernel_SVC, "Address and size cause overflow! (address={:016X}, size={:016X})",
                  addr, size);
        return ResultInvalidCurrentMemory;
    }

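    // Transfer memory accepts only None, Read, or ReadWrite: the check below rejects anything
    // beyond ReadWrite (e.g. execute bits) as well as the write-only combination.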
    const auto perms{static_cast<MemoryPermission>(permissions)};
    if (perms > MemoryPermission::ReadWrite || perms == MemoryPermission::Write) {
        LOG_ERROR(Kernel_SVC, "Invalid memory permissions for transfer memory! (perms={:08X})",
                  permissions);
        return ResultInvalidMemoryPermissions;
    }

    auto& kernel = system.Kernel();
    // Reserve a new transfer memory from the process resource limit.
    KScopedResourceReservation trmem_reservation(kernel.CurrentProcess(),
                                                 LimitableResource::TransferMemory);
    if (!trmem_reservation.Succeeded()) {
        LOG_ERROR(Kernel_SVC, "Could not reserve a new transfer memory");
        return ResultResourceLimitedExceeded;
    }
    auto transfer_mem_handle = TransferMemory::Create(kernel, system.Memory(), addr, size,
                                                      static_cast<KMemoryPermission>(perms));

    if (const auto reserve_result{transfer_mem_handle->Reserve()}; reserve_result.IsError()) {
        return reserve_result;
    }

    auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
    const auto result{handle_table.Create(std::move(transfer_mem_handle))};
    if (result.Failed()) {
        return result.Code();
    }
    trmem_reservation.Commit();

    *handle = *result;
    return RESULT_SUCCESS;
}

static ResultCode CreateTransferMemory32(Core::System& system, Handle* handle, u32 addr, u32 size,
                                         u32 permissions) {
    return CreateTransferMemory(system, handle, addr, size, permissions);
}

static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, s32* out_core_id,
                                    u64* out_affinity_mask) {
    LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle);

    // Get the thread from its handle.
    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
    const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
    if (!thread) {
        LOG_ERROR(Kernel_SVC, "Invalid thread handle specified (handle={:08X})", thread_handle);
        return ResultInvalidHandle;
    }

    // Get the core mask.
    const auto result = thread->GetCoreMask(out_core_id, out_affinity_mask);
    if (result.IsError()) {
        LOG_ERROR(Kernel_SVC, "Unable to successfully retrieve core mask (result={})", result.raw);
        return result;
    }

    return RESULT_SUCCESS;
}

static ResultCode GetThreadCoreMask32(Core::System& system, Handle thread_handle, s32* out_core_id,
                                      u32* out_affinity_mask_low, u32* out_affinity_mask_high) {
    u64 out_affinity_mask{};
    const auto result = GetThreadCoreMask(system, thread_handle, out_core_id, &out_affinity_mask);
    *out_affinity_mask_high = static_cast<u32>(out_affinity_mask >> 32);
    *out_affinity_mask_low = static_cast<u32>(out_affinity_mask);
    return result;
}
|
|
|
|
|
2021-01-21 00:42:27 +03:00
|
|
|
static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, s32 core_id,
                                    u64 affinity_mask) {
    LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, core_id=0x{:X}, affinity_mask=0x{:016X}",
              thread_handle, core_id, affinity_mask);

    const auto& current_process = *system.Kernel().CurrentProcess();

    // Determine the core id/affinity mask.
    if (core_id == Svc::IdealCoreUseProcessValue) {
        core_id = current_process.GetIdealCoreId();
        affinity_mask = (1ULL << core_id);
    } else {
        // Validate the affinity mask.
        const u64 process_core_mask = current_process.GetCoreMask();
        if ((affinity_mask | process_core_mask) != process_core_mask) {
            LOG_ERROR(Kernel_SVC,
                      "Affinity mask does not match the process core mask (affinity mask={:016X}, "
                      "core mask={:016X})",
                      affinity_mask, process_core_mask);
            return ResultInvalidCoreId;
        }
        if (affinity_mask == 0) {
            LOG_ERROR(Kernel_SVC, "Affinity mask is zero.");
            return ResultInvalidCombination;
        }

        // Validate the core id.
        if (IsValidCoreId(core_id)) {
            if (((1ULL << core_id) & affinity_mask) == 0) {
                LOG_ERROR(Kernel_SVC, "Invalid core ID (ID={})", core_id);
                return ResultInvalidCombination;
            }
        } else {
            if (core_id != IdealCoreNoUpdate && core_id != IdealCoreDontCare) {
                LOG_ERROR(Kernel_SVC, "Invalid core ID (ID={})", core_id);
                return ResultInvalidCoreId;
            }
        }
    }

    // Get the thread from its handle.
    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
    const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
    if (!thread) {
        LOG_ERROR(Kernel_SVC, "Invalid thread handle (handle={:08X})", thread_handle);
        return ResultInvalidHandle;
    }

    // Set the core mask.
    const auto set_result = thread->SetCoreMask(core_id, affinity_mask);
    if (set_result.IsError()) {
        LOG_ERROR(Kernel_SVC, "Unable to successfully set core mask (result={})", set_result.raw);
        return set_result;
    }

    return RESULT_SUCCESS;
}

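/// 32-bit wrapper for SetThreadCoreMask: reassembles the 64-bit affinity mask from two words.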
static ResultCode SetThreadCoreMask32(Core::System& system, Handle thread_handle, s32 core_id,
                                      u32 affinity_mask_low, u32 affinity_mask_high) {
    const auto affinity_mask = u64{affinity_mask_low} | (u64{affinity_mask_high} << 32);
    return SetThreadCoreMask(system, thread_handle, core_id, affinity_mask);
}

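/// Signals the writable event referenced by the given handle.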
static ResultCode SignalEvent(Core::System& system, Handle event_handle) {
    LOG_DEBUG(Kernel_SVC, "called, event_handle=0x{:08X}", event_handle);

    auto& kernel = system.Kernel();

    // Get the current handle table.
    const HandleTable& handle_table = kernel.CurrentProcess()->GetHandleTable();

    // Get the writable event.
    auto writable_event = handle_table.Get<KWritableEvent>(event_handle);
    if (!writable_event) {
        LOG_ERROR(Kernel_SVC, "Invalid event handle provided (handle={:08X})", event_handle);
        return ResultInvalidHandle;
    }

    return writable_event->Signal();
}

static ResultCode SignalEvent32(Core::System& system, Handle event_handle) {
    return SignalEvent(system, event_handle);
}

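/// Clears the event referenced by the given handle, trying the writable event first and then
/// falling back to the readable event.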
static ResultCode ClearEvent(Core::System& system, Handle event_handle) {
    LOG_TRACE(Kernel_SVC, "called, event_handle=0x{:08X}", event_handle);

    // Get the current handle table.
    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();

    // Try to clear the writable event.
    {
        auto writable_event = handle_table.Get<KWritableEvent>(event_handle);
        if (writable_event) {
            return writable_event->Clear();
        }
    }

    // Try to clear the readable event.
    {
        auto readable_event = handle_table.Get<KReadableEvent>(event_handle);
        if (readable_event) {
            return readable_event->Clear();
        }
    }

    LOG_ERROR(Kernel_SVC, "Event handle does not exist, event_handle=0x{:08X}", event_handle);

    return ResultInvalidHandle;
}

static ResultCode ClearEvent32(Core::System& system, Handle event_handle) {
    return ClearEvent(system, event_handle);
}

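/// Creates a new event and returns a writable handle and a readable handle to it.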
static ResultCode CreateEvent(Core::System& system, Handle* out_write, Handle* out_read) {
    LOG_DEBUG(Kernel_SVC, "called");

    // Get the kernel reference and handle table.
    auto& kernel = system.Kernel();
    HandleTable& handle_table = kernel.CurrentProcess()->GetHandleTable();

    // Reserve a new event from the process resource limit.
    KScopedResourceReservation event_reservation(kernel.CurrentProcess(),
                                                 LimitableResource::Events);
    if (!event_reservation.Succeeded()) {
        LOG_ERROR(Kernel, "Could not reserve a new event");
        return ResultResourceLimitedExceeded;
    }

    // Create a new event.
    const auto event = KEvent::Create(kernel, "CreateEvent");
    if (!event) {
        LOG_ERROR(Kernel_SVC, "Unable to create new events. Event creation limit reached.");
        return ResultOutOfResource;
    }

    // Initialize the event.
    event->Initialize();

    // Commit the successful reservation.
    event_reservation.Commit();

    // Add the writable event to the handle table.
    const auto write_create_result = handle_table.Create(event->GetWritableEvent());
    if (write_create_result.Failed()) {
        return write_create_result.Code();
    }
    *out_write = *write_create_result;

    // Ensure the writable event handle is closed if the readable handle can't be created.
    auto handle_guard = SCOPE_GUARD({ handle_table.Close(*write_create_result); });

    // Add the readable event to the handle table.
    const auto read_create_result = handle_table.Create(event->GetReadableEvent());
    if (read_create_result.Failed()) {
        return read_create_result.Code();
    }
    *out_read = *read_create_result;

    // We succeeded.
    handle_guard.Cancel();
    return RESULT_SUCCESS;
}

static ResultCode CreateEvent32(Core::System& system, Handle* out_write, Handle* out_read) {
    return CreateEvent(system, out_write, out_read);
}

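/// Retrieves information about the process referenced by the given handle; only the process
/// status query is currently supported.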
static ResultCode GetProcessInfo(Core::System& system, u64* out, Handle process_handle, u32 type) {
    LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, type=0x{:X}", process_handle, type);

    // This function currently only allows retrieving a process' status.
    enum class InfoType {
        Status,
    };

    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
    const auto process = handle_table.Get<Process>(process_handle);
    if (!process) {
        LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}",
                  process_handle);
        return ResultInvalidHandle;
    }

    const auto info_type = static_cast<InfoType>(type);
    if (info_type != InfoType::Status) {
        LOG_ERROR(Kernel_SVC, "Expected info_type to be Status but got {} instead", type);
        return ResultInvalidEnumValue;
    }

    *out = static_cast<u64>(process->GetStatus());
    return RESULT_SUCCESS;
}

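/// Creates a new resource limit object and returns a handle to it.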
static ResultCode CreateResourceLimit(Core::System& system, Handle* out_handle) {
    std::lock_guard lock{HLE::g_hle_lock};
    LOG_DEBUG(Kernel_SVC, "called");

    auto& kernel = system.Kernel();
    auto resource_limit = std::make_shared<KResourceLimit>(kernel, system);

    auto* const current_process = kernel.CurrentProcess();
    ASSERT(current_process != nullptr);

    const auto handle = current_process->GetHandleTable().Create(std::move(resource_limit));
    if (handle.Failed()) {
        return handle.Code();
    }

    *out_handle = *handle;
    return RESULT_SUCCESS;
}

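/// Queries the maximum (limit) value of the given resource type on a resource limit object; the
/// current-value variant below follows the same pattern.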
static ResultCode GetResourceLimitLimitValue(Core::System& system, u64* out_value,
                                             Handle resource_limit, u32 resource_type) {
    LOG_DEBUG(Kernel_SVC, "called. Handle={:08X}, Resource type={}", resource_limit, resource_type);

    const auto limit_value = RetrieveResourceLimitValue(system, resource_limit, resource_type,
                                                        ResourceLimitValueType::LimitValue);
    if (limit_value.Failed()) {
        return limit_value.Code();
    }

    *out_value = static_cast<u64>(*limit_value);
    return RESULT_SUCCESS;
}

static ResultCode GetResourceLimitCurrentValue(Core::System& system, u64* out_value,
                                               Handle resource_limit, u32 resource_type) {
    LOG_DEBUG(Kernel_SVC, "called. Handle={:08X}, Resource type={}", resource_limit, resource_type);

    const auto current_value = RetrieveResourceLimitValue(system, resource_limit, resource_type,
                                                          ResourceLimitValueType::CurrentValue);
    if (current_value.Failed()) {
        return current_value.Code();
    }

    *out_value = static_cast<u64>(*current_value);
    return RESULT_SUCCESS;
}

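/// Sets the limit value of the given resource type on a resource limit object; lowering the
/// limit below the currently used value fails.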
static ResultCode SetResourceLimitLimitValue(Core::System& system, Handle resource_limit,
                                             u32 resource_type, u64 value) {
    LOG_DEBUG(Kernel_SVC, "called. Handle={:08X}, Resource type={}, Value={}", resource_limit,
              resource_type, value);

    const auto type = static_cast<LimitableResource>(resource_type);
    if (!IsValidResourceType(type)) {
        LOG_ERROR(Kernel_SVC, "Invalid resource limit type: '{}'", resource_type);
        return ResultInvalidEnumValue;
    }

    auto* const current_process = system.Kernel().CurrentProcess();
    ASSERT(current_process != nullptr);

    auto resource_limit_object =
        current_process->GetHandleTable().Get<KResourceLimit>(resource_limit);
    if (!resource_limit_object) {
        LOG_ERROR(Kernel_SVC, "Handle to non-existent resource limit instance used. Handle={:08X}",
                  resource_limit);
        return ResultInvalidHandle;
    }

    const auto set_result = resource_limit_object->SetLimitValue(type, static_cast<s64>(value));
    if (set_result.IsError()) {
        LOG_ERROR(Kernel_SVC,
                  "Attempted to lower resource limit ({}) for category '{}' below its current "
                  "value ({})",
                  resource_limit_object->GetLimitValue(type), resource_type,
                  resource_limit_object->GetCurrentValue(type));
        return set_result;
    }

    return RESULT_SUCCESS;
}

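/// Copies up to out_process_ids_size process IDs into guest memory and reports how many
/// processes exist.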
static ResultCode GetProcessList(Core::System& system, u32* out_num_processes,
                                 VAddr out_process_ids, u32 out_process_ids_size) {
    LOG_DEBUG(Kernel_SVC, "called. out_process_ids=0x{:016X}, out_process_ids_size={}",
              out_process_ids, out_process_ids_size);

    // If the supplied size is negative or greater than INT32_MAX / sizeof(u64), bail.
    if ((out_process_ids_size & 0xF0000000) != 0) {
        LOG_ERROR(Kernel_SVC,
                  "Supplied size outside [0, 0x0FFFFFFF] range. out_process_ids_size={}",
                  out_process_ids_size);
        return ResultOutOfRange;
    }

    const auto& kernel = system.Kernel();
    const auto total_copy_size = out_process_ids_size * sizeof(u64);

    if (out_process_ids_size > 0 && !kernel.CurrentProcess()->PageTable().IsInsideAddressSpace(
                                        out_process_ids, total_copy_size)) {
        LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}",
                  out_process_ids, out_process_ids + total_copy_size);
        return ResultInvalidCurrentMemory;
    }

    auto& memory = system.Memory();
    const auto& process_list = kernel.GetProcessList();
    const auto num_processes = process_list.size();
    const auto copy_amount = std::min(std::size_t{out_process_ids_size}, num_processes);

    for (std::size_t i = 0; i < copy_amount; ++i) {
        memory.Write64(out_process_ids, process_list[i]->GetProcessID());
        out_process_ids += sizeof(u64);
    }

    *out_num_processes = static_cast<u32>(num_processes);
    return RESULT_SUCCESS;
}

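/// Copies up to out_thread_ids_size thread IDs of the current process into guest memory and
/// reports how many threads exist.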
static ResultCode GetThreadList(Core::System& system, u32* out_num_threads, VAddr out_thread_ids,
                                u32 out_thread_ids_size, Handle debug_handle) {
    // TODO: Handle this case when debug events are supported.
    UNIMPLEMENTED_IF(debug_handle != InvalidHandle);

    LOG_DEBUG(Kernel_SVC, "called. out_thread_ids=0x{:016X}, out_thread_ids_size={}",
              out_thread_ids, out_thread_ids_size);

    // If the size is negative or larger than INT32_MAX / sizeof(u64), bail.
    if ((out_thread_ids_size & 0xF0000000) != 0) {
        LOG_ERROR(Kernel_SVC, "Supplied size outside [0, 0x0FFFFFFF] range. size={}",
                  out_thread_ids_size);
        return ResultOutOfRange;
    }

    const auto* const current_process = system.Kernel().CurrentProcess();
    const auto total_copy_size = out_thread_ids_size * sizeof(u64);

    if (out_thread_ids_size > 0 &&
        !current_process->PageTable().IsInsideAddressSpace(out_thread_ids, total_copy_size)) {
        LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}",
                  out_thread_ids, out_thread_ids + total_copy_size);
        return ResultInvalidCurrentMemory;
    }

    auto& memory = system.Memory();
    const auto& thread_list = current_process->GetThreadList();
    const auto num_threads = thread_list.size();
    const auto copy_amount = std::min(std::size_t{out_thread_ids_size}, num_threads);

    auto list_iter = thread_list.cbegin();
    for (std::size_t i = 0; i < copy_amount; ++i, ++list_iter) {
        memory.Write64(out_thread_ids, (*list_iter)->GetThreadID());
        out_thread_ids += sizeof(u64);
    }

    *out_num_threads = static_cast<u32>(num_threads);
    return RESULT_SUCCESS;
}

static ResultCode FlushProcessDataCache32([[maybe_unused]] Core::System& system,
                                          [[maybe_unused]] Handle handle,
                                          [[maybe_unused]] u32 address, [[maybe_unused]] u32 size) {
    // Note(Blinkhawk): For emulation purposes of the data cache this is mostly a no-op,
    // as all emulation is done in the same cache level on the host architecture, so the data
    // cache does not need flushing.
    LOG_DEBUG(Kernel_SVC, "called");
    return RESULT_SUCCESS;
}

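// FunctionDef maps an SVC immediate to its HLE handler (nullptr when unimplemented) and a
// printable name.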
namespace {
struct FunctionDef {
    using Func = void(Core::System&);

    u32 id;
    Func* func;
    const char* name;
};
} // namespace

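// Dispatch table for 32-bit (AArch32) processes, indexed by SVC immediate.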
static const FunctionDef SVC_Table_32[] = {
    {0x00, nullptr, "Unknown"},
    {0x01, SvcWrap32<SetHeapSize32>, "SetHeapSize32"},
    {0x02, nullptr, "Unknown"},
    {0x03, SvcWrap32<SetMemoryAttribute32>, "SetMemoryAttribute32"},
    {0x04, SvcWrap32<MapMemory32>, "MapMemory32"},
    {0x05, SvcWrap32<UnmapMemory32>, "UnmapMemory32"},
    {0x06, SvcWrap32<QueryMemory32>, "QueryMemory32"},
    {0x07, SvcWrap32<ExitProcess32>, "ExitProcess32"},
    {0x08, SvcWrap32<CreateThread32>, "CreateThread32"},
    {0x09, SvcWrap32<StartThread32>, "StartThread32"},
    {0x0a, SvcWrap32<ExitThread32>, "ExitThread32"},
    {0x0b, SvcWrap32<SleepThread32>, "SleepThread32"},
    {0x0c, SvcWrap32<GetThreadPriority32>, "GetThreadPriority32"},
    {0x0d, SvcWrap32<SetThreadPriority32>, "SetThreadPriority32"},
    {0x0e, SvcWrap32<GetThreadCoreMask32>, "GetThreadCoreMask32"},
    {0x0f, SvcWrap32<SetThreadCoreMask32>, "SetThreadCoreMask32"},
    {0x10, SvcWrap32<GetCurrentProcessorNumber32>, "GetCurrentProcessorNumber32"},
    {0x11, SvcWrap32<SignalEvent32>, "SignalEvent32"},
    {0x12, SvcWrap32<ClearEvent32>, "ClearEvent32"},
    {0x13, SvcWrap32<MapSharedMemory32>, "MapSharedMemory32"},
    {0x14, nullptr, "UnmapSharedMemory32"},
    {0x15, SvcWrap32<CreateTransferMemory32>, "CreateTransferMemory32"},
    {0x16, SvcWrap32<CloseHandle32>, "CloseHandle32"},
    {0x17, SvcWrap32<ResetSignal32>, "ResetSignal32"},
    {0x18, SvcWrap32<WaitSynchronization32>, "WaitSynchronization32"},
    {0x19, SvcWrap32<CancelSynchronization32>, "CancelSynchronization32"},
    {0x1a, SvcWrap32<ArbitrateLock32>, "ArbitrateLock32"},
    {0x1b, SvcWrap32<ArbitrateUnlock32>, "ArbitrateUnlock32"},
    {0x1c, SvcWrap32<WaitProcessWideKeyAtomic32>, "WaitProcessWideKeyAtomic32"},
    {0x1d, SvcWrap32<SignalProcessWideKey32>, "SignalProcessWideKey32"},
    {0x1e, SvcWrap32<GetSystemTick32>, "GetSystemTick32"},
    {0x1f, SvcWrap32<ConnectToNamedPort32>, "ConnectToNamedPort32"},
    {0x20, nullptr, "Unknown"},
    {0x21, SvcWrap32<SendSyncRequest32>, "SendSyncRequest32"},
    {0x22, nullptr, "SendSyncRequestWithUserBuffer32"},
    {0x23, nullptr, "Unknown"},
    {0x24, SvcWrap32<GetProcessId32>, "GetProcessId32"},
    {0x25, SvcWrap32<GetThreadId32>, "GetThreadId32"},
    {0x26, SvcWrap32<Break32>, "Break32"},
    {0x27, nullptr, "OutputDebugString32"},
    {0x28, nullptr, "Unknown"},
    {0x29, SvcWrap32<GetInfo32>, "GetInfo32"},
    {0x2a, nullptr, "Unknown"},
    {0x2b, nullptr, "Unknown"},
    {0x2c, SvcWrap32<MapPhysicalMemory32>, "MapPhysicalMemory32"},
    {0x2d, SvcWrap32<UnmapPhysicalMemory32>, "UnmapPhysicalMemory32"},
    {0x2e, nullptr, "Unknown"},
    {0x2f, nullptr, "Unknown"},
    {0x30, nullptr, "Unknown"},
    {0x31, nullptr, "Unknown"},
    {0x32, SvcWrap32<SetThreadActivity32>, "SetThreadActivity32"},
    {0x33, SvcWrap32<GetThreadContext32>, "GetThreadContext32"},
    {0x34, SvcWrap32<WaitForAddress32>, "WaitForAddress32"},
    {0x35, SvcWrap32<SignalToAddress32>, "SignalToAddress32"},
    {0x36, nullptr, "Unknown"},
    {0x37, nullptr, "Unknown"},
    {0x38, nullptr, "Unknown"},
    {0x39, nullptr, "Unknown"},
    {0x3a, nullptr, "Unknown"},
    {0x3b, nullptr, "Unknown"},
    {0x3c, nullptr, "Unknown"},
    {0x3d, nullptr, "Unknown"},
    {0x3e, nullptr, "Unknown"},
    {0x3f, nullptr, "Unknown"},
    {0x40, nullptr, "CreateSession32"},
    {0x41, nullptr, "AcceptSession32"},
    {0x42, nullptr, "Unknown"},
    {0x43, nullptr, "ReplyAndReceive32"},
    {0x44, nullptr, "Unknown"},
    {0x45, SvcWrap32<CreateEvent32>, "CreateEvent32"},
    {0x46, nullptr, "Unknown"},
    {0x47, nullptr, "Unknown"},
    {0x48, nullptr, "Unknown"},
    {0x49, nullptr, "Unknown"},
    {0x4a, nullptr, "Unknown"},
    {0x4b, nullptr, "Unknown"},
    {0x4c, nullptr, "Unknown"},
    {0x4d, nullptr, "Unknown"},
    {0x4e, nullptr, "Unknown"},
    {0x4f, nullptr, "Unknown"},
    {0x50, nullptr, "Unknown"},
    {0x51, nullptr, "Unknown"},
    {0x52, nullptr, "Unknown"},
    {0x53, nullptr, "Unknown"},
    {0x54, nullptr, "Unknown"},
    {0x55, nullptr, "Unknown"},
    {0x56, nullptr, "Unknown"},
    {0x57, nullptr, "Unknown"},
    {0x58, nullptr, "Unknown"},
    {0x59, nullptr, "Unknown"},
    {0x5a, nullptr, "Unknown"},
    {0x5b, nullptr, "Unknown"},
    {0x5c, nullptr, "Unknown"},
    {0x5d, nullptr, "Unknown"},
    {0x5e, nullptr, "Unknown"},
    {0x5F, SvcWrap32<FlushProcessDataCache32>, "FlushProcessDataCache32"},
    {0x60, nullptr, "Unknown"},
    {0x61, nullptr, "Unknown"},
    {0x62, nullptr, "Unknown"},
    {0x63, nullptr, "Unknown"},
    {0x64, nullptr, "Unknown"},
    {0x65, nullptr, "GetProcessList32"},
    {0x66, nullptr, "Unknown"},
    {0x67, nullptr, "Unknown"},
    {0x68, nullptr, "Unknown"},
    {0x69, nullptr, "Unknown"},
    {0x6A, nullptr, "Unknown"},
    {0x6B, nullptr, "Unknown"},
    {0x6C, nullptr, "Unknown"},
    {0x6D, nullptr, "Unknown"},
    {0x6E, nullptr, "Unknown"},
    {0x6f, nullptr, "GetSystemInfo32"},
    {0x70, nullptr, "CreatePort32"},
    {0x71, nullptr, "ManageNamedPort32"},
    {0x72, nullptr, "ConnectToPort32"},
    {0x73, nullptr, "SetProcessMemoryPermission32"},
    {0x74, nullptr, "Unknown"},
    {0x75, nullptr, "Unknown"},
    {0x76, nullptr, "Unknown"},
    {0x77, nullptr, "MapProcessCodeMemory32"},
    {0x78, nullptr, "UnmapProcessCodeMemory32"},
    {0x79, nullptr, "Unknown"},
    {0x7A, nullptr, "Unknown"},
    {0x7B, nullptr, "TerminateProcess32"},
};

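// Dispatch table for 64-bit (AArch64) processes, indexed by SVC immediate.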
static const FunctionDef SVC_Table_64[] = {
    {0x00, nullptr, "Unknown"},
    {0x01, SvcWrap64<SetHeapSize>, "SetHeapSize"},
    {0x02, nullptr, "SetMemoryPermission"},
    {0x03, SvcWrap64<SetMemoryAttribute>, "SetMemoryAttribute"},
    {0x04, SvcWrap64<MapMemory>, "MapMemory"},
    {0x05, SvcWrap64<UnmapMemory>, "UnmapMemory"},
    {0x06, SvcWrap64<QueryMemory>, "QueryMemory"},
    {0x07, SvcWrap64<ExitProcess>, "ExitProcess"},
    {0x08, SvcWrap64<CreateThread>, "CreateThread"},
    {0x09, SvcWrap64<StartThread>, "StartThread"},
    {0x0A, SvcWrap64<ExitThread>, "ExitThread"},
    {0x0B, SvcWrap64<SleepThread>, "SleepThread"},
    {0x0C, SvcWrap64<GetThreadPriority>, "GetThreadPriority"},
    {0x0D, SvcWrap64<SetThreadPriority>, "SetThreadPriority"},
    {0x0E, SvcWrap64<GetThreadCoreMask>, "GetThreadCoreMask"},
    {0x0F, SvcWrap64<SetThreadCoreMask>, "SetThreadCoreMask"},
    {0x10, SvcWrap64<GetCurrentProcessorNumber>, "GetCurrentProcessorNumber"},
    {0x11, SvcWrap64<SignalEvent>, "SignalEvent"},
    {0x12, SvcWrap64<ClearEvent>, "ClearEvent"},
    {0x13, SvcWrap64<MapSharedMemory>, "MapSharedMemory"},
    {0x14, nullptr, "UnmapSharedMemory"},
    {0x15, SvcWrap64<CreateTransferMemory>, "CreateTransferMemory"},
    {0x16, SvcWrap64<CloseHandle>, "CloseHandle"},
    {0x17, SvcWrap64<ResetSignal>, "ResetSignal"},
    {0x18, SvcWrap64<WaitSynchronization>, "WaitSynchronization"},
    {0x19, SvcWrap64<CancelSynchronization>, "CancelSynchronization"},
    {0x1A, SvcWrap64<ArbitrateLock>, "ArbitrateLock"},
    {0x1B, SvcWrap64<ArbitrateUnlock>, "ArbitrateUnlock"},
    {0x1C, SvcWrap64<WaitProcessWideKeyAtomic>, "WaitProcessWideKeyAtomic"},
    {0x1D, SvcWrap64<SignalProcessWideKey>, "SignalProcessWideKey"},
    {0x1E, SvcWrap64<GetSystemTick>, "GetSystemTick"},
    {0x1F, SvcWrap64<ConnectToNamedPort>, "ConnectToNamedPort"},
    {0x20, nullptr, "SendSyncRequestLight"},
    {0x21, SvcWrap64<SendSyncRequest>, "SendSyncRequest"},
    {0x22, nullptr, "SendSyncRequestWithUserBuffer"},
    {0x23, nullptr, "SendAsyncRequestWithUserBuffer"},
    {0x24, SvcWrap64<GetProcessId>, "GetProcessId"},
    {0x25, SvcWrap64<GetThreadId>, "GetThreadId"},
    {0x26, SvcWrap64<Break>, "Break"},
    {0x27, SvcWrap64<OutputDebugString>, "OutputDebugString"},
    {0x28, nullptr, "ReturnFromException"},
    {0x29, SvcWrap64<GetInfo>, "GetInfo"},
    {0x2A, nullptr, "FlushEntireDataCache"},
    {0x2B, nullptr, "FlushDataCache"},
    {0x2C, SvcWrap64<MapPhysicalMemory>, "MapPhysicalMemory"},
    {0x2D, SvcWrap64<UnmapPhysicalMemory>, "UnmapPhysicalMemory"},
    {0x2E, nullptr, "GetFutureThreadInfo"},
    {0x2F, nullptr, "GetLastThreadInfo"},
    {0x30, SvcWrap64<GetResourceLimitLimitValue>, "GetResourceLimitLimitValue"},
    {0x31, SvcWrap64<GetResourceLimitCurrentValue>, "GetResourceLimitCurrentValue"},
    {0x32, SvcWrap64<SetThreadActivity>, "SetThreadActivity"},
    {0x33, SvcWrap64<GetThreadContext>, "GetThreadContext"},
    {0x34, SvcWrap64<WaitForAddress>, "WaitForAddress"},
    {0x35, SvcWrap64<SignalToAddress>, "SignalToAddress"},
    {0x36, nullptr, "SynchronizePreemptionState"},
    {0x37, nullptr, "Unknown"},
    {0x38, nullptr, "Unknown"},
    {0x39, nullptr, "Unknown"},
    {0x3A, nullptr, "Unknown"},
    {0x3B, nullptr, "Unknown"},
    {0x3C, SvcWrap64<KernelDebug>, "KernelDebug"},
    {0x3D, SvcWrap64<ChangeKernelTraceState>, "ChangeKernelTraceState"},
    {0x3E, nullptr, "Unknown"},
    {0x3F, nullptr, "Unknown"},
    {0x40, nullptr, "CreateSession"},
    {0x41, nullptr, "AcceptSession"},
    {0x42, nullptr, "ReplyAndReceiveLight"},
    {0x43, nullptr, "ReplyAndReceive"},
    {0x44, nullptr, "ReplyAndReceiveWithUserBuffer"},
    {0x45, SvcWrap64<CreateEvent>, "CreateEvent"},
    {0x46, nullptr, "Unknown"},
    {0x47, nullptr, "Unknown"},
    {0x48, nullptr, "MapPhysicalMemoryUnsafe"},
    {0x49, nullptr, "UnmapPhysicalMemoryUnsafe"},
    {0x4A, nullptr, "SetUnsafeLimit"},
    {0x4B, nullptr, "CreateCodeMemory"},
    {0x4C, nullptr, "ControlCodeMemory"},
    {0x4D, nullptr, "SleepSystem"},
    {0x4E, nullptr, "ReadWriteRegister"},
    {0x4F, nullptr, "SetProcessActivity"},
    {0x50, nullptr, "CreateSharedMemory"},
    {0x51, nullptr, "MapTransferMemory"},
    {0x52, nullptr, "UnmapTransferMemory"},
    {0x53, nullptr, "CreateInterruptEvent"},
    {0x54, nullptr, "QueryPhysicalAddress"},
    {0x55, nullptr, "QueryIoMapping"},
    {0x56, nullptr, "CreateDeviceAddressSpace"},
    {0x57, nullptr, "AttachDeviceAddressSpace"},
    {0x58, nullptr, "DetachDeviceAddressSpace"},
    {0x59, nullptr, "MapDeviceAddressSpaceByForce"},
    {0x5A, nullptr, "MapDeviceAddressSpaceAligned"},
    {0x5B, nullptr, "MapDeviceAddressSpace"},
    {0x5C, nullptr, "UnmapDeviceAddressSpace"},
    {0x5D, nullptr, "InvalidateProcessDataCache"},
    {0x5E, nullptr, "StoreProcessDataCache"},
    {0x5F, nullptr, "FlushProcessDataCache"},
    {0x60, nullptr, "DebugActiveProcess"},
    {0x61, nullptr, "BreakDebugProcess"},
    {0x62, nullptr, "TerminateDebugProcess"},
    {0x63, nullptr, "GetDebugEvent"},
    {0x64, nullptr, "ContinueDebugEvent"},
    {0x65, SvcWrap64<GetProcessList>, "GetProcessList"},
    {0x66, SvcWrap64<GetThreadList>, "GetThreadList"},
    {0x67, nullptr, "GetDebugThreadContext"},
    {0x68, nullptr, "SetDebugThreadContext"},
    {0x69, nullptr, "QueryDebugProcessMemory"},
    {0x6A, nullptr, "ReadDebugProcessMemory"},
    {0x6B, nullptr, "WriteDebugProcessMemory"},
    {0x6C, nullptr, "SetHardwareBreakPoint"},
    {0x6D, nullptr, "GetDebugThreadParam"},
    {0x6E, nullptr, "Unknown"},
    {0x6F, nullptr, "GetSystemInfo"},
    {0x70, nullptr, "CreatePort"},
    {0x71, nullptr, "ManageNamedPort"},
    {0x72, nullptr, "ConnectToPort"},
    {0x73, nullptr, "SetProcessMemoryPermission"},
    {0x74, nullptr, "MapProcessMemory"},
    {0x75, nullptr, "UnmapProcessMemory"},
    {0x76, SvcWrap64<QueryProcessMemory>, "QueryProcessMemory"},
    {0x77, SvcWrap64<MapProcessCodeMemory>, "MapProcessCodeMemory"},
    {0x78, SvcWrap64<UnmapProcessCodeMemory>, "UnmapProcessCodeMemory"},
    {0x79, nullptr, "CreateProcess"},
    {0x7A, nullptr, "StartProcess"},
    {0x7B, nullptr, "TerminateProcess"},
    {0x7C, SvcWrap64<GetProcessInfo>, "GetProcessInfo"},
    {0x7D, SvcWrap64<CreateResourceLimit>, "CreateResourceLimit"},
    {0x7E, SvcWrap64<SetResourceLimitLimitValue>, "SetResourceLimitLimitValue"},
    {0x7F, nullptr, "CallSecureMonitor"},
};

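// Looks up an entry in the corresponding dispatch table, returning nullptr for out-of-range
// immediates.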
static const FunctionDef* GetSVCInfo32(u32 func_num) {
    if (func_num >= std::size(SVC_Table_32)) {
        LOG_ERROR(Kernel_SVC, "Unknown svc=0x{:02X}", func_num);
        return nullptr;
    }
    return &SVC_Table_32[func_num];
}

static const FunctionDef* GetSVCInfo64(u32 func_num) {
    if (func_num >= std::size(SVC_Table_64)) {
        LOG_ERROR(Kernel_SVC, "Unknown svc=0x{:02X}", func_num);
        return nullptr;
    }
    return &SVC_Table_64[func_num];
}

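/// Dispatches the SVC identified by the given immediate for the currently running thread, using
/// the 64-bit table for 64-bit processes and the 32-bit table otherwise.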
void Call(Core::System& system, u32 immediate) {
    system.ExitDynarmicProfile();
    auto& kernel = system.Kernel();
    kernel.EnterSVCProfile();

    auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
    thread->SetIsCallingSvc();

    const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? GetSVCInfo64(immediate)
                                                                        : GetSVCInfo32(immediate);
    if (info) {
        if (info->func) {
            info->func(system);
        } else {
            LOG_CRITICAL(Kernel_SVC, "Unimplemented SVC function {}(..)", info->name);
        }
    } else {
        LOG_CRITICAL(Kernel_SVC, "Unknown SVC function 0x{:X}", immediate);
    }

    kernel.ExitSVCProfile();

    if (!thread->IsCallingSvc()) {
        thread->GetHostContext()->Rewind();
    }

    system.EnterDynarmicProfile();
}

} // namespace Kernel::Svc