// Copyright 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <cstring>
#include <optional>
#include <utility>

#include "common/assert.h"
#include "common/atomic_ops.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/page_table.h"
#include "common/swap.h"
#include "core/arm/arm_interface.h"
#include "core/core.h"
#include "core/device_memory.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/physical_memory.h"
#include "core/memory.h"
#include "video_core/gpu.h"

namespace Core::Memory {

// Implementation class used to keep the specifics of the memory subsystem hidden
// from outside classes. This also allows modification to the internals of the memory
// subsystem without needing to rebuild all files that make use of the memory interface.
struct Memory::Impl {
    explicit Impl(Core::System& system_) : system{system_} {}

    void SetCurrentPageTable(Kernel::KProcess& process, u32 core_id) {
        current_page_table = &process.PageTable().PageTableImpl();

        const std::size_t address_space_width = process.PageTable().GetAddressSpaceWidth();

        system.ArmInterface(core_id).PageTableChanged(*current_page_table, address_space_width);
    }

    void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
        ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
        ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
        MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory);
    }

    void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
        ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
        ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
        MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, 0, Common::PageType::Unmapped);
    }
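
    // Page-table entries pack the host pointer and the page type into a single
    // uintptr_t: per the original commit notes, the PageType occupies the low
    // two bits (free thanks to alignment requirements) and the pointer the
    // remaining bits. PointerType() unpacks both halves of that pair from one
    // atomic read of the entry.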
    bool IsValidVirtualAddress(const Kernel::KProcess& process, const VAddr vaddr) const {
        const auto& page_table = process.PageTable().PageTableImpl();
        const auto [pointer, type] = page_table.pointers[vaddr >> PAGE_BITS].PointerType();
        return pointer != nullptr || type == Common::PageType::RasterizerCachedMemory;
    }

    bool IsValidVirtualAddress(VAddr vaddr) const {
        return IsValidVirtualAddress(*system.CurrentProcess(), vaddr);
    }
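
    // Rasterizer-cached pages store no host pointer in the page table; the
    // pointer is instead reconstructed from the page's backing physical
    // address via device memory.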
    u8* GetPointerFromRasterizerCachedMemory(VAddr vaddr) const {
        const PAddr paddr{current_page_table->backing_addr[vaddr >> PAGE_BITS]};

        if (!paddr) {
            return {};
        }

        return system.DeviceMemory().GetPointer(paddr) + vaddr;
    }

    u8* GetPointer(const VAddr vaddr) const {
        const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw();
        if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
            return pointer + vaddr;
        }
        const auto type = Common::PageTable::PageInfo::ExtractType(raw_pointer);
        if (type == Common::PageType::RasterizerCachedMemory) {
            return GetPointerFromRasterizerCachedMemory(vaddr);
        }
        return nullptr;
    }
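
    // Aligned accesses take the templated fast path below; a misaligned
    // Read16/32/64 (and the matching writes) is split into two smaller,
    // naturally aligned accesses that are recombined little-endian.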
    u8 Read8(const VAddr addr) {
        return Read<u8>(addr);
    }

    u16 Read16(const VAddr addr) {
        if ((addr & 1) == 0) {
            return Read<u16_le>(addr);
        } else {
            const u32 a{Read<u8>(addr)};
            const u32 b{Read<u8>(addr + sizeof(u8))};
            return static_cast<u16>((b << 8) | a);
        }
    }

    u32 Read32(const VAddr addr) {
        if ((addr & 3) == 0) {
            return Read<u32_le>(addr);
        } else {
            const u32 a{Read16(addr)};
            const u32 b{Read16(addr + sizeof(u16))};
            return (b << 16) | a;
        }
    }

    u64 Read64(const VAddr addr) {
        if ((addr & 7) == 0) {
            return Read<u64_le>(addr);
        } else {
            const u32 a{Read32(addr)};
            const u32 b{Read32(addr + sizeof(u32))};
            return (static_cast<u64>(b) << 32) | a;
        }
    }

    void Write8(const VAddr addr, const u8 data) {
        Write<u8>(addr, data);
    }

    void Write16(const VAddr addr, const u16 data) {
        if ((addr & 1) == 0) {
            Write<u16_le>(addr, data);
        } else {
            Write<u8>(addr, static_cast<u8>(data));
            Write<u8>(addr + sizeof(u8), static_cast<u8>(data >> 8));
        }
    }

    void Write32(const VAddr addr, const u32 data) {
        if ((addr & 3) == 0) {
            Write<u32_le>(addr, data);
        } else {
            Write16(addr, static_cast<u16>(data));
            Write16(addr + sizeof(u16), static_cast<u16>(data >> 16));
        }
    }

    void Write64(const VAddr addr, const u64 data) {
        if ((addr & 7) == 0) {
            Write<u64_le>(addr, data);
        } else {
            Write32(addr, static_cast<u32>(data));
            Write32(addr + sizeof(u32), static_cast<u32>(data >> 32));
        }
    }
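
    // Each WriteExclusiveN attempts an atomic compare-and-swap against
    // `expected` and reports the outcome to the caller.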
    bool WriteExclusive8(const VAddr addr, const u8 data, const u8 expected) {
        return WriteExclusive<u8>(addr, data, expected);
    }

    bool WriteExclusive16(const VAddr addr, const u16 data, const u16 expected) {
        return WriteExclusive<u16_le>(addr, data, expected);
    }

    bool WriteExclusive32(const VAddr addr, const u32 data, const u32 expected) {
        return WriteExclusive<u32_le>(addr, data, expected);
    }

    bool WriteExclusive64(const VAddr addr, const u64 data, const u64 expected) {
        return WriteExclusive<u64_le>(addr, data, expected);
    }

    std::string ReadCString(VAddr vaddr, std::size_t max_length) {
        std::string string;
        string.reserve(max_length);
        for (std::size_t i = 0; i < max_length; ++i) {
            const char c = Read8(vaddr);
            if (c == '\0') {
                break;
            }
            string.push_back(c);
            ++vaddr;
        }
        string.shrink_to_fit();
        return string;
    }
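
    // Block transfers walk the region one page at a time: each iteration
    // copies up to the end of the current page, then continues from the start
    // of the next one.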
    void ReadBlock(const Kernel::KProcess& process, const VAddr src_addr, void* dest_buffer,
                   const std::size_t size) {
        const auto& page_table = process.PageTable().PageTableImpl();

        std::size_t remaining_size = size;
        std::size_t page_index = src_addr >> PAGE_BITS;
        std::size_t page_offset = src_addr & PAGE_MASK;

        while (remaining_size > 0) {
            const std::size_t copy_amount =
                std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
            const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);

            const auto [pointer, type] = page_table.pointers[page_index].PointerType();
            switch (type) {
            case Common::PageType::Unmapped: {
                LOG_ERROR(HW_Memory,
                          "Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                          current_vaddr, src_addr, size);
                std::memset(dest_buffer, 0, copy_amount);
                break;
            }
            case Common::PageType::Memory: {
                DEBUG_ASSERT(pointer);
                const u8* const src_ptr = pointer + page_offset + (page_index << PAGE_BITS);
                std::memcpy(dest_buffer, src_ptr, copy_amount);
                break;
            }
            case Common::PageType::RasterizerCachedMemory: {
                const u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)};
                system.GPU().FlushRegion(current_vaddr, copy_amount);
                std::memcpy(dest_buffer, host_ptr, copy_amount);
                break;
            }
            default:
                UNREACHABLE();
            }

            page_index++;
            page_offset = 0;
            dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
            remaining_size -= copy_amount;
        }
    }
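
    // The "Unsafe" variants skip the GPU flush/invalidate on rasterizer-cached
    // pages, so callers must already know the region is coherent.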
    void ReadBlockUnsafe(const Kernel::KProcess& process, const VAddr src_addr, void* dest_buffer,
                         const std::size_t size) {
        const auto& page_table = process.PageTable().PageTableImpl();

        std::size_t remaining_size = size;
        std::size_t page_index = src_addr >> PAGE_BITS;
        std::size_t page_offset = src_addr & PAGE_MASK;

        while (remaining_size > 0) {
            const std::size_t copy_amount =
                std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
            const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);

            const auto [pointer, type] = page_table.pointers[page_index].PointerType();
            switch (type) {
            case Common::PageType::Unmapped: {
                LOG_ERROR(HW_Memory,
                          "Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                          current_vaddr, src_addr, size);
                std::memset(dest_buffer, 0, copy_amount);
                break;
            }
            case Common::PageType::Memory: {
                DEBUG_ASSERT(pointer);
                const u8* const src_ptr = pointer + page_offset + (page_index << PAGE_BITS);
                std::memcpy(dest_buffer, src_ptr, copy_amount);
                break;
            }
            case Common::PageType::RasterizerCachedMemory: {
                const u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)};
                std::memcpy(dest_buffer, host_ptr, copy_amount);
                break;
            }
            default:
                UNREACHABLE();
            }

            page_index++;
            page_offset = 0;
            dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
            remaining_size -= copy_amount;
        }
    }

    void ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
        ReadBlock(*system.CurrentProcess(), src_addr, dest_buffer, size);
    }

    void ReadBlockUnsafe(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
        ReadBlockUnsafe(*system.CurrentProcess(), src_addr, dest_buffer, size);
    }
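
    // WriteBlock mirrors ReadBlock, but invalidates the GPU region before
    // writing into rasterizer-cached pages instead of flushing it.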
    void WriteBlock(const Kernel::KProcess& process, const VAddr dest_addr, const void* src_buffer,
                    const std::size_t size) {
        const auto& page_table = process.PageTable().PageTableImpl();
        std::size_t remaining_size = size;
        std::size_t page_index = dest_addr >> PAGE_BITS;
        std::size_t page_offset = dest_addr & PAGE_MASK;

        while (remaining_size > 0) {
            const std::size_t copy_amount =
                std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
            const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);

            const auto [pointer, type] = page_table.pointers[page_index].PointerType();
            switch (type) {
            case Common::PageType::Unmapped: {
                LOG_ERROR(HW_Memory,
                          "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                          current_vaddr, dest_addr, size);
                break;
            }
            case Common::PageType::Memory: {
                DEBUG_ASSERT(pointer);
                u8* const dest_ptr = pointer + page_offset + (page_index << PAGE_BITS);
                std::memcpy(dest_ptr, src_buffer, copy_amount);
                break;
            }
            case Common::PageType::RasterizerCachedMemory: {
                u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)};
                system.GPU().InvalidateRegion(current_vaddr, copy_amount);
                std::memcpy(host_ptr, src_buffer, copy_amount);
                break;
            }
            default:
                UNREACHABLE();
            }

            page_index++;
            page_offset = 0;
            src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
            remaining_size -= copy_amount;
        }
    }

    void WriteBlockUnsafe(const Kernel::KProcess& process, const VAddr dest_addr,
                          const void* src_buffer, const std::size_t size) {
        const auto& page_table = process.PageTable().PageTableImpl();
        std::size_t remaining_size = size;
        std::size_t page_index = dest_addr >> PAGE_BITS;
        std::size_t page_offset = dest_addr & PAGE_MASK;

        while (remaining_size > 0) {
            const std::size_t copy_amount =
                std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
            const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);

            const auto [pointer, type] = page_table.pointers[page_index].PointerType();
            switch (type) {
            case Common::PageType::Unmapped: {
                LOG_ERROR(HW_Memory,
                          "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                          current_vaddr, dest_addr, size);
                break;
            }
            case Common::PageType::Memory: {
                DEBUG_ASSERT(pointer);
                u8* const dest_ptr = pointer + page_offset + (page_index << PAGE_BITS);
                std::memcpy(dest_ptr, src_buffer, copy_amount);
                break;
            }
            case Common::PageType::RasterizerCachedMemory: {
                u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)};
                std::memcpy(host_ptr, src_buffer, copy_amount);
                break;
            }
            default:
                UNREACHABLE();
            }

            page_index++;
            page_offset = 0;
            src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
            remaining_size -= copy_amount;
        }
    }

    void WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) {
        WriteBlock(*system.CurrentProcess(), dest_addr, src_buffer, size);
    }

    void WriteBlockUnsafe(const VAddr dest_addr, const void* src_buffer, const std::size_t size) {
        WriteBlockUnsafe(*system.CurrentProcess(), dest_addr, src_buffer, size);
    }
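
    // ZeroBlock clears a region page by page without staging a zero buffer.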
    void ZeroBlock(const Kernel::KProcess& process, const VAddr dest_addr, const std::size_t size) {
        const auto& page_table = process.PageTable().PageTableImpl();
        std::size_t remaining_size = size;
        std::size_t page_index = dest_addr >> PAGE_BITS;
        std::size_t page_offset = dest_addr & PAGE_MASK;

        while (remaining_size > 0) {
            const std::size_t copy_amount =
                std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
            const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);

            const auto [pointer, type] = page_table.pointers[page_index].PointerType();
            switch (type) {
            case Common::PageType::Unmapped: {
                LOG_ERROR(HW_Memory,
                          "Unmapped ZeroBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                          current_vaddr, dest_addr, size);
                break;
            }
            case Common::PageType::Memory: {
                DEBUG_ASSERT(pointer);
                u8* const dest_ptr = pointer + page_offset + (page_index << PAGE_BITS);
                std::memset(dest_ptr, 0, copy_amount);
                break;
            }
            case Common::PageType::RasterizerCachedMemory: {
                u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)};
                system.GPU().InvalidateRegion(current_vaddr, copy_amount);
                std::memset(host_ptr, 0, copy_amount);
                break;
            }
            default:
                UNREACHABLE();
            }

            page_index++;
            page_offset = 0;
            remaining_size -= copy_amount;
        }
    }

    void ZeroBlock(const VAddr dest_addr, const std::size_t size) {
        ZeroBlock(*system.CurrentProcess(), dest_addr, size);
    }
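
    // CopyBlock reads each source page and forwards it to WriteBlock; an
    // unmapped source page zero-fills the corresponding destination range.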
    void CopyBlock(const Kernel::KProcess& process, VAddr dest_addr, VAddr src_addr,
                   const std::size_t size) {
        const auto& page_table = process.PageTable().PageTableImpl();
        std::size_t remaining_size = size;
        std::size_t page_index = src_addr >> PAGE_BITS;
        std::size_t page_offset = src_addr & PAGE_MASK;

        while (remaining_size > 0) {
            const std::size_t copy_amount =
                std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
            const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);

            const auto [pointer, type] = page_table.pointers[page_index].PointerType();
            switch (type) {
            case Common::PageType::Unmapped: {
                LOG_ERROR(HW_Memory,
                          "Unmapped CopyBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                          current_vaddr, src_addr, size);
                ZeroBlock(process, dest_addr, copy_amount);
                break;
            }
            case Common::PageType::Memory: {
                DEBUG_ASSERT(pointer);
                const u8* src_ptr = pointer + page_offset + (page_index << PAGE_BITS);
                WriteBlock(process, dest_addr, src_ptr, copy_amount);
                break;
            }
            case Common::PageType::RasterizerCachedMemory: {
                const u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)};
                system.GPU().FlushRegion(current_vaddr, copy_amount);
                WriteBlock(process, dest_addr, host_ptr, copy_amount);
                break;
            }
            default:
                UNREACHABLE();
            }

            page_index++;
            page_offset = 0;
            dest_addr += static_cast<VAddr>(copy_amount);
            src_addr += static_cast<VAddr>(copy_amount);
            remaining_size -= copy_amount;
        }
    }

    void CopyBlock(VAddr dest_addr, VAddr src_addr, std::size_t size) {
        return CopyBlock(*system.CurrentProcess(), dest_addr, src_addr, size);
    }

    void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
        if (vaddr == 0) {
            return;
        }

        // Iterate over a contiguous CPU address space, which corresponds to the specified GPU
        // address space, marking the region as un/cached. The region is marked un/cached at a
        // granularity of CPU pages, hence why we iterate on a CPU page basis (note: GPU page size
        // is different). This assumes the specified GPU address region is contiguous as well.
        const u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
        for (u64 i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
            const Common::PageType page_type{
                current_page_table->pointers[vaddr >> PAGE_BITS].Type()};
            if (cached) {
                // Switch page type to cached if now cached
                switch (page_type) {
                case Common::PageType::Unmapped:
                    // It is not necessary for a process to have this region mapped into its address
                    // space, for example, a system module need not have a VRAM mapping.
                    break;
                case Common::PageType::Memory:
                    current_page_table->pointers[vaddr >> PAGE_BITS].Store(
                        nullptr, Common::PageType::RasterizerCachedMemory);
                    break;
                case Common::PageType::RasterizerCachedMemory:
                    // There can be more than one GPU region mapped per CPU region, so it's common
                    // that this area is already marked as cached.
                    break;
                default:
                    UNREACHABLE();
                }
            } else {
                // Switch page type to uncached if now uncached
                switch (page_type) {
                case Common::PageType::Unmapped:
                    // It is not necessary for a process to have this region mapped into its address
                    // space, for example, a system module need not have a VRAM mapping.
                    break;
                case Common::PageType::Memory:
                    // There can be more than one GPU region mapped per CPU region, so it's common
                    // that this area is already unmarked as cached.
                    break;
                case Common::PageType::RasterizerCachedMemory: {
                    u8* const pointer{GetPointerFromRasterizerCachedMemory(vaddr & ~PAGE_MASK)};
                    if (pointer == nullptr) {
                        // It's possible that this function has been called while updating the
                        // pagetable after unmapping a VMA. In that case the underlying VMA will no
                        // longer exist, and we should just leave the pagetable entry blank.
                        current_page_table->pointers[vaddr >> PAGE_BITS].Store(
                            nullptr, Common::PageType::Unmapped);
                    } else {
                        current_page_table->pointers[vaddr >> PAGE_BITS].Store(
                            pointer - (vaddr & ~PAGE_MASK), Common::PageType::Memory);
                    }
                    break;
                }
                default:
                    UNREACHABLE();
                }
            }
        }
    }

    /**
     * Maps a region of pages as a specific type.
     *
     * @param page_table The page table to use to perform the mapping.
     * @param base The base page index to begin mapping at.
     * @param size The total size of the range in pages.
     * @param target The target address to begin mapping from.
     * @param type The page type to map the memory as.
     */
    void MapPages(Common::PageTable& page_table, VAddr base, u64 size, PAddr target,
                  Common::PageType type) {
        LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", target, base * PAGE_SIZE,
                  (base + size) * PAGE_SIZE);

        // During boot, current_page_table might not be set yet, in which case we need not flush
        if (system.IsPoweredOn()) {
            auto& gpu = system.GPU();
            for (u64 i = 0; i < size; i++) {
                const auto page = base + i;
                if (page_table.pointers[page].Type() == Common::PageType::RasterizerCachedMemory) {
                    gpu.FlushAndInvalidateRegion(page << PAGE_BITS, PAGE_SIZE);
                }
            }
        }

        const VAddr end = base + size;
        ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}",
                   base + page_table.pointers.size());

        if (!target) {
            ASSERT_MSG(type != Common::PageType::Memory,
                       "Mapping memory page without a pointer @ {:016x}", base * PAGE_SIZE);

            while (base != end) {
                page_table.pointers[base].Store(nullptr, type);
                page_table.backing_addr[base] = 0;

                base += 1;
            }
        } else {
            while (base != end) {
                page_table.pointers[base].Store(
                    system.DeviceMemory().GetPointer(target) - (base << PAGE_BITS), type);
                page_table.backing_addr[base] = target - (base << PAGE_BITS);

                ASSERT_MSG(page_table.pointers[base].Pointer(),
                           "memory mapping base yields a nullptr within the table");

                base += 1;
                target += PAGE_SIZE;
            }
        }
    }
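
    // Read/Write unpack the page-table entry in two steps (Raw(), then
    // ExtractPointer/ExtractType) rather than using PointerType(); per the
    // original commit notes, this less readable form stops MSVC from emitting
    // an extra AND instruction on the hot path.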
    /**
     * Reads a particular data type out of memory at the given virtual address.
     *
     * @param vaddr The virtual address to read the data type from.
     *
     * @tparam T The data type to read out of memory. This type *must* be
     *           trivially copyable, otherwise the behavior of this function
     *           is undefined.
     *
     * @returns The instance of T read from the specified virtual address.
     */
    template <typename T>
    T Read(const VAddr vaddr) {
        // Avoid adding any extra logic to this fast-path block
        const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw();
        if (const u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
            T value;
            std::memcpy(&value, &pointer[vaddr], sizeof(T));
            return value;
        }
        switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) {
        case Common::PageType::Unmapped:
            LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, vaddr);
            return 0;
        case Common::PageType::Memory:
            ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
            break;
        case Common::PageType::RasterizerCachedMemory: {
            const u8* const host_ptr{GetPointerFromRasterizerCachedMemory(vaddr)};
            system.GPU().FlushRegion(vaddr, sizeof(T));
            T value;
            std::memcpy(&value, host_ptr, sizeof(T));
            return value;
        }
        default:
            UNREACHABLE();
        }
        return {};
    }

    /**
     * Writes a particular data type to memory at the given virtual address.
     *
     * @param vaddr The virtual address to write the data type to.
     *
     * @tparam T The data type to write to memory. This type *must* be
     *           trivially copyable, otherwise the behavior of this function
     *           is undefined.
     */
    template <typename T>
    void Write(const VAddr vaddr, const T data) {
        // Avoid adding any extra logic to this fast-path block
        const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw();
        if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
            std::memcpy(&pointer[vaddr], &data, sizeof(T));
            return;
        }
        switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) {
        case Common::PageType::Unmapped:
            LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8,
                      static_cast<u32>(data), vaddr);
            return;
        case Common::PageType::Memory:
            ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
            break;
        case Common::PageType::RasterizerCachedMemory: {
            u8* const host_ptr{GetPointerFromRasterizerCachedMemory(vaddr)};
            system.GPU().InvalidateRegion(vaddr, sizeof(T));
            std::memcpy(host_ptr, &data, sizeof(T));
            break;
        }
        default:
            UNREACHABLE();
        }
    }

    template <typename T>
    bool WriteExclusive(const VAddr vaddr, const T data, const T expected) {
        // Unpack the packed page-table entry in two steps (pointer first, then type) so
        // the fast path stays a single test-and-dereference.
        const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw();
        if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
            // NOTE: Avoid adding any extra logic to this fast-path block
            const auto volatile_pointer = reinterpret_cast<volatile T*>(&pointer[vaddr]);
            return Common::AtomicCompareAndSwap(volatile_pointer, data, expected);
        }
        switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) {
        case Common::PageType::Unmapped:
            LOG_ERROR(HW_Memory, "Unmapped WriteExclusive{} 0x{:016X} @ 0x{:016X}",
                      sizeof(data) * 8, static_cast<u64>(data), vaddr);
            return true;
        case Common::PageType::Memory:
            ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
            break;
        case Common::PageType::RasterizerCachedMemory: {
            u8* const host_ptr{GetPointerFromRasterizerCachedMemory(vaddr)};
            system.GPU().InvalidateRegion(vaddr, sizeof(T));
            // CAS against the backing host memory, not the address of the local
            // host_ptr variable.
            const auto volatile_pointer = reinterpret_cast<volatile T*>(host_ptr);
            return Common::AtomicCompareAndSwap(volatile_pointer, data, expected);
        }
        default:
            UNREACHABLE();
        }
        return true;
    }
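
    // WriteExclusive returns true when the location still held `expected` and the store
    // was performed; false means another core won the race and the caller should retry,
    // mirroring a failed store-exclusive. The unmapped case above reports success,
    // presumably so a guest store-exclusive to a bad address cannot spin forever.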

    bool WriteExclusive128(const VAddr vaddr, const u128 data, const u128 expected) {
        const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw();
        if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
            // NOTE: Avoid adding any extra logic to this fast-path block
            const auto volatile_pointer = reinterpret_cast<volatile u64*>(&pointer[vaddr]);
            return Common::AtomicCompareAndSwap(volatile_pointer, data, expected);
        }
        switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) {
        case Common::PageType::Unmapped:
            LOG_ERROR(HW_Memory, "Unmapped WriteExclusive{} 0x{:016X}{:016X} @ 0x{:016X}",
                      sizeof(data) * 8, static_cast<u64>(data[1]), static_cast<u64>(data[0]),
                      vaddr);
            return true;
        case Common::PageType::Memory:
            ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
            break;
        case Common::PageType::RasterizerCachedMemory: {
            u8* const host_ptr{GetPointerFromRasterizerCachedMemory(vaddr)};
            system.GPU().InvalidateRegion(vaddr, sizeof(u128));
            // CAS against the backing host memory, not the address of the local variable.
            const auto volatile_pointer = reinterpret_cast<volatile u64*>(host_ptr);
            return Common::AtomicCompareAndSwap(volatile_pointer, data, expected);
        }
        default:
            UNREACHABLE();
        }
        return true;
    }
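
    // Note: the 128-bit variant reuses the volatile u64* overload of
    // Common::AtomicCompareAndSwap with u128 operands, which is expected to lower to a
    // double-width compare-exchange (cmpxchg16b on x86-64), so the destination must be
    // 16-byte aligned.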

    Common::PageTable* current_page_table = nullptr;
    Core::System& system;
};

Memory::Memory(Core::System& system_) : system{system_} {
    Reset();
}

Memory::~Memory() = default;

void Memory::Reset() {
    impl = std::make_unique<Impl>(system);
}

void Memory::SetCurrentPageTable(Kernel::KProcess& process, u32 core_id) {
    impl->SetCurrentPageTable(process, core_id);
}

void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
    impl->MapMemoryRegion(page_table, base, size, target);
}

void Memory::UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
    impl->UnmapRegion(page_table, base, size);
}

bool Memory::IsValidVirtualAddress(const Kernel::KProcess& process, const VAddr vaddr) const {
    return impl->IsValidVirtualAddress(process, vaddr);
}

bool Memory::IsValidVirtualAddress(const VAddr vaddr) const {
    return impl->IsValidVirtualAddress(vaddr);
}

u8* Memory::GetPointer(VAddr vaddr) {
    return impl->GetPointer(vaddr);
}

const u8* Memory::GetPointer(VAddr vaddr) const {
    return impl->GetPointer(vaddr);
}

u8 Memory::Read8(const VAddr addr) {
    return impl->Read8(addr);
}

u16 Memory::Read16(const VAddr addr) {
    return impl->Read16(addr);
}

u32 Memory::Read32(const VAddr addr) {
    return impl->Read32(addr);
}

u64 Memory::Read64(const VAddr addr) {
    return impl->Read64(addr);
}

void Memory::Write8(VAddr addr, u8 data) {
    impl->Write8(addr, data);
}

void Memory::Write16(VAddr addr, u16 data) {
    impl->Write16(addr, data);
}

void Memory::Write32(VAddr addr, u32 data) {
    impl->Write32(addr, data);
}

void Memory::Write64(VAddr addr, u64 data) {
    impl->Write64(addr, data);
}

bool Memory::WriteExclusive8(VAddr addr, u8 data, u8 expected) {
    return impl->WriteExclusive8(addr, data, expected);
}

bool Memory::WriteExclusive16(VAddr addr, u16 data, u16 expected) {
    return impl->WriteExclusive16(addr, data, expected);
}

bool Memory::WriteExclusive32(VAddr addr, u32 data, u32 expected) {
    return impl->WriteExclusive32(addr, data, expected);
}

bool Memory::WriteExclusive64(VAddr addr, u64 data, u64 expected) {
    return impl->WriteExclusive64(addr, data, expected);
}

bool Memory::WriteExclusive128(VAddr addr, u128 data, u128 expected) {
    return impl->WriteExclusive128(addr, data, expected);
}
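
// Hypothetical usage sketch (not part of this file): emulating a guest
// load-exclusive/store-exclusive increment with the public API, assuming `memory` is a
// valid Memory instance and `addr` a mapped guest address.
//
//   u32 value;
//   do {
//       value = memory.Read32(addr);
//   } while (!memory.WriteExclusive32(addr, value + 1, value)); // retry until CAS wins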

std::string Memory::ReadCString(VAddr vaddr, std::size_t max_length) {
    return impl->ReadCString(vaddr, max_length);
}

void Memory::ReadBlock(const Kernel::KProcess& process, const VAddr src_addr, void* dest_buffer,
                       const std::size_t size) {
    impl->ReadBlock(process, src_addr, dest_buffer, size);
}

void Memory::ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
    impl->ReadBlock(src_addr, dest_buffer, size);
}
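
// Hypothetical usage sketch: copying a guest structure out of emulated memory through
// the current process's page table; `memory` and `guest_addr` are assumed to exist.
//
//   struct GuestHeader {
//       u32 magic;
//       u32 size;
//   };
//   GuestHeader header{};
//   memory.ReadBlock(guest_addr, &header, sizeof(header));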

void Memory::ReadBlockUnsafe(const Kernel::KProcess& process, const VAddr src_addr,
                             void* dest_buffer, const std::size_t size) {
    impl->ReadBlockUnsafe(process, src_addr, dest_buffer, size);
}

void Memory::ReadBlockUnsafe(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
    impl->ReadBlockUnsafe(src_addr, dest_buffer, size);
}

void Memory::WriteBlock(const Kernel::KProcess& process, VAddr dest_addr, const void* src_buffer,
                        std::size_t size) {
    impl->WriteBlock(process, dest_addr, src_buffer, size);
}

void Memory::WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) {
    impl->WriteBlock(dest_addr, src_buffer, size);
}

void Memory::WriteBlockUnsafe(const Kernel::KProcess& process, VAddr dest_addr,
                              const void* src_buffer, std::size_t size) {
    impl->WriteBlockUnsafe(process, dest_addr, src_buffer, size);
}

void Memory::WriteBlockUnsafe(const VAddr dest_addr, const void* src_buffer,
                              const std::size_t size) {
    impl->WriteBlockUnsafe(dest_addr, src_buffer, size);
}

void Memory::ZeroBlock(const Kernel::KProcess& process, VAddr dest_addr, std::size_t size) {
    impl->ZeroBlock(process, dest_addr, size);
}

void Memory::ZeroBlock(VAddr dest_addr, std::size_t size) {
    impl->ZeroBlock(dest_addr, size);
}

void Memory::CopyBlock(const Kernel::KProcess& process, VAddr dest_addr, VAddr src_addr,
                       const std::size_t size) {
    impl->CopyBlock(process, dest_addr, src_addr, size);
}

void Memory::CopyBlock(VAddr dest_addr, VAddr src_addr, std::size_t size) {
    impl->CopyBlock(dest_addr, src_addr, size);
}

void Memory::RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
    impl->RasterizerMarkRegionCached(vaddr, size, cached);
}
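
// Marking a region as cached flips its pages between Memory and RasterizerCachedMemory,
// which routes subsequent accesses through the GPU cache-maintenance paths (the
// RasterizerCachedMemory cases above) instead of the direct host-pointer fast path.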

bool IsKernelVirtualAddress(const VAddr vaddr) {
    return KERNEL_REGION_VADDR <= vaddr && vaddr < KERNEL_REGION_END;
}

} // namespace Core::Memory