Browse Source
Merge pull request #6422 from FernandoS27/i-am-the-senate
Merge pull request #6422 from FernandoS27/i-am-the-senate
Implement/Port Fastmem from Citra to Yuzu
committed by
GitHub
23 changed files with 951 additions and 44 deletions
-
2externals/dynarmic
-
2src/common/CMakeLists.txt
-
538src/common/host_memory.cpp
-
70src/common/host_memory.h
-
2src/common/page_table.h
-
8src/common/settings.cpp
-
4src/common/settings.h
-
6src/core/arm/dynarmic/arm_dynarmic_32.cpp
-
12src/core/arm/dynarmic/arm_dynarmic_64.cpp
-
2src/core/device_memory.cpp
-
17src/core/device_memory.h
-
18src/core/memory.cpp
-
1src/tests/CMakeLists.txt
-
183src/tests/common/host_memory.cpp
-
18src/video_core/gpu_thread.cpp
-
56src/video_core/rasterizer_accelerated.cpp
-
7src/yuzu/configuration/config.cpp
-
9src/yuzu/configuration/configure_cpu.cpp
-
1src/yuzu/configuration/configure_cpu.h
-
12src/yuzu/configuration/configure_cpu.ui
-
3src/yuzu/configuration/configure_cpu_debug.cpp
-
14src/yuzu/configuration/configure_cpu_debug.ui
-
4src/yuzu_cmd/default_ini.h
@ -1 +1 @@ |
|||||
Subproject commit 828959caedfac2d456a0c877fda4612e35fffc03 |
|
||||
|
Subproject commit 0c12614d1a7a72d778609920dde96a4c63074ece |
||||
@ -0,0 +1,538 @@ |
|||||
|
#ifdef _WIN32
|
||||
|
|
||||
|
#include <iterator>
|
||||
|
#include <unordered_map>
|
||||
|
#include <boost/icl/separate_interval_set.hpp>
|
||||
|
#include <windows.h>
|
||||
|
#include "common/dynamic_library.h"
|
||||
|
|
||||
|
#elif defined(__linux__) // ^^^ Windows ^^^ vvv Linux vvv
|
||||
|
|
||||
|
#ifndef _GNU_SOURCE
|
||||
|
#define _GNU_SOURCE
|
||||
|
#endif
|
||||
|
#include <fcntl.h>
|
||||
|
#include <sys/mman.h>
|
||||
|
#include <unistd.h>
|
||||
|
|
||||
|
#endif // ^^^ Linux ^^^
|
||||
|
|
||||
|
#include <mutex>
|
||||
|
|
||||
|
#include "common/alignment.h"
|
||||
|
#include "common/assert.h"
|
||||
|
#include "common/host_memory.h"
|
||||
|
#include "common/logging/log.h"
|
||||
|
#include "common/scope_exit.h"
|
||||
|
|
||||
|
namespace Common { |
||||
|
|
||||
|
constexpr size_t PageAlignment = 0x1000; |
||||
|
constexpr size_t HugePageSize = 0x200000; |
||||
|
|
||||
|
#ifdef _WIN32

// Manually imported for MinGW compatibility
// Values taken from the Win32 memory-management API documentation.
#ifndef MEM_RESERVE_PLACEHOLDER
// BUGFIX: was 0x0004000 (== 0x4000), which collides with MEM_REPLACE_PLACEHOLDER
// and is rejected by VirtualAlloc2. The documented value is 0x00040000.
#define MEM_RESERVE_PLACEHOLDER 0x00040000
#endif
#ifndef MEM_REPLACE_PLACEHOLDER
#define MEM_REPLACE_PLACEHOLDER 0x00004000
#endif
#ifndef MEM_COALESCE_PLACEHOLDERS
#define MEM_COALESCE_PLACEHOLDERS 0x00000001
#endif
#ifndef MEM_PRESERVE_PLACEHOLDER
#define MEM_PRESERVE_PLACEHOLDER 0x00000002
#endif

// Function pointer types for the placeholder APIs. These live in Kernelbase.dll
// (Windows 10 1803+) and are loaded at runtime so older toolchains/systems can
// still build and run (falling back to the non-fastmem path).
using PFN_CreateFileMapping2 = _Ret_maybenull_ HANDLE(WINAPI*)(
    _In_ HANDLE File, _In_opt_ SECURITY_ATTRIBUTES* SecurityAttributes, _In_ ULONG DesiredAccess,
    _In_ ULONG PageProtection, _In_ ULONG AllocationAttributes, _In_ ULONG64 MaximumSize,
    _In_opt_ PCWSTR Name,
    _Inout_updates_opt_(ParameterCount) MEM_EXTENDED_PARAMETER* ExtendedParameters,
    _In_ ULONG ParameterCount);

using PFN_VirtualAlloc2 = _Ret_maybenull_ PVOID(WINAPI*)(
    _In_opt_ HANDLE Process, _In_opt_ PVOID BaseAddress, _In_ SIZE_T Size,
    _In_ ULONG AllocationType, _In_ ULONG PageProtection,
    _Inout_updates_opt_(ParameterCount) MEM_EXTENDED_PARAMETER* ExtendedParameters,
    _In_ ULONG ParameterCount);

using PFN_MapViewOfFile3 = _Ret_maybenull_ PVOID(WINAPI*)(
    _In_ HANDLE FileMapping, _In_opt_ HANDLE Process, _In_opt_ PVOID BaseAddress,
    _In_ ULONG64 Offset, _In_ SIZE_T ViewSize, _In_ ULONG AllocationType, _In_ ULONG PageProtection,
    _Inout_updates_opt_(ParameterCount) MEM_EXTENDED_PARAMETER* ExtendedParameters,
    _In_ ULONG ParameterCount);

using PFN_UnmapViewOfFile2 = BOOL(WINAPI*)(_In_ HANDLE Process, _In_ PVOID BaseAddress,
                                           _In_ ULONG UnmapFlags);

/// Resolve symbol `name` from `dll` into `pfn`, or abort fastmem setup by
/// throwing std::bad_alloc (the HostMemory constructor catches this and falls
/// back to a VirtualBuffer allocation).
template <typename T>
static void GetFuncAddress(Common::DynamicLibrary& dll, const char* name, T& pfn) {
    if (!dll.GetSymbol(name, &pfn)) {
        LOG_CRITICAL(HW_Memory, "Failed to load {}", name);
        throw std::bad_alloc{};
    }
}
||||
|
|
||||
|
/// Windows implementation of the fastmem arena. A pagefile-backed section is
/// mapped once contiguously (backing_base) and can be mapped again, piecewise,
/// into a large reserved placeholder region (virtual_base) to build mirrors.
class HostMemory::Impl {
public:
    explicit Impl(size_t backing_size_, size_t virtual_size_)
        : backing_size{backing_size_}, virtual_size{virtual_size_}, process{GetCurrentProcess()},
          kernelbase_dll("Kernelbase") {
        if (!kernelbase_dll.IsOpen()) {
            LOG_CRITICAL(HW_Memory, "Failed to load Kernelbase.dll");
            throw std::bad_alloc{};
        }
        GetFuncAddress(kernelbase_dll, "CreateFileMapping2", pfn_CreateFileMapping2);
        GetFuncAddress(kernelbase_dll, "VirtualAlloc2", pfn_VirtualAlloc2);
        GetFuncAddress(kernelbase_dll, "MapViewOfFile3", pfn_MapViewOfFile3);
        GetFuncAddress(kernelbase_dll, "UnmapViewOfFile2", pfn_UnmapViewOfFile2);

        // Allocate backing file map (pagefile-backed, committed on demand)
        backing_handle =
            pfn_CreateFileMapping2(INVALID_HANDLE_VALUE, nullptr, FILE_MAP_WRITE | FILE_MAP_READ,
                                   PAGE_READWRITE, SEC_COMMIT, backing_size, nullptr, nullptr, 0);
        if (!backing_handle) {
            LOG_CRITICAL(HW_Memory, "Failed to allocate {} MiB of backing memory",
                         backing_size >> 20);
            throw std::bad_alloc{};
        }
        // Allocate a virtual memory for the backing file map as placeholder
        backing_base = static_cast<u8*>(pfn_VirtualAlloc2(process, nullptr, backing_size,
                                                          MEM_RESERVE | MEM_RESERVE_PLACEHOLDER,
                                                          PAGE_NOACCESS, nullptr, 0));
        if (!backing_base) {
            Release();
            LOG_CRITICAL(HW_Memory, "Failed to reserve {} MiB of virtual memory",
                         backing_size >> 20);
            throw std::bad_alloc{};
        }
        // Map backing placeholder; this is the always-mapped linear view
        void* const ret = pfn_MapViewOfFile3(backing_handle, process, backing_base, 0, backing_size,
                                             MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE, nullptr, 0);
        if (ret != backing_base) {
            Release();
            LOG_CRITICAL(HW_Memory, "Failed to map {} MiB of virtual memory", backing_size >> 20);
            throw std::bad_alloc{};
        }
        // Allocate virtual address placeholder for the guest address space
        virtual_base = static_cast<u8*>(pfn_VirtualAlloc2(process, nullptr, virtual_size,
                                                          MEM_RESERVE | MEM_RESERVE_PLACEHOLDER,
                                                          PAGE_NOACCESS, nullptr, 0));
        if (!virtual_base) {
            Release();
            LOG_CRITICAL(HW_Memory, "Failed to reserve {} GiB of virtual memory",
                         virtual_size >> 30);
            throw std::bad_alloc{};
        }
    }

    ~Impl() {
        Release();
    }

    /// Map `length` bytes of backing memory at `host_offset` into the virtual
    /// arena at `virtual_offset`. The range must not already be mapped.
    void Map(size_t virtual_offset, size_t host_offset, size_t length) {
        std::unique_lock lock{placeholder_mutex};
        if (!IsNiechePlaceholder(virtual_offset, length)) {
            Split(virtual_offset, length);
        }
        ASSERT(placeholders.find({virtual_offset, virtual_offset + length}) == placeholders.end());
        TrackPlaceholder(virtual_offset, host_offset, length);

        MapView(virtual_offset, host_offset, length);
    }

    /// Unmap all placeholders overlapping [virtual_offset, virtual_offset + length).
    void Unmap(size_t virtual_offset, size_t length) {
        std::lock_guard lock{placeholder_mutex};

        // Unmap until there are no more placeholders
        while (UnmapOnePlaceholder(virtual_offset, length)) {
        }
    }

    /// Change protection of all mapped placeholders overlapping the given range.
    void Protect(size_t virtual_offset, size_t length, bool read, bool write) {
        DWORD new_flags{};
        if (read && write) {
            new_flags = PAGE_READWRITE;
        } else if (read && !write) {
            new_flags = PAGE_READONLY;
        } else if (!read && !write) {
            new_flags = PAGE_NOACCESS;
        } else {
            // Write-only pages have no Win32 protection constant
            UNIMPLEMENTED_MSG("Protection flag combination read={} write={}", read, write);
        }
        const size_t virtual_end = virtual_offset + length;

        std::lock_guard lock{placeholder_mutex};
        auto [it, end] = placeholders.equal_range({virtual_offset, virtual_end});
        while (it != end) {
            // Clamp each placeholder to the requested range
            const size_t offset = std::max(it->lower(), virtual_offset);
            const size_t protect_length = std::min(it->upper(), virtual_end) - offset;
            DWORD old_flags{};
            if (!VirtualProtect(virtual_base + offset, protect_length, new_flags, &old_flags)) {
                LOG_CRITICAL(HW_Memory, "Failed to change virtual memory protect rules");
            }
            ++it;
        }
    }

    const size_t backing_size; ///< Size of the backing memory in bytes
    const size_t virtual_size; ///< Size of the virtual address placeholder in bytes

    u8* backing_base{};
    u8* virtual_base{};

private:
    /// Release all resources in the object. Safe to call from partially
    /// constructed states (members are checked before being freed).
    void Release() {
        if (!placeholders.empty()) {
            for (const auto& placeholder : placeholders) {
                if (!pfn_UnmapViewOfFile2(process, virtual_base + placeholder.lower(),
                                          MEM_PRESERVE_PLACEHOLDER)) {
                    LOG_CRITICAL(HW_Memory, "Failed to unmap virtual memory placeholder");
                }
            }
            Coalesce(0, virtual_size);
        }
        if (virtual_base) {
            if (!VirtualFree(virtual_base, 0, MEM_RELEASE)) {
                LOG_CRITICAL(HW_Memory, "Failed to free virtual memory");
            }
        }
        if (backing_base) {
            if (!pfn_UnmapViewOfFile2(process, backing_base, MEM_PRESERVE_PLACEHOLDER)) {
                LOG_CRITICAL(HW_Memory, "Failed to unmap backing memory placeholder");
            }
            if (!VirtualFreeEx(process, backing_base, 0, MEM_RELEASE)) {
                LOG_CRITICAL(HW_Memory, "Failed to free backing memory");
            }
        }
        // Only close the handle if the section was ever created; avoids a
        // spurious failure log when tearing down a partially built object.
        if (backing_handle && !CloseHandle(backing_handle)) {
            LOG_CRITICAL(HW_Memory, "Failed to free backing memory file handle");
        }
    }

    /// Unmap one placeholder in the given range (partial unmaps are supported)
    /// Return true when there are no more placeholders to unmap
    bool UnmapOnePlaceholder(size_t virtual_offset, size_t length) {
        const auto it = placeholders.find({virtual_offset, virtual_offset + length});
        const auto begin = placeholders.begin();
        const auto end = placeholders.end();
        if (it == end) {
            return false;
        }
        const size_t placeholder_begin = it->lower();
        const size_t placeholder_end = it->upper();
        const size_t unmap_begin = std::max(virtual_offset, placeholder_begin);
        const size_t unmap_end = std::min(virtual_offset + length, placeholder_end);
        ASSERT(unmap_begin >= placeholder_begin && unmap_begin < placeholder_end);
        ASSERT(unmap_end <= placeholder_end && unmap_end > placeholder_begin);

        const auto host_pointer_it = placeholder_host_pointers.find(placeholder_begin);
        ASSERT(host_pointer_it != placeholder_host_pointers.end());
        const size_t host_offset = host_pointer_it->second;

        const bool split_left = unmap_begin > placeholder_begin;
        const bool split_right = unmap_end < placeholder_end;

        if (!pfn_UnmapViewOfFile2(process, virtual_base + placeholder_begin,
                                  MEM_PRESERVE_PLACEHOLDER)) {
            LOG_CRITICAL(HW_Memory, "Failed to unmap placeholder");
        }
        // If we have to remap memory regions due to partial unmaps, we are in a data race as
        // Windows doesn't support remapping memory without unmapping first. Avoid adding any extra
        // logic within the panic region described below.

        // Panic region, we are in a data race right now
        if (split_left || split_right) {
            Split(unmap_begin, unmap_end - unmap_begin);
        }
        if (split_left) {
            MapView(placeholder_begin, host_offset, unmap_begin - placeholder_begin);
        }
        if (split_right) {
            MapView(unmap_end, host_offset + unmap_end - placeholder_begin,
                    placeholder_end - unmap_end);
        }
        // End panic region

        size_t coalesce_begin = unmap_begin;
        if (!split_left) {
            // Try to coalesce pages to the left
            coalesce_begin = it == begin ? 0 : std::prev(it)->upper();
            if (coalesce_begin != placeholder_begin) {
                Coalesce(coalesce_begin, unmap_end - coalesce_begin);
            }
        }
        if (!split_right) {
            // Try to coalesce pages to the right
            const auto next = std::next(it);
            const size_t next_begin = next == end ? virtual_size : next->lower();
            if (placeholder_end != next_begin) {
                // We can coalesce to the right
                Coalesce(coalesce_begin, next_begin - coalesce_begin);
            }
        }
        // Remove and reinsert placeholder trackers
        UntrackPlaceholder(it);
        if (split_left) {
            TrackPlaceholder(placeholder_begin, host_offset, unmap_begin - placeholder_begin);
        }
        if (split_right) {
            TrackPlaceholder(unmap_end, host_offset + unmap_end - placeholder_begin,
                             placeholder_end - unmap_end);
        }
        return true;
    }

    /// Map a view of the backing section over an existing placeholder.
    void MapView(size_t virtual_offset, size_t host_offset, size_t length) {
        if (!pfn_MapViewOfFile3(backing_handle, process, virtual_base + virtual_offset, host_offset,
                                length, MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE, nullptr, 0)) {
            LOG_CRITICAL(HW_Memory, "Failed to map placeholder");
        }
    }

    /// Carve a sub-placeholder of `length` bytes out of an existing placeholder.
    void Split(size_t virtual_offset, size_t length) {
        if (!VirtualFreeEx(process, reinterpret_cast<LPVOID>(virtual_base + virtual_offset), length,
                           MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER)) {
            LOG_CRITICAL(HW_Memory, "Failed to split placeholder");
        }
    }

    /// Merge adjacent placeholders over [virtual_offset, virtual_offset + length).
    void Coalesce(size_t virtual_offset, size_t length) {
        if (!VirtualFreeEx(process, reinterpret_cast<LPVOID>(virtual_base + virtual_offset), length,
                           MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS)) {
            LOG_CRITICAL(HW_Memory, "Failed to coalesce placeholders");
        }
    }

    /// Record a mapped placeholder and its backing offset.
    void TrackPlaceholder(size_t virtual_offset, size_t host_offset, size_t length) {
        placeholders.insert({virtual_offset, virtual_offset + length});
        placeholder_host_pointers.emplace(virtual_offset, host_offset);
    }

    /// Forget a mapped placeholder.
    void UntrackPlaceholder(boost::icl::separate_interval_set<size_t>::iterator it) {
        // BUGFIX: read the key and erase from the host-pointer map BEFORE erasing
        // the interval iterator; the original erased `it` first and then called
        // it->lower() on the invalidated iterator (undefined behavior).
        placeholder_host_pointers.erase(it->lower());
        placeholders.erase(it);
    }

    /// Return true when a given memory region is a "nieche" and the placeholders don't have to be
    /// splitted.
    bool IsNiechePlaceholder(size_t virtual_offset, size_t length) const {
        const auto it = placeholders.upper_bound({virtual_offset, virtual_offset + length});
        if (it != placeholders.end() && it->lower() == virtual_offset + length) {
            const bool is_root = it == placeholders.begin() && virtual_offset == 0;
            return is_root || std::prev(it)->upper() == virtual_offset;
        }
        return false;
    }

    HANDLE process{};        ///< Current process handle
    HANDLE backing_handle{}; ///< File based backing memory

    DynamicLibrary kernelbase_dll;
    PFN_CreateFileMapping2 pfn_CreateFileMapping2{};
    PFN_VirtualAlloc2 pfn_VirtualAlloc2{};
    PFN_MapViewOfFile3 pfn_MapViewOfFile3{};
    PFN_UnmapViewOfFile2 pfn_UnmapViewOfFile2{};

    std::mutex placeholder_mutex;                           ///< Mutex for placeholders
    boost::icl::separate_interval_set<size_t> placeholders; ///< Mapped placeholders
    std::unordered_map<size_t, size_t> placeholder_host_pointers; ///< Placeholder backing offset
};
||||
|
|
||||
|
#elif defined(__linux__) // ^^^ Windows ^^^ vvv Linux vvv
|
||||
|
|
||||
|
/// Linux implementation of the fastmem arena. A memfd provides the backing
/// memory; mirrors are created by mmap'ing the same file offsets at multiple
/// virtual addresses with MAP_FIXED.
class HostMemory::Impl {
public:
    explicit Impl(size_t backing_size_, size_t virtual_size_)
        : backing_size{backing_size_}, virtual_size{virtual_size_} {
        // Release partially acquired resources if any step below throws.
        bool good = false;
        SCOPE_EXIT({
            if (!good) {
                Release();
            }
        });

        // Backing memory initialization
        fd = memfd_create("HostMemory", 0);
        if (fd == -1) {
            LOG_CRITICAL(HW_Memory, "memfd_create failed: {}", strerror(errno));
            throw std::bad_alloc{};
        }

        // Defined to extend the file with zeros
        int ret = ftruncate(fd, backing_size);
        if (ret != 0) {
            LOG_CRITICAL(HW_Memory, "ftruncate failed with {}, are you out-of-memory?",
                         strerror(errno));
            throw std::bad_alloc{};
        }

        // Linear, always-mapped view of the whole backing file.
        backing_base = static_cast<u8*>(
            mmap(nullptr, backing_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0));
        if (backing_base == MAP_FAILED) {
            LOG_CRITICAL(HW_Memory, "mmap failed: {}", strerror(errno));
            throw std::bad_alloc{};
        }

        // Virtual memory initialization
        // Reserve the guest arena as inaccessible anonymous memory; Map() will
        // overwrite sub-ranges of it with file-backed views.
        virtual_base = static_cast<u8*>(
            mmap(nullptr, virtual_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
        if (virtual_base == MAP_FAILED) {
            LOG_CRITICAL(HW_Memory, "mmap failed: {}", strerror(errno));
            throw std::bad_alloc{};
        }

        good = true;
    }

    ~Impl() {
        Release();
    }

    /// Map `length` bytes of the backing file at `host_offset` into the virtual
    /// arena at `virtual_offset`. MAP_FIXED atomically replaces any previous
    /// mapping in the range, so no explicit unmap is needed first.
    void Map(size_t virtual_offset, size_t host_offset, size_t length) {

        void* ret = mmap(virtual_base + virtual_offset, length, PROT_READ | PROT_WRITE,
                         MAP_SHARED | MAP_FIXED, fd, host_offset);
        ASSERT_MSG(ret != MAP_FAILED, "mmap failed: {}", strerror(errno));
    }

    void Unmap(size_t virtual_offset, size_t length) {
        // The method name is wrong. We're still talking about the virtual range.
        // We don't want to unmap, we want to reserve this memory.

        void* ret = mmap(virtual_base + virtual_offset, length, PROT_NONE,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
        ASSERT_MSG(ret != MAP_FAILED, "mmap failed: {}", strerror(errno));
    }

    /// Change the protection of a virtual range. Unlike the Windows backend,
    /// any read/write combination (including write-only) maps to mprotect flags.
    void Protect(size_t virtual_offset, size_t length, bool read, bool write) {
        int flags = 0;
        if (read) {
            flags |= PROT_READ;
        }
        if (write) {
            flags |= PROT_WRITE;
        }
        int ret = mprotect(virtual_base + virtual_offset, length, flags);
        ASSERT_MSG(ret == 0, "mprotect failed: {}", strerror(errno));
    }

    const size_t backing_size; ///< Size of the backing memory in bytes
    const size_t virtual_size; ///< Size of the virtual address placeholder in bytes

    // MAP_FAILED (not nullptr) is the sentinel for "not yet mapped" so that
    // Release() can distinguish partially constructed states.
    u8* backing_base{reinterpret_cast<u8*>(MAP_FAILED)};
    u8* virtual_base{reinterpret_cast<u8*>(MAP_FAILED)};

private:
    /// Release all resources in the object
    void Release() {
        if (virtual_base != MAP_FAILED) {
            int ret = munmap(virtual_base, virtual_size);
            ASSERT_MSG(ret == 0, "munmap failed: {}", strerror(errno));
        }

        if (backing_base != MAP_FAILED) {
            int ret = munmap(backing_base, backing_size);
            ASSERT_MSG(ret == 0, "munmap failed: {}", strerror(errno));
        }

        if (fd != -1) {
            int ret = close(fd);
            ASSERT_MSG(ret == 0, "close failed: {}", strerror(errno));
        }
    }

    int fd{-1}; // memfd file descriptor, -1 is the error value of memfd_create
};
||||
|
|
||||
|
#else // ^^^ Linux ^^^ vvv Generic vvv
|
||||
|
|
||||
|
/// Stub implementation for platforms without a fastmem backend. Construction
/// always fails, which makes HostMemory fall back to a VirtualBuffer.
class HostMemory::Impl {
public:
    explicit Impl(size_t /*backing_size */, size_t /* virtual_size */) {
        // This is just a place holder.
        // Please implement fastmem in a propper way on your platform.
        throw std::bad_alloc{};
    }

    // Unreachable: the constructor always throws before an instance exists.
    void Map(size_t virtual_offset, size_t host_offset, size_t length) {}

    void Unmap(size_t virtual_offset, size_t length) {}

    void Protect(size_t virtual_offset, size_t length, bool read, bool write) {}

    u8* backing_base{nullptr};
    u8* virtual_base{nullptr};
};
||||
|
|
||||
|
#endif // ^^^ Generic ^^^
|
||||
|
|
||||
|
HostMemory::HostMemory(size_t backing_size_, size_t virtual_size_)
    : backing_size(backing_size_), virtual_size(virtual_size_) {
    try {
        // Try to allocate a fastmem arena.
        // The implementation will fail with std::bad_alloc on errors.
        // Three extra huge pages are requested so the exposed virtual base can
        // be aligned up to a huge-page boundary below while staying in range.
        impl = std::make_unique<HostMemory::Impl>(AlignUp(backing_size, PageAlignment),
                                                  AlignUp(virtual_size, PageAlignment) +
                                                      3 * HugePageSize);
        backing_base = impl->backing_base;
        virtual_base = impl->virtual_base;

        if (virtual_base) {
            // Round the exposed base up into the padding, then mask it down to a
            // huge-page boundary; record the offset so Map/Unmap/Protect can
            // translate caller offsets back into the implementation's arena.
            virtual_base += 2 * HugePageSize - 1;
            virtual_base -= reinterpret_cast<size_t>(virtual_base) & (HugePageSize - 1);
            virtual_base_offset = virtual_base - impl->virtual_base;
        }

    } catch (const std::bad_alloc&) {
        // Fastmem failed (unsupported platform or resource exhaustion); fall
        // back to a plain linear buffer with no virtual arena (virtual_base
        // stays null, so Map/Unmap/Protect become no-ops).
        LOG_CRITICAL(HW_Memory,
                     "Fastmem unavailable, falling back to VirtualBuffer for memory allocation");
        fallback_buffer = std::make_unique<Common::VirtualBuffer<u8>>(backing_size);
        backing_base = fallback_buffer->data();
        virtual_base = nullptr;
    }
}

HostMemory::~HostMemory() = default;

HostMemory::HostMemory(HostMemory&&) noexcept = default;

HostMemory& HostMemory::operator=(HostMemory&&) noexcept = default;

void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length) {
    // All offsets and lengths must be page aligned and inside both arenas.
    ASSERT(virtual_offset % PageAlignment == 0);
    ASSERT(host_offset % PageAlignment == 0);
    ASSERT(length % PageAlignment == 0);
    ASSERT(virtual_offset + length <= virtual_size);
    ASSERT(host_offset + length <= backing_size);
    if (length == 0 || !virtual_base || !impl) {
        // Empty range, or running in fallback (VirtualBuffer) mode.
        return;
    }
    impl->Map(virtual_offset + virtual_base_offset, host_offset, length);
}

void HostMemory::Unmap(size_t virtual_offset, size_t length) {
    ASSERT(virtual_offset % PageAlignment == 0);
    ASSERT(length % PageAlignment == 0);
    ASSERT(virtual_offset + length <= virtual_size);
    if (length == 0 || !virtual_base || !impl) {
        // Empty range, or running in fallback (VirtualBuffer) mode.
        return;
    }
    impl->Unmap(virtual_offset + virtual_base_offset, length);
}

void HostMemory::Protect(size_t virtual_offset, size_t length, bool read, bool write) {
    ASSERT(virtual_offset % PageAlignment == 0);
    ASSERT(length % PageAlignment == 0);
    ASSERT(virtual_offset + length <= virtual_size);
    if (length == 0 || !virtual_base || !impl) {
        // Empty range, or running in fallback (VirtualBuffer) mode.
        return;
    }
    impl->Protect(virtual_offset + virtual_base_offset, length, read, write);
}
||||
|
|
||||
|
} // namespace Common
|
||||
@ -0,0 +1,70 @@ |
|||||
|
// Copyright 2019 yuzu Emulator Project |
||||
|
// Licensed under GPLv2 or any later version |
||||
|
// Refer to the license.txt file included. |
||||
|
|
||||
|
#pragma once |
||||
|
|
||||
|
#include <memory> |
||||
|
#include "common/common_types.h" |
||||
|
#include "common/virtual_buffer.h" |
||||
|
|
||||
|
namespace Common { |
||||
|
|
||||
|
/**
 * A low level linear memory buffer, which supports multiple mappings
 * Its purpose is to rebuild a given sparse memory layout, including mirrors.
 */
class HostMemory {
public:
    explicit HostMemory(size_t backing_size_, size_t virtual_size_);
    ~HostMemory();

    /**
     * Copy constructors. They shall return a copy of the buffer without the mappings.
     * TODO: Implement them with COW if needed.
     */
    HostMemory(const HostMemory& other) = delete;
    HostMemory& operator=(const HostMemory& other) = delete;

    /**
     * Move constructors. They will move the buffer and the mappings to the new object.
     */
    HostMemory(HostMemory&& other) noexcept;
    HostMemory& operator=(HostMemory&& other) noexcept;

    /// Map `length` bytes of the backing buffer at `host_offset` into the
    /// virtual arena at `virtual_offset`. Offsets and length must be page
    /// aligned. No-op when fastmem is unavailable (fallback mode).
    void Map(size_t virtual_offset, size_t host_offset, size_t length);

    /// Remove mappings overlapping the given virtual range (page aligned).
    void Unmap(size_t virtual_offset, size_t length);

    /// Change read/write protection of a mapped virtual range (page aligned).
    void Protect(size_t virtual_offset, size_t length, bool read, bool write);

    [[nodiscard]] u8* BackingBasePointer() noexcept {
        return backing_base;
    }
    [[nodiscard]] const u8* BackingBasePointer() const noexcept {
        return backing_base;
    }

    /// Base of the virtual arena, or nullptr in fallback mode.
    [[nodiscard]] u8* VirtualBasePointer() noexcept {
        return virtual_base;
    }
    [[nodiscard]] const u8* VirtualBasePointer() const noexcept {
        return virtual_base;
    }

private:
    size_t backing_size{};
    size_t virtual_size{};

    // Low level handler for the platform dependent memory routines
    class Impl;
    std::unique_ptr<Impl> impl;
    u8* backing_base{};
    u8* virtual_base{};
    // Offset between the exposed (huge-page aligned) virtual base and the
    // implementation's raw virtual base.
    size_t virtual_base_offset{};

    // Fallback if fastmem is not supported on this platform
    std::unique_ptr<Common::VirtualBuffer<u8>> fallback_buffer;
};
||||
|
|
||||
|
} // namespace Common |
||||
@ -0,0 +1,183 @@ |
|||||
|
// Copyright 2021 yuzu emulator team
|
||||
|
// Licensed under GPLv2 or any later version
|
||||
|
// Refer to the license.txt file included.
|
||||
|
|
||||
|
#include <catch2/catch.hpp>
|
||||
|
|
||||
|
#include "common/host_memory.h"
|
||||
|
|
||||
|
using Common::HostMemory;

// 512 GiB virtual arena over a 4 GiB backing buffer, mirroring the sizes the
// emulator uses.
static constexpr size_t VIRTUAL_SIZE = 1ULL << 39;
static constexpr size_t BACKING_SIZE = 4ULL * 1024 * 1024 * 1024;

// Two consecutive arenas must construct/destruct cleanly (no leaked handles).
TEST_CASE("HostMemory: Initialize and deinitialize", "[common]") {
    { HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); }
    { HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); }
}

// A mapped page must be readable and writable through the virtual arena.
TEST_CASE("HostMemory: Simple map", "[common]") {
    HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
    mem.Map(0x5000, 0x8000, 0x1000);

    volatile u8* const data = mem.VirtualBasePointer() + 0x5000;
    data[0] = 50;
    REQUIRE(data[0] == 50);
}

// Two virtual ranges backed by overlapping host offsets must alias each other.
TEST_CASE("HostMemory: Simple mirror map", "[common]") {
    HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
    mem.Map(0x5000, 0x3000, 0x2000);
    mem.Map(0x8000, 0x4000, 0x1000);

    volatile u8* const mirror_a = mem.VirtualBasePointer() + 0x5000;
    volatile u8* const mirror_b = mem.VirtualBasePointer() + 0x8000;
    mirror_b[0] = 76;
    REQUIRE(mirror_a[0x1000] == 76);
}

// Unmapping an exact previously mapped range must succeed.
TEST_CASE("HostMemory: Simple unmap", "[common]") {
    HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
    mem.Map(0x5000, 0x3000, 0x2000);

    volatile u8* const data = mem.VirtualBasePointer() + 0x5000;
    data[75] = 50;
    REQUIRE(data[75] == 50);

    mem.Unmap(0x5000, 0x2000);
}

// Remapping after unmap must expose the (persistent) backing contents again.
TEST_CASE("HostMemory: Simple unmap and remap", "[common]") {
    HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
    mem.Map(0x5000, 0x3000, 0x2000);

    volatile u8* const data = mem.VirtualBasePointer() + 0x5000;
    data[0] = 50;
    REQUIRE(data[0] == 50);

    mem.Unmap(0x5000, 0x2000);

    mem.Map(0x5000, 0x3000, 0x2000);
    REQUIRE(data[0] == 50);

    mem.Map(0x7000, 0x2000, 0x5000);
    REQUIRE(data[0x3000] == 50);
}

// Exercises the "nieche" fast path: mapping into a gap exactly bounded by
// existing placeholders must not require a split.
TEST_CASE("HostMemory: Nieche allocation", "[common]") {
    HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
    mem.Map(0x0000, 0, 0x20000);
    mem.Unmap(0x0000, 0x4000);
    mem.Map(0x1000, 0, 0x2000);
    mem.Map(0x3000, 0, 0x1000);
    mem.Map(0, 0, 0x1000);
}

// Fully unmapped regions must be remappable as part of a larger range.
TEST_CASE("HostMemory: Full unmap", "[common]") {
    HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
    mem.Map(0x8000, 0, 0x4000);
    mem.Unmap(0x8000, 0x4000);
    mem.Map(0x6000, 0, 0x16000);
}

// Unmap range extends past the right edge of the mapping.
TEST_CASE("HostMemory: Right out of bounds unmap", "[common]") {
    HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
    mem.Map(0x0000, 0, 0x4000);
    mem.Unmap(0x2000, 0x4000);
    mem.Map(0x2000, 0x80000, 0x4000);
}

// Unmap range starts before the left edge of the mapping.
TEST_CASE("HostMemory: Left out of bounds unmap", "[common]") {
    HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
    mem.Map(0x8000, 0, 0x4000);
    mem.Unmap(0x6000, 0x4000);
    mem.Map(0x8000, 0, 0x2000);
}

// A single unmap call spanning several placeholders must remove all of them.
TEST_CASE("HostMemory: Multiple placeholder unmap", "[common]") {
    HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
    mem.Map(0x0000, 0, 0x4000);
    mem.Map(0x4000, 0, 0x1b000);
    mem.Unmap(0x3000, 0x1c000);
    mem.Map(0x3000, 0, 0x20000);
}

// Unmap a range straddling the boundary between two adjacent placeholders.
TEST_CASE("HostMemory: Unmap between placeholders", "[common]") {
    HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
    mem.Map(0x0000, 0, 0x4000);
    mem.Map(0x4000, 0, 0x4000);
    mem.Unmap(0x2000, 0x4000);
    mem.Map(0x2000, 0, 0x4000);
}

// Unmapping back to offset 0 must leave the region coalescable and remappable.
TEST_CASE("HostMemory: Unmap to origin", "[common]") {
    HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
    mem.Map(0x4000, 0, 0x4000);
    mem.Map(0x8000, 0, 0x4000);
    mem.Unmap(0x4000, 0x4000);
    mem.Map(0, 0, 0x4000);
    mem.Map(0x4000, 0, 0x4000);
}

// Unmapping the rightmost placeholder must coalesce toward the right edge.
TEST_CASE("HostMemory: Unmap to right", "[common]") {
    HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
    mem.Map(0x4000, 0, 0x4000);
    mem.Map(0x8000, 0, 0x4000);
    mem.Unmap(0x8000, 0x4000);
    mem.Map(0x8000, 0, 0x4000);
}

// A partial unmap of the right half must keep the left half's data mapped.
TEST_CASE("HostMemory: Partial right unmap check bindings", "[common]") {
    HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
    mem.Map(0x4000, 0x10000, 0x4000);

    volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000;
    ptr[0x1000] = 17;

    mem.Unmap(0x6000, 0x2000);

    REQUIRE(ptr[0x1000] == 17);
}

// A partial unmap of the left half must keep the right half's data mapped.
TEST_CASE("HostMemory: Partial left unmap check bindings", "[common]") {
    HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
    mem.Map(0x4000, 0x10000, 0x4000);

    volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000;
    ptr[0x3000] = 19;
    ptr[0x3fff] = 12;

    mem.Unmap(0x4000, 0x2000);

    REQUIRE(ptr[0x3000] == 19);
    REQUIRE(ptr[0x3fff] == 12);
}

// Punching a hole in the middle must keep both outer edges mapped.
TEST_CASE("HostMemory: Partial middle unmap check bindings", "[common]") {
    HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
    mem.Map(0x4000, 0x10000, 0x4000);

    volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000;
    ptr[0x0000] = 19;
    ptr[0x3fff] = 12;

    mem.Unmap(0x1000, 0x2000);

    REQUIRE(ptr[0x0000] == 19);
    REQUIRE(ptr[0x3fff] == 12);
}

// Hole punched across the boundary of two placeholders with different backing
// offsets; the surviving edges of both must stay mapped.
TEST_CASE("HostMemory: Partial sparse middle unmap and check bindings", "[common]") {
    HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
    mem.Map(0x4000, 0x10000, 0x2000);
    mem.Map(0x6000, 0x20000, 0x2000);

    volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000;
    ptr[0x0000] = 19;
    ptr[0x3fff] = 12;

    mem.Unmap(0x5000, 0x2000);

    REQUIRE(ptr[0x0000] == 19);
    REQUIRE(ptr[0x3fff] == 12);
}
||||
Write
Preview
Loading…
Cancel
Save
Reference in new issue