Browse Source

[memory] coalesce redundant remappings of MultiLevelPageTable (#3857)

There is no need to call mmap() over an already-mapped region, as the OS will automatically populate it via lazy (demand) paging.

Basically, mmap() and VirtualAlloc() on a region that is already allocated are a no-op (FOR THIS SPECIFIC USE CASE).

Signed-off-by: lizzie <lizzie@eden-emu.dev>
Reviewed-on: https://git.eden-emu.dev/eden-emu/eden/pulls/3857
Reviewed-by: crueter <crueter@eden-emu.dev>
pull/3899/head
lizzie 5 days ago
committed by crueter
parent
commit
d69bd86183
No known key found for this signature in database GPG Key ID: 425ACD2D4830EBC6
  1. 41
      src/common/multi_level_page_table.inc

41
src/common/multi_level_page_table.inc

@@ -13,13 +13,13 @@
namespace Common {
template <typename BaseAddr>
MultiLevelPageTable<BaseAddr>::MultiLevelPageTable(std::size_t address_space_bits_,
std::size_t first_level_bits_,
std::size_t page_bits_)
: address_space_bits{address_space_bits_},
first_level_bits{first_level_bits_}, page_bits{page_bits_} {
MultiLevelPageTable<BaseAddr>::MultiLevelPageTable(std::size_t address_space_bits_, std::size_t first_level_bits_, std::size_t page_bits_)
: address_space_bits{address_space_bits_}
, first_level_bits{first_level_bits_}
, page_bits{page_bits_}
{
if (page_bits == 0) {
return;
return;
}
first_level_shift = address_space_bits - first_level_bits;
first_level_chunk_size = (1ULL << (first_level_shift - page_bits)) * sizeof(BaseAddr);
@@ -30,12 +30,9 @@ MultiLevelPageTable<BaseAddr>::MultiLevelPageTable(std::size_t address_space_bit
void* base{VirtualAlloc(nullptr, alloc_size, MEM_RESERVE, PAGE_READWRITE)};
#else
void* base{mmap(nullptr, alloc_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)};
if (base == MAP_FAILED) {
if (base == MAP_FAILED)
base = nullptr;
}
#endif
ASSERT(base);
base_ptr = reinterpret_cast<BaseAddr*>(base);
}
@@ -56,29 +53,21 @@ template <typename BaseAddr>
void MultiLevelPageTable<BaseAddr>::ReserveRange(u64 start, std::size_t size) {
const u64 new_start = start >> first_level_shift;
const u64 new_end = (start + size) >> first_level_shift;
for (u64 i = new_start; i <= new_end; i++) {
if (!first_level_map[i]) {
for (u64 i = new_start; i <= new_end; i++)
if (!first_level_map[i])
AllocateLevel(i);
}
}
}
template <typename BaseAddr>
void MultiLevelPageTable<BaseAddr>::AllocateLevel(u64 level) {
void* ptr = reinterpret_cast<char *>(base_ptr) + level * first_level_chunk_size;
void MultiLevelPageTable<BaseAddr>::AllocateLevel(u64 index) {
void* ptr = reinterpret_cast<char *>(base_ptr) + index * first_level_chunk_size;
#ifdef _WIN32
void* base{VirtualAlloc(ptr, first_level_chunk_size, MEM_COMMIT, PAGE_READWRITE)};
void* base = VirtualAlloc(ptr, first_level_chunk_size, MEM_COMMIT, PAGE_READWRITE);
ASSERT(base);
#else
void* base{mmap(ptr, first_level_chunk_size, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)};
if (base == MAP_FAILED) {
base = nullptr;
}
void* base = ptr;
#endif
ASSERT(base);
first_level_map[level] = base;
first_level_map[index] = base;
}
} // namespace Common
Loading…
Cancel
Save