@@ -5,6 +5,7 @@
 #include <algorithm>
 #include <iterator>
 #include <utility>
+#include "common/alignment.h"
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "common/memory_hook.h"
@@ -103,7 +104,7 @@ bool VMManager::IsValidHandle(VMAHandle handle) const {
 }
 
 ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
-                                                          std::shared_ptr<std::vector<u8>> block,
+                                                          std::shared_ptr<PhysicalMemory> block,
                                                           std::size_t offset, u64 size,
                                                           MemoryState state, VMAPermission perm) {
     ASSERT(block != nullptr);
@@ -260,7 +261,7 @@ ResultVal<VAddr> VMManager::SetHeapSize(u64 size) {
 
     if (heap_memory == nullptr) {
         // Initialize heap
-        heap_memory = std::make_shared<std::vector<u8>>(size);
+        heap_memory = std::make_shared<PhysicalMemory>(size);
         heap_end = heap_region_base + size;
     } else {
         UnmapRange(heap_region_base, GetCurrentHeapSize());
@@ -341,7 +342,7 @@ ResultCode VMManager::MapPhysicalMemory(VAddr target, u64 size) {
         const auto map_size = std::min(end_addr - cur_addr, vma_end - cur_addr);
         if (vma.state == MemoryState::Unmapped) {
             const auto map_res =
-                MapMemoryBlock(cur_addr, std::make_shared<std::vector<u8>>(map_size, 0), 0,
+                MapMemoryBlock(cur_addr, std::make_shared<PhysicalMemory>(map_size, 0), 0,
                                map_size, MemoryState::Heap, VMAPermission::ReadWrite);
             result = map_res.Code();
             if (result.IsError()) {
@@ -442,7 +443,7 @@ ResultCode VMManager::UnmapPhysicalMemory(VAddr target, u64 size) {
     if (result.IsError()) {
         for (const auto [map_address, map_size] : unmapped_regions) {
             const auto remap_res =
-                MapMemoryBlock(map_address, std::make_shared<std::vector<u8>>(map_size, 0), 0,
+                MapMemoryBlock(map_address, std::make_shared<PhysicalMemory>(map_size, 0), 0,
                                map_size, MemoryState::Heap, VMAPermission::None);
             ASSERT_MSG(remap_res.Succeeded(), "UnmapPhysicalMemory re-map on error");
         }
@@ -593,7 +594,7 @@ ResultCode VMManager::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, Mem
     ASSERT_MSG(vma_offset + size <= vma->second.size,
                "Shared memory exceeds bounds of mapped block");
 
-    const std::shared_ptr<std::vector<u8>>& backing_block = vma->second.backing_block;
+    const std::shared_ptr<PhysicalMemory>& backing_block = vma->second.backing_block;
     const std::size_t backing_block_offset = vma->second.offset + vma_offset;
 
     CASCADE_RESULT(auto new_vma,
@@ -606,7 +607,7 @@ ResultCode VMManager::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, Mem
     return RESULT_SUCCESS;
 }
 
-void VMManager::RefreshMemoryBlockMappings(const std::vector<u8>* block) {
+void VMManager::RefreshMemoryBlockMappings(const PhysicalMemory* block) {
     // If this ever proves to have a noticeable performance impact, allow users of the function to
     // specify a specific range of addresses to limit the scan to.
     for (const auto& p : vma_map) {
@@ -764,7 +765,7 @@ void VMManager::MergeAdjacentVMA(VirtualMemoryArea& left, const VirtualMemoryAre
                                    right.backing_block->begin() + right.offset + right.size);
     } else {
         // Slow case: make a new memory block for left and right.
-        auto new_memory = std::make_shared<std::vector<u8>>();
+        auto new_memory = std::make_shared<PhysicalMemory>();
         new_memory->insert(new_memory->end(), left.backing_block->begin() + left.offset,
                            left.backing_block->begin() + left.offset + left.size);
         new_memory->insert(new_memory->end(), right.backing_block->begin() + right.offset,
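
Note on the replacement type: every removed line swaps std::shared_ptr<std::vector<u8>> for std::shared_ptr<PhysicalMemory>, yet the unchanged context still calls vector members on the block (begin(), insert(), and the (size) / (count, value) constructors), so PhysicalMemory must be a drop-in replacement for std::vector<u8>. A minimal sketch of the alias this diff presumably builds on; the header path, namespace, and comments are assumptions, as the definition itself is not part of this diff:

// core/hle/kernel/physical_memory.h -- assumed definition, not shown in this diff
#pragma once

#include <vector>

#include "common/common_types.h"

namespace Kernel {

// Alias for the backing memory of a VMA. Behaviorally identical to
// std::vector<u8>, so every call site in the diff above compiles
// unchanged, while giving one place to later swap in a different
// container or a custom allocator.
using PhysicalMemory = std::vector<u8>;

} // namespace Kernel

With such an alias in place the change is purely mechanical: for example, std::make_shared<PhysicalMemory>(map_size, 0) still allocates a zero-filled block of map_size bytes, exactly as std::make_shared<std::vector<u8>>(map_size, 0) did.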