@@ -1,3 +1,5 @@
+// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
 // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later
@@ -34,68 +36,60 @@ HeapTracker::~HeapTracker() = default;
 
 void HeapTracker::Map(size_t virtual_offset, size_t host_offset, size_t length,
                       MemoryPermission perm, bool is_separate_heap) {
+    bool rebuild_required = false;
+
     // When mapping other memory, map pages immediately.
     if (!is_separate_heap) {
        m_buffer.Map(virtual_offset, host_offset, length, perm, false);
        return;
     }
 
     {
-        // We are mapping part of a separate heap.
+        // We are mapping part of a separate heap and insert into mappings.
         std::scoped_lock lk{m_lock};
 
-        auto* const map = new SeparateHeapMap{
-            .vaddr = virtual_offset,
+        m_map_count++;
+        const auto it = m_mappings.insert_or_assign(virtual_offset, SeparateHeapMap{
             .paddr = host_offset,
             .size = length,
             .tick = m_tick++,
             .perm = perm,
             .is_resident = false,
-        };
-
-        // Insert into mappings.
-        m_map_count++;
-        m_mappings.insert(*map);
+        });
+
+        // Update tick before possible rebuild.
+        it.first->second.tick = m_tick++;
+
+        // Check if we need to rebuild.
+        if (m_resident_map_count >= m_max_resident_map_count)
+            rebuild_required = true;
+
+        // Map the area.
+        m_buffer.Map(it.first->first, it.first->second.paddr, it.first->second.size, it.first->second.perm, false);
+
+        // This map is now resident.
+        it.first->second.is_resident = true;
+        m_resident_map_count++;
+        m_resident_mappings.insert(*it.first);
     }
 
-    // Finally, map.
-    this->DeferredMapSeparateHeap(virtual_offset);
+    // A rebuild was required, so perform it now.
+    if (rebuild_required)
+        this->RebuildSeparateHeapAddressSpace();
 }
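
For reference, the rewritten Map leans on std::map semantics for m_mappings; the it.first->first / it.first->second accesses imply a std::map<VAddr, SeparateHeapMap>. A minimal sketch of the insert_or_assign access pattern, with an illustrative stand-in value type:

    #include <cassert>
    #include <cstddef>
    #include <map>

    // Illustrative stand-in for the real SeparateHeapMap value type.
    struct Mapping {
        std::size_t paddr{};
        std::size_t size{};
        bool is_resident{};
    };

    int main() {
        std::map<std::size_t, Mapping> mappings;

        // insert_or_assign returns std::pair<iterator, bool>: an iterator to
        // the entry, and true only if a new entry was inserted.
        const auto it = mappings.insert_or_assign(0x1000, Mapping{.paddr = 0x2000, .size = 0x1000});
        assert(it.second);

        // The key (vaddr) is it.first->first; the value is it.first->second.
        assert(it.first->first == 0x1000);
        assert(it.first->second.paddr == 0x2000);

        // Assigning over the same key overwrites in place: size stays 1.
        const auto it2 = mappings.insert_or_assign(0x1000, Mapping{.paddr = 0x3000, .size = 0x1000});
        assert(!it2.second && mappings.size() == 1);
        return 0;
    }

The bool in the returned pair distinguishes a fresh insert from an overwrite; the patch increments m_map_count unconditionally, which assumes callers never remap an already-mapped virtual_offset.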
 
 void HeapTracker::Unmap(size_t virtual_offset, size_t size, bool is_separate_heap) {
     // If this is a separate heap...
     if (is_separate_heap) {
         std::scoped_lock lk{m_lock};
 
-        const SeparateHeapMap key{
-            .vaddr = virtual_offset,
-        };
-
         // Split at the boundaries of the region we are removing.
         this->SplitHeapMapLocked(virtual_offset);
         this->SplitHeapMapLocked(virtual_offset + size);
 
         // Erase all mappings in range.
-        auto it = m_mappings.find(key);
-        while (it != m_mappings.end() && it->vaddr < virtual_offset + size) {
-            // Get underlying item.
-            auto* const item = std::addressof(*it);
-
+        auto it = m_mappings.find(virtual_offset);
+        while (it != m_mappings.end() && it->first < virtual_offset + size) {
             // If resident, erase from resident map.
-            if (item->is_resident) {
+            if (it->second.is_resident) {
                 ASSERT(--m_resident_map_count >= 0);
-                m_resident_mappings.erase(m_resident_mappings.iterator_to(*item));
+                m_resident_mappings.erase(m_resident_mappings.find(it->first));
             }
 
             // Erase from map.
             ASSERT(--m_map_count >= 0);
             it = m_mappings.erase(it);
-
-            // Free the item.
-            delete item;
         }
     }
 
     // Unmap pages.
     m_buffer.Unmap(virtual_offset, size, false);
 }
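
Unmap splits at both boundaries first, so the erase loop only ever sees mappings wholly contained in [virtual_offset, virtual_offset + size). A sketch of the range-erase idiom under the same assumed container type; the patch can use find(virtual_offset) because the preceding splits guarantee a mapping starting exactly at that address, while lower_bound keeps this sketch self-contained:

    #include <cassert>
    #include <cstddef>
    #include <map>

    int main() {
        std::map<std::size_t, std::size_t> mappings{{0x1000, 1}, {0x2000, 2}, {0x3000, 3}};

        // Erase every mapping whose start lies in [begin, end);
        // std::map::erase returns the iterator after the erased element,
        // keeping the loop valid across erasures.
        const std::size_t begin = 0x1000;
        const std::size_t end = 0x3000;
        auto it = mappings.lower_bound(begin);
        while (it != mappings.end() && it->first < end) {
            it = mappings.erase(it);
        }

        assert(mappings.size() == 1 && mappings.begin()->first == 0x3000);
        return 0;
    }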
@@ -117,110 +111,51 @@ void HeapTracker::Protect(size_t virtual_offset, size_t size, MemoryPermission p
 
         {
             std::scoped_lock lk2{m_lock};
 
-            const SeparateHeapMap key{
-                .vaddr = next,
-            };
-
             // Try to get the next mapping corresponding to this address.
-            const auto it = m_mappings.nfind(key);
+            const auto it = m_mappings.find(next);
             if (it == m_mappings.end()) {
                 // There are no separate heap mappings remaining.
                 next = end;
                 should_protect = true;
-            } else if (it->vaddr == cur) {
+            } else if (it->first == cur) {
                 // We are in range.
                 // Update permission bits.
-                it->perm = perm;
+                it->second.perm = perm;
 
                 // Determine next address and whether we should protect.
-                next = cur + it->size;
-                should_protect = it->is_resident;
+                next = cur + it->second.size;
+                should_protect = it->second.is_resident;
             } else /* if (it->vaddr > cur) */ {
                 // We weren't in range, but there is a block coming up that will be.
-                next = it->vaddr;
+                next = it->first;
                 should_protect = true;
             }
         }
 
         // Clamp to end.
         next = std::min(next, end);
 
         // Reprotect, if we need to.
-        if (should_protect) {
+        if (should_protect)
             m_buffer.Protect(cur, next - cur, perm);
-        }
 
         // Advance.
         cur = next;
     }
 }
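
The Protect loop walks [virtual_offset, end) in mapping-sized steps: look up the mapping at cur, derive the next boundary, and reprotect where required. Note the new m_mappings.find(next) only matches a mapping that starts exactly at next, whereas the removed nfind was a lower-bound style lookup; the sketch below uses lower_bound so all three branches are exercised:

    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <map>

    int main() {
        // Mapping start address -> length (illustrative values).
        std::map<std::size_t, std::size_t> mappings{{0x1000, 0x1000}, {0x3000, 0x1000}};
        const std::size_t base = 0x0000;
        const std::size_t end = 0x4000;

        std::size_t cur = base;
        std::size_t steps = 0;
        while (cur < end) {
            std::size_t next = cur;
            const auto it = mappings.lower_bound(cur);
            if (it == mappings.end()) {
                next = end;              // no mappings remain
            } else if (it->first == cur) {
                next = cur + it->second; // inside a mapping: step over it
            } else {
                next = it->first;        // in a gap: stop at the next mapping
            }
            cur = std::min(next, end);
            ++steps;
        }

        // Visits [0, 0x1000), [0x1000, 0x2000), [0x2000, 0x3000), [0x3000, 0x4000).
        assert(steps == 4);
        return 0;
    }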
 
-bool HeapTracker::DeferredMapSeparateHeap(u8* fault_address) {
-    if (m_buffer.IsInVirtualRange(fault_address)) {
-        return this->DeferredMapSeparateHeap(fault_address - m_buffer.VirtualBasePointer());
-    }
-
-    return false;
-}
-
-bool HeapTracker::DeferredMapSeparateHeap(size_t virtual_offset) {
-    bool rebuild_required = false;
-
-    {
-        std::scoped_lock lk{m_lock};
-
-        // Check to ensure this was a non-resident separate heap mapping.
-        const auto it = this->GetNearestHeapMapLocked(virtual_offset);
-        if (it == m_mappings.end() || it->is_resident) {
-            return false;
-        }
-
-        // Update tick before possible rebuild.
-        it->tick = m_tick++;
-
-        // Check if we need to rebuild.
-        if (m_resident_map_count > m_max_resident_map_count) {
-            rebuild_required = true;
-        }
-
-        // Map the area.
-        m_buffer.Map(it->vaddr, it->paddr, it->size, it->perm, false);
-
-        // This map is now resident.
-        it->is_resident = true;
-        m_resident_map_count++;
-        m_resident_mappings.insert(*it);
-    }
-
-    if (rebuild_required) {
-        // A rebuild was required, so perform it now.
-        this->RebuildSeparateHeapAddressSpace();
-    }
-
-    return true;
-}
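
Both DeferredMapSeparateHeap overloads are removed outright: since the new Map makes separate-heap mappings resident immediately, there is no longer a non-resident state for the fault handler to resolve. The deleted u8* overload recovered the buffer offset from a faulting host pointer by pointer subtraction, roughly:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    int main() {
        // Stand-in for the buffer's virtual range.
        static std::uint8_t buffer[0x100];
        std::uint8_t* const base = buffer;
        std::uint8_t* const fault_address = buffer + 0x40;

        // IsInVirtualRange-style check, then pointer subtraction to recover
        // the offset the size_t overload operated on.
        assert(fault_address >= base && fault_address < base + sizeof(buffer));
        const std::size_t virtual_offset = static_cast<std::size_t>(fault_address - base);
        assert(virtual_offset == 0x40);
        return 0;
    }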
 
 void HeapTracker::RebuildSeparateHeapAddressSpace() {
     std::scoped_lock lk{m_rebuild_lock, m_lock};
 
     ASSERT(!m_resident_mappings.empty());
 
     // Dump half of the mappings.
-    //
     // Despite being worse in theory, this has proven to be better in practice than more
     // regularly dumping a smaller amount, because it significantly reduces average case
     // lock contention.
-    const size_t desired_count = std::min(m_resident_map_count, m_max_resident_map_count) / 2;
-    const size_t evict_count = m_resident_map_count - desired_count;
+    std::size_t const desired_count = std::min(m_resident_map_count, m_max_resident_map_count) / 2;
+    std::size_t const evict_count = m_resident_map_count - desired_count;
     auto it = m_resident_mappings.begin();
 
-    for (size_t i = 0; i < evict_count && it != m_resident_mappings.end(); i++) {
+    for (std::size_t i = 0; i < evict_count && it != m_resident_mappings.end(); i++) {
         // Unmark and unmap.
-        it->is_resident = false;
-        m_buffer.Unmap(it->vaddr, it->size, false);
+        it->second.is_resident = false;
+        m_buffer.Unmap(it->first, it->second.size, false);
 
         // Advance.
         ASSERT(--m_resident_map_count >= 0);
         it = m_resident_mappings.erase(it);
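
The eviction sizing keeps half of the smaller of the resident count and the budget, and evicts the remainder; worked through with illustrative counts:

    #include <algorithm>
    #include <cassert>
    #include <cstddef>

    int main() {
        const std::size_t resident_map_count = 96;     // maps currently resident
        const std::size_t max_resident_map_count = 64; // allowed budget

        // Keep half of the smaller of the two counts, evict everything else.
        const std::size_t desired_count = std::min(resident_map_count, max_resident_map_count) / 2;
        const std::size_t evict_count = resident_map_count - desired_count;

        assert(desired_count == 32 && evict_count == 64);
        return 0;
    }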
@@ -229,53 +164,32 @@ void HeapTracker::RebuildSeparateHeapAddressSpace() {
 
 void HeapTracker::SplitHeapMap(VAddr offset, size_t size) {
     std::scoped_lock lk{m_lock};
 
     this->SplitHeapMapLocked(offset);
     this->SplitHeapMapLocked(offset + size);
 }
 
 void HeapTracker::SplitHeapMapLocked(VAddr offset) {
-    const auto it = this->GetNearestHeapMapLocked(offset);
-    if (it == m_mappings.end() || it->vaddr == offset) {
-        // Not contained or no split required.
-        return;
-    }
-
-    // Cache the original values.
-    auto* const left = std::addressof(*it);
-    const size_t orig_size = left->size;
-
-    // Adjust the left map.
-    const size_t left_size = offset - left->vaddr;
-    left->size = left_size;
-
-    // Create the new right map.
-    auto* const right = new SeparateHeapMap{
-        .vaddr = left->vaddr + left_size,
-        .paddr = left->paddr + left_size,
-        .size = orig_size - left_size,
-        .tick = left->tick,
-        .perm = left->perm,
-        .is_resident = left->is_resident,
-    };
-
-    // Insert the new right map.
-    m_map_count++;
-    m_mappings.insert(*right);
-
-    // If resident, also insert into resident map.
-    if (right->is_resident) {
-        m_resident_map_count++;
-        m_resident_mappings.insert(*right);
+    auto it = this->GetNearestHeapMapLocked(offset);
+    if (it != m_mappings.end() && it->first != offset) {
+        // Adjust left iterator
+        auto const orig_size = it->second.size;
+        auto const left_size = offset - it->first;
+        it->second.size = left_size;
+
+        // Insert the new right map.
+        auto const right = SeparateHeapMap{
+            .paddr = it->second.paddr + left_size,
+            .size = orig_size - left_size,
+            .tick = it->second.tick,
+            .perm = it->second.perm,
+            .is_resident = it->second.is_resident,
+        };
+        m_map_count++;
+        auto rit = m_mappings.insert_or_assign(it->first + left_size, right);
+        if (rit.first->second.is_resident) {
+            m_resident_map_count++;
+            m_resident_mappings.insert(*rit.first);
+        }
     }
 }
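
SplitHeapMapLocked shrinks the mapping containing offset in place and inserts the right half at the split point, with paddr advanced by the same amount. A self-contained sketch with a simplified value type; the containing-entry lookup performed by GetNearestHeapMapLocked (whose old definition is removed below) is assumed:

    #include <cassert>
    #include <cstddef>
    #include <map>

    // Simplified stand-in for SeparateHeapMap.
    struct Span {
        std::size_t paddr{};
        std::size_t size{};
    };

    int main() {
        std::map<std::size_t, Span> mappings{{0x1000, Span{.paddr = 0x9000, .size = 0x2000}}};
        const std::size_t offset = 0x1800;

        // Assume `it` is the mapping containing `offset` (the role played by
        // GetNearestHeapMapLocked in the patch).
        const auto it = mappings.begin();

        // Shrink the left half in place...
        const std::size_t orig_size = it->second.size;
        const std::size_t left_size = offset - it->first;
        it->second.size = left_size;

        // ...then insert the right half at the split point, advancing paddr
        // by the same amount so both halves still cover the original range.
        mappings.insert_or_assign(it->first + left_size,
                                  Span{.paddr = it->second.paddr + left_size,
                                       .size = orig_size - left_size});

        assert(mappings.at(0x1000).size == 0x800);
        assert(mappings.at(0x1800).paddr == 0x9800 && mappings.at(0x1800).size == 0x1800);
        return 0;
    }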
 
-HeapTracker::AddrTree::iterator HeapTracker::GetNearestHeapMapLocked(VAddr offset) {
-    const SeparateHeapMap key{
-        .vaddr = offset,
-    };
-
-    return m_mappings.find(key);
-}
-
 } // namespace Common