diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index fd6ba42312..999b380595 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -66,6 +66,8 @@ add_library(
     fs/path_util.cpp
     fs/path_util.h
     hash.h
+    heap_tracker.cpp
+    heap_tracker.h
     hex_util.cpp
     hex_util.h
     host_memory.cpp
diff --git a/src/common/heap_tracker.cpp b/src/common/heap_tracker.cpp
new file mode 100644
index 0000000000..a99d386d8a
--- /dev/null
+++ b/src/common/heap_tracker.cpp
@@ -0,0 +1,282 @@
+// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <fstream>
+
+#include "common/heap_tracker.h"
+#include "common/logging/log.h"
+#include "common/assert.h"
+
+namespace Common {
+
+namespace {
+
+s64 GetMaxPermissibleResidentMapCount() {
+    // Default value.
+    s64 value = 65530;
+
+    // Try to read how many mappings we can make.
+    std::ifstream s("/proc/sys/vm/max_map_count");
+    s >> value;
+
+    // Print, for debug.
+    LOG_INFO(HW_Memory, "Current maximum map count: {}", value);
+
+    // Allow 20000 maps for other code and to account for split inaccuracy.
+    return std::max<s64>(value - 20000, 0);
+}
+
+} // namespace
+
+HeapTracker::HeapTracker(Common::HostMemory& buffer)
+    : m_buffer(buffer), m_max_resident_map_count(GetMaxPermissibleResidentMapCount()) {}
+
+HeapTracker::~HeapTracker() = default;
+
+void HeapTracker::Map(size_t virtual_offset, size_t host_offset, size_t length,
+                      MemoryPermission perm, bool is_separate_heap) {
+    // When mapping other memory, map pages immediately.
+    if (!is_separate_heap) {
+        m_buffer.Map(virtual_offset, host_offset, length, perm, false);
+        return;
+    }
+
+    {
+        // We are mapping part of a separate heap.
+        std::scoped_lock lk{m_lock};
+
+        auto* const map = new SeparateHeapMap{
+            .vaddr = virtual_offset,
+            .paddr = host_offset,
+            .size = length,
+            .tick = m_tick++,
+            .perm = perm,
+            .is_resident = false,
+        };
+
+        // Insert into mappings.
+        m_map_count++;
+        m_mappings.insert(*map);
+    }
+
+    // Finally, map.
+    this->DeferredMapSeparateHeap(virtual_offset);
+}
+
+void HeapTracker::Unmap(size_t virtual_offset, size_t size, bool is_separate_heap) {
+    // If this is a separate heap...
+    if (is_separate_heap) {
+        std::scoped_lock lk{m_lock};
+
+        const SeparateHeapMap key{
+            .vaddr = virtual_offset,
+        };
+
+        // Split at the boundaries of the region we are removing.
+        this->SplitHeapMapLocked(virtual_offset);
+        this->SplitHeapMapLocked(virtual_offset + size);
+
+        // Erase all mappings in range.
+        auto it = m_mappings.find(key);
+        while (it != m_mappings.end() && it->vaddr < virtual_offset + size) {
+            // Get underlying item.
+            auto* const item = std::addressof(*it);
+
+            // If resident, erase from resident map.
+            if (item->is_resident) {
+                ASSERT(--m_resident_map_count >= 0);
+                m_resident_mappings.erase(m_resident_mappings.iterator_to(*item));
+            }
+
+            // Erase from map.
+            ASSERT(--m_map_count >= 0);
+            it = m_mappings.erase(it);
+
+            // Free the item.
+            delete item;
+        }
+    }
+
+    // Unmap pages.
+    m_buffer.Unmap(virtual_offset, size, false);
+}
+void HeapTracker::Protect(size_t virtual_offset, size_t size, MemoryPermission perm) {
+    // Ensure no rebuild occurs while reprotecting.
+    std::shared_lock lk{m_rebuild_lock};
+
+    // Split at the boundaries of the region we are reprotecting.
+    this->SplitHeapMap(virtual_offset, size);
+
+    // Declare tracking variables.
+    const VAddr end = virtual_offset + size;
+    VAddr cur = virtual_offset;
+
+    while (cur < end) {
+        VAddr next = cur;
+        bool should_protect = false;
+
+        {
+            std::scoped_lock lk2{m_lock};
+
+            const SeparateHeapMap key{
+                .vaddr = next,
+            };
+
+            // Try to get the next mapping corresponding to this address.
+            const auto it = m_mappings.nfind(key);
+
+            if (it == m_mappings.end()) {
+                // There are no separate heap mappings remaining.
+                next = end;
+                should_protect = true;
+            } else if (it->vaddr == cur) {
+                // We are in range.
+                // Update permission bits.
+                it->perm = perm;
+
+                // Determine next address and whether we should protect.
+                next = cur + it->size;
+                should_protect = it->is_resident;
+            } else /* if (it->vaddr > cur) */ {
+                // We weren't in range, but there is a block coming up that will be.
+                next = it->vaddr;
+                should_protect = true;
+            }
+        }
+
+        // Clamp to end.
+        next = (std::min)(next, end);
+
+        // Reprotect, if we need to.
+        if (should_protect) {
+            m_buffer.Protect(cur, next - cur, perm);
+        }
+
+        // Advance.
+        cur = next;
+    }
+}
+
+bool HeapTracker::DeferredMapSeparateHeap(u8* fault_address) {
+    if (m_buffer.IsInVirtualRange(fault_address)) {
+        return this->DeferredMapSeparateHeap(fault_address - m_buffer.VirtualBasePointer());
+    }
+
+    return false;
+}
+
+bool HeapTracker::DeferredMapSeparateHeap(size_t virtual_offset) {
+    bool rebuild_required = false;
+
+    {
+        std::scoped_lock lk{m_lock};
+
+        // Check to ensure this was a non-resident separate heap mapping.
+        const auto it = this->GetNearestHeapMapLocked(virtual_offset);
+        if (it == m_mappings.end() || it->is_resident) {
+            return false;
+        }
+
+        // Update tick before possible rebuild.
+        it->tick = m_tick++;
+
+        // Check if we need to rebuild.
+        if (m_resident_map_count > m_max_resident_map_count) {
+            rebuild_required = true;
+        }
+
+        // Map the area.
+        m_buffer.Map(it->vaddr, it->paddr, it->size, it->perm, false);
+
+        // This map is now resident.
+        it->is_resident = true;
+        m_resident_map_count++;
+        m_resident_mappings.insert(*it);
+    }
+
+    if (rebuild_required) {
+        // A rebuild was required, so perform it now.
+        this->RebuildSeparateHeapAddressSpace();
+    }
+
+    return true;
+}
+
+void HeapTracker::RebuildSeparateHeapAddressSpace() {
+    std::scoped_lock lk{m_rebuild_lock, m_lock};
+
+    ASSERT(!m_resident_mappings.empty());
+
+    // Dump half of the mappings.
+    //
+    // Despite being worse in theory, this has proven to be better in practice than more
+    // regularly dumping a smaller amount, because it significantly reduces average case
+    // lock contention.
+    std::size_t const desired_count = (std::min)(m_resident_map_count, m_max_resident_map_count) / 2;
+    std::size_t const evict_count = m_resident_map_count - desired_count;
+    auto it = m_resident_mappings.begin();
+
+    for (size_t i = 0; i < evict_count && it != m_resident_mappings.end(); i++) {
+        // Unmark and unmap.
+        it->is_resident = false;
+        m_buffer.Unmap(it->vaddr, it->size, false);
+
+        // Advance.
+        ASSERT(--m_resident_map_count >= 0);
+        it = m_resident_mappings.erase(it);
+    }
+}
+
+void HeapTracker::SplitHeapMap(VAddr offset, size_t size) {
+    std::scoped_lock lk{m_lock};
+
+    this->SplitHeapMapLocked(offset);
+    this->SplitHeapMapLocked(offset + size);
+}
+
+void HeapTracker::SplitHeapMapLocked(VAddr offset) {
+    const auto it = this->GetNearestHeapMapLocked(offset);
+    if (it == m_mappings.end() || it->vaddr == offset) {
+        // Not contained or no split required.
+        return;
+    }
+
+    // Cache the original values.
+    auto* const left = std::addressof(*it);
+    const size_t orig_size = left->size;
+
+    // Adjust the left map.
+    const size_t left_size = offset - left->vaddr;
+    left->size = left_size;
+
+    // Create the new right map.
+    auto* const right = new SeparateHeapMap{
+        .vaddr = left->vaddr + left_size,
+        .paddr = left->paddr + left_size,
+        .size = orig_size - left_size,
+        .tick = left->tick,
+        .perm = left->perm,
+        .is_resident = left->is_resident,
+    };
+
+    // Insert the new right map.
+    m_map_count++;
+    m_mappings.insert(*right);
+
+    // If resident, also insert into resident map.
+    if (right->is_resident) {
+        m_resident_map_count++;
+        m_resident_mappings.insert(*right);
+    }
+}
+
+HeapTracker::AddrTree::iterator HeapTracker::GetNearestHeapMapLocked(VAddr offset) {
+    const SeparateHeapMap key{
+        .vaddr = offset,
+    };
+
+    return m_mappings.find(key);
+}
+
+} // namespace Common
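Reviewer note: the eviction policy in `RebuildSeparateHeapAddressSpace` (keep half of `min(resident, cap)`, drop the least recently faulted first) is easiest to see in isolation. Below is a minimal standalone model of that policy, not the emulator's code: it uses a plain `std::map` keyed by tick instead of the intrusive tick tree, never re-ticks an already resident entry, and stubs out the actual `munmap`. `ResidentSet`, `Touch`, and `Rebuild` are invented names for illustration.

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <map>

// Toy model of the tick-ordered resident set: evict the least recently
// faulted half whenever the count exceeds the cap.
struct ResidentSet {
    std::map<std::uint64_t, std::uint64_t> by_tick; // tick -> vaddr (toy payload)
    std::int64_t max_resident;

    void Touch(std::uint64_t tick, std::uint64_t vaddr) {
        by_tick.emplace(tick, vaddr);
        if (static_cast<std::int64_t>(by_tick.size()) > max_resident) {
            Rebuild();
        }
    }

    void Rebuild() {
        // Keep half of min(current, cap); drop the oldest ticks first.
        const std::int64_t current = static_cast<std::int64_t>(by_tick.size());
        const std::int64_t desired = std::min(current, max_resident) / 2;
        std::int64_t evict = current - desired;
        for (auto it = by_tick.begin(); it != by_tick.end() && evict > 0; --evict) {
            // A real implementation would unmap the pages here.
            it = by_tick.erase(it);
        }
    }
};

int main() {
    ResidentSet set{{}, 4}; // tiny cap so the eviction is visible
    for (std::uint64_t tick = 0; tick < 10; ++tick) {
        set.Touch(tick, 0x1000 * tick);
    }
    std::printf("resident after faults: %zu\n", set.by_tick.size());
}
```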
diff --git a/src/common/heap_tracker.h b/src/common/heap_tracker.h
new file mode 100644
index 0000000000..ee5b0bf43a
--- /dev/null
+++ b/src/common/heap_tracker.h
@@ -0,0 +1,98 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <atomic>
+#include <mutex>
+#include <set>
+#include <shared_mutex>
+
+#include "common/host_memory.h"
+#include "common/intrusive_red_black_tree.h"
+
+namespace Common {
+
+struct SeparateHeapMap {
+    Common::IntrusiveRedBlackTreeNode addr_node{};
+    Common::IntrusiveRedBlackTreeNode tick_node{};
+    VAddr vaddr{};
+    PAddr paddr{};
+    size_t size{};
+    size_t tick{};
+    MemoryPermission perm{};
+    bool is_resident{};
+};
+
+struct SeparateHeapMapAddrComparator {
+    static constexpr int Compare(const SeparateHeapMap& lhs, const SeparateHeapMap& rhs) {
+        if (lhs.vaddr < rhs.vaddr) {
+            return -1;
+        } else if (lhs.vaddr <= (rhs.vaddr + rhs.size - 1)) {
+            return 0;
+        } else {
+            return 1;
+        }
+    }
+};
+
+struct SeparateHeapMapTickComparator {
+    static constexpr int Compare(const SeparateHeapMap& lhs, const SeparateHeapMap& rhs) {
+        if (lhs.tick < rhs.tick) {
+            return -1;
+        } else if (lhs.tick > rhs.tick) {
+            return 1;
+        } else {
+            return SeparateHeapMapAddrComparator::Compare(lhs, rhs);
+        }
+    }
+};
+
+class HeapTracker {
+public:
+    explicit HeapTracker(Common::HostMemory& buffer);
+    ~HeapTracker();
+
+    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perm,
+             bool is_separate_heap);
+    void Unmap(size_t virtual_offset, size_t size, bool is_separate_heap);
+    void Protect(size_t virtual_offset, size_t length, MemoryPermission perm);
+    u8* VirtualBasePointer() {
+        return m_buffer.VirtualBasePointer();
+    }
+
+    bool DeferredMapSeparateHeap(u8* fault_address);
+    bool DeferredMapSeparateHeap(size_t virtual_offset);
+
+private:
+    using AddrTreeTraits =
+        Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&SeparateHeapMap::addr_node>;
+    using AddrTree = AddrTreeTraits::TreeType<SeparateHeapMapAddrComparator>;
+
+    using TickTreeTraits =
+        Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&SeparateHeapMap::tick_node>;
+    using TickTree = TickTreeTraits::TreeType<SeparateHeapMapTickComparator>;
+
+    AddrTree m_mappings{};
+    TickTree m_resident_mappings{};
+
+private:
+    void SplitHeapMap(VAddr offset, size_t size);
+    void SplitHeapMapLocked(VAddr offset);
+
+    AddrTree::iterator GetNearestHeapMapLocked(VAddr offset);
+
+    void RebuildSeparateHeapAddressSpace();
+
+private:
+    Common::HostMemory& m_buffer;
+    const s64 m_max_resident_map_count;
+
+    std::shared_mutex m_rebuild_lock{};
+    std::mutex m_lock{};
+    s64 m_map_count{};
+    s64 m_resident_map_count{};
+    size_t m_tick{};
+};
+
+} // namespace Common
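Reviewer note: `SeparateHeapMapAddrComparator` returns 0 ("equal") whenever the probe address falls anywhere inside an existing mapping, which is what lets `GetNearestHeapMapLocked` locate the containing block with a plain `find`. The same trick can be modeled with the standard library (not the intrusive tree the header actually uses) as a `std::map` whose ordering treats overlapping ranges as equivalent; `Range` and `RangeLess` here are invented for illustration, and the ordering is only valid while stored ranges stay disjoint, which the split-on-unmap logic guarantees in the real code.

```cpp
#include <cstdint>
#include <cstdio>
#include <map>

struct Range {
    std::uint64_t vaddr;
    std::uint64_t size; // use size = 1 for a point probe
};

// Strict weak ordering: two ranges compare "equal" when they overlap,
// mirroring the comparator returning 0 for an address inside a mapping.
struct RangeLess {
    bool operator()(const Range& a, const Range& b) const {
        return a.vaddr + a.size - 1 < b.vaddr;
    }
};

int main() {
    std::map<Range, const char*, RangeLess> mappings;
    mappings.emplace(Range{0x10000, 0x4000}, "heap block A");
    mappings.emplace(Range{0x20000, 0x1000}, "heap block B");

    // A faulting address inside block A finds the whole block.
    const auto it = mappings.find(Range{0x12345, 1});
    std::printf("found: %s\n", it != mappings.end() ? it->second : "(none)");
}
```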
diff --git a/src/common/host_memory.cpp b/src/common/host_memory.cpp
index 9d5ea829ab..5400b97018 100644
--- a/src/common/host_memory.cpp
+++ b/src/common/host_memory.cpp
@@ -725,7 +725,8 @@ HostMemory::HostMemory(HostMemory&&) noexcept = default;
 
 HostMemory& HostMemory::operator=(HostMemory&&) noexcept = default;
 
-void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perms) {
+void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length,
+                     MemoryPermission perms, bool separate_heap) {
     ASSERT(virtual_offset % PageAlignment == 0);
     ASSERT(host_offset % PageAlignment == 0);
     ASSERT(length % PageAlignment == 0);
@@ -737,7 +738,7 @@ void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length, M
     impl->Map(virtual_offset + virtual_base_offset, host_offset, length, perms);
 }
 
-void HostMemory::Unmap(size_t virtual_offset, size_t length) {
+void HostMemory::Unmap(size_t virtual_offset, size_t length, bool separate_heap) {
     ASSERT(virtual_offset % PageAlignment == 0);
     ASSERT(length % PageAlignment == 0);
     ASSERT(virtual_offset + length <= virtual_size);
diff --git a/src/common/host_memory.h b/src/common/host_memory.h
index 9dddb1faee..72fbb05af7 100644
--- a/src/common/host_memory.h
+++ b/src/common/host_memory.h
@@ -40,9 +40,10 @@ public:
     HostMemory(HostMemory&& other) noexcept;
     HostMemory& operator=(HostMemory&& other) noexcept;
 
-    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perms);
+    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perms,
+             bool separate_heap);
 
-    void Unmap(size_t virtual_offset, size_t length);
+    void Unmap(size_t virtual_offset, size_t length, bool separate_heap);
 
     void Protect(size_t virtual_offset, size_t length, MemoryPermission perms);
 
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 83aabdbc72..ad886b4300 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -1231,6 +1231,7 @@ endif()
 
 if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64)
     target_sources(core PRIVATE
+        arm/dynarmic/arm_dynarmic.cpp
         arm/dynarmic/arm_dynarmic.h
         arm/dynarmic/arm_dynarmic_64.cpp
         arm/dynarmic/arm_dynarmic_64.h
diff --git a/src/core/arm/dynarmic/arm_dynarmic.cpp b/src/core/arm/dynarmic/arm_dynarmic.cpp
new file mode 100644
index 0000000000..06c8e40b08
--- /dev/null
+++ b/src/core/arm/dynarmic/arm_dynarmic.cpp
@@ -0,0 +1,52 @@
+// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifdef __linux__
+
+#include "common/signal_chain.h"
+
+#include "core/arm/dynarmic/arm_dynarmic.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/memory.h"
+
+namespace Core {
+
+namespace {
+
+thread_local Core::Memory::Memory* g_current_memory{};
+std::once_flag g_registered{};
+struct sigaction g_old_segv {};
+
+void HandleSigSegv(int sig, siginfo_t* info, void* ctx) {
+    if (g_current_memory && g_current_memory->InvalidateSeparateHeap(info->si_addr)) {
+        return;
+    }
+
+    return g_old_segv.sa_sigaction(sig, info, ctx);
+}
+
+} // namespace
+
+ScopedJitExecution::ScopedJitExecution(Kernel::KProcess* process) {
+    g_current_memory = std::addressof(process->GetMemory());
+}
+
+ScopedJitExecution::~ScopedJitExecution() {
+    g_current_memory = nullptr;
+}
+
+void ScopedJitExecution::RegisterHandler() {
+    std::call_once(g_registered, [] {
+        struct sigaction sa {};
+        sa.sa_sigaction = &HandleSigSegv;
+        sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
+        Common::SigAction(SIGSEGV, std::addressof(sa), std::addressof(g_old_segv));
+    });
+}
+
+} // namespace Core
+
+#endif
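Reviewer note: the handler above only claims faults the heap tracker recognizes; everything else is forwarded to whatever handler was installed before. A stripped-down sketch of that chaining pattern follows, using raw POSIX `sigaction` instead of the project's `Common::SigAction` wrapper; `TryHandle` is an invented stand-in for `Memory::InvalidateSeparateHeap`, and unlike the patch (which can rely on the wrapper's chaining guarantees) the sketch spells out the `SA_SIGINFO`/`SIG_DFL` cases explicitly.

```cpp
#include <cstdio>
#include <signal.h> // POSIX sigaction

namespace {

struct sigaction g_previous {};

// Hypothetical stand-in for Memory::InvalidateSeparateHeap().
bool TryHandle(void* fault_address) {
    (void)fault_address;
    return false; // this sketch never claims the fault
}

void ChainingHandler(int sig, siginfo_t* info, void* ctx) {
    if (TryHandle(info->si_addr)) {
        return; // fault resolved; the kernel retries the faulting instruction
    }

    // Otherwise forward to whichever handler was installed before us.
    if (g_previous.sa_flags & SA_SIGINFO) {
        g_previous.sa_sigaction(sig, info, ctx);
    } else if (g_previous.sa_handler == SIG_DFL || g_previous.sa_handler == SIG_IGN) {
        signal(sig, SIG_DFL); // restore default and re-raise to crash properly
        raise(sig);
    } else {
        g_previous.sa_handler(sig);
    }
}

} // namespace

int main() {
    struct sigaction sa {};
    sa.sa_sigaction = &ChainingHandler;
    sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
    sigaction(SIGSEGV, &sa, &g_previous); // third argument saves the old handler
    std::puts("chaining SIGSEGV handler installed");
}
```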
diff --git a/src/core/arm/dynarmic/arm_dynarmic.h b/src/core/arm/dynarmic/arm_dynarmic.h
index 46384f7e6d..48779b83ff 100644
--- a/src/core/arm/dynarmic/arm_dynarmic.h
+++ b/src/core/arm/dynarmic/arm_dynarmic.h
@@ -27,4 +27,24 @@ constexpr HaltReason TranslateHaltReason(Dynarmic::HaltReason hr) {
     return HaltReason(hr);
 }
 
+#ifdef __linux__
+
+class ScopedJitExecution {
+public:
+    explicit ScopedJitExecution(Kernel::KProcess* process);
+    ~ScopedJitExecution();
+    static void RegisterHandler();
+};
+
+#else
+
+class ScopedJitExecution {
+public:
+    explicit ScopedJitExecution(Kernel::KProcess* process) {}
+    ~ScopedJitExecution() {}
+    static void RegisterHandler() {}
+};
+
+#endif
+
 } // namespace Core
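Reviewer note: because `g_current_memory` is thread-local, every thread that enters the JIT must publish its process memory for the duration of the run, and the RAII guard makes that hard to forget, as the `RunThread`/`StepThread` hunks below show. A generic sketch of the same publish-and-clear pattern, with invented names rather than the emulator's types:

```cpp
#include <cstdio>

thread_local const char* g_context = nullptr;

// RAII guard: publish a per-thread context on entry, clear it on exit,
// even if the scope is left via an early return.
class ScopedContext {
public:
    explicit ScopedContext(const char* ctx) {
        g_context = ctx;
    }
    ~ScopedContext() {
        g_context = nullptr;
    }
};

void RunGuest() {
    ScopedContext scope("process A");
    // A fault handler running on this thread may now consult g_context.
    std::printf("running with context: %s\n", g_context);
} // g_context is reset here

int main() {
    RunGuest();
}
```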
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index 2acadaf3ed..21641744d5 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -344,11 +344,15 @@ bool ArmDynarmic32::IsInThumbMode() const {
 }
 
 HaltReason ArmDynarmic32::RunThread(Kernel::KThread* thread) {
+    ScopedJitExecution sj(thread->GetOwnerProcess());
+
     m_jit->ClearExclusiveState();
     return TranslateHaltReason(m_jit->Run());
 }
 
 HaltReason ArmDynarmic32::StepThread(Kernel::KThread* thread) {
+    ScopedJitExecution sj(thread->GetOwnerProcess());
+
     m_jit->ClearExclusiveState();
     return TranslateHaltReason(m_jit->Step());
 }
@@ -390,6 +394,7 @@ ArmDynarmic32::ArmDynarmic32(System& system, bool uses_wall_clock, Kernel::KProc
     m_cp15(std::make_shared<DynarmicCP15>(*this)), m_core_index{core_index} {
     auto& page_table_impl = process->GetPageTable().GetBasePageTable().GetImpl();
     m_jit = MakeJit(&page_table_impl);
+    ScopedJitExecution::RegisterHandler();
 }
 
 ArmDynarmic32::~ArmDynarmic32() = default;
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index f1be21c6cd..b00a0d4346 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -375,11 +375,15 @@ std::shared_ptr<Dynarmic::A64::Jit> ArmDynarmic64::MakeJit(Common::PageTable* pa
 }
 
 HaltReason ArmDynarmic64::RunThread(Kernel::KThread* thread) {
+    ScopedJitExecution sj(thread->GetOwnerProcess());
+
     m_jit->ClearExclusiveState();
     return TranslateHaltReason(m_jit->Run());
 }
 
 HaltReason ArmDynarmic64::StepThread(Kernel::KThread* thread) {
+    ScopedJitExecution sj(thread->GetOwnerProcess());
+
     m_jit->ClearExclusiveState();
     return TranslateHaltReason(m_jit->Step());
 }
@@ -419,6 +423,7 @@ ArmDynarmic64::ArmDynarmic64(System& system, bool uses_wall_clock, Kernel::KProc
     auto& page_table = process->GetPageTable().GetBasePageTable();
     auto& page_table_impl = page_table.GetImpl();
     m_jit = MakeJit(&page_table_impl, page_table.GetAddressSpaceWidth());
+    ScopedJitExecution::RegisterHandler();
 }
 
 ArmDynarmic64::~ArmDynarmic64() = default;
diff --git a/src/core/hle/kernel/k_page_table_base.cpp b/src/core/hle/kernel/k_page_table_base.cpp
index 4683f96ddb..6b3f60f52e 100644
--- a/src/core/hle/kernel/k_page_table_base.cpp
+++ b/src/core/hle/kernel/k_page_table_base.cpp
@@ -477,7 +477,7 @@ void KPageTableBase::Finalize() {
     auto BlockCallback = [&](KProcessAddress addr, u64 size) {
         if (m_impl->fastmem_arena) {
-            m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size);
+            m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size, false);
         }
 
         // Get physical pages.
@@ -5718,6 +5718,8 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
     switch (operation) {
     case OperationType::Unmap:
     case OperationType::UnmapPhysical: {
+        const bool separate_heap = operation == OperationType::UnmapPhysical;
+
         // Ensure that any pages we track are closed on exit.
         KPageGroup pages_to_close(m_kernel, this->GetBlockInfoManager());
         SCOPE_EXIT {
@@ -5728,14 +5730,15 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
         this->MakePageGroup(pages_to_close, virt_addr, num_pages);
 
         // Unmap.
-        m_memory->UnmapRegion(*m_impl, virt_addr, num_pages * PageSize);
+        m_memory->UnmapRegion(*m_impl, virt_addr, num_pages * PageSize, separate_heap);
 
         R_SUCCEED();
     }
     case OperationType::Map: {
         ASSERT(virt_addr != 0);
         ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize));
-        m_memory->MapMemoryRegion(*m_impl, virt_addr, num_pages * PageSize, phys_addr, ConvertToMemoryPermission(properties.perm));
+        m_memory->MapMemoryRegion(*m_impl, virt_addr, num_pages * PageSize, phys_addr,
+                                  ConvertToMemoryPermission(properties.perm), false);
 
         // Open references to pages, if we should.
         if (this->IsHeapPhysicalAddress(phys_addr)) {
@@ -5776,6 +5779,8 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
     case OperationType::MapGroup:
     case OperationType::MapFirstGroup:
     case OperationType::MapFirstGroupPhysical: {
+        const bool separate_heap = operation == OperationType::MapFirstGroupPhysical;
+
         // We want to maintain a new reference to every page in the group.
        KScopedPageGroup spg(page_group, operation == OperationType::MapGroup);
 
@@ -5783,7 +5788,8 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
             const size_t size{node.GetNumPages() * PageSize};
 
             // Map the pages.
-            m_memory->MapMemoryRegion(*m_impl, virt_addr, size, node.GetAddress(), ConvertToMemoryPermission(properties.perm));
+            m_memory->MapMemoryRegion(*m_impl, virt_addr, size, node.GetAddress(),
+                                      ConvertToMemoryPermission(properties.perm), separate_heap);
 
             virt_addr += size;
         }
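Reviewer note: in the kernel hunks above, only the physical heap operations (`UnmapPhysical`, `MapFirstGroupPhysical`) set `separate_heap`; ordinary maps and unmaps keep their eager fastmem behavior. The decision reduces to a small predicate over the operation type, sketched here with a stand-in enum (the real `OperationType` has many more members than shown):

```cpp
#include <cstdio>

// Stand-in for the kernel's OperationType; only the members used here.
enum class OperationType {
    Map,
    MapGroup,
    MapFirstGroup,
    MapFirstGroupPhysical,
    Unmap,
    UnmapPhysical,
};

// Mirrors the two `separate_heap` assignments in KPageTableBase::Operate():
// only the *Physical heap operations route through the heap tracker.
constexpr bool IsSeparateHeapOperation(OperationType op) {
    return op == OperationType::MapFirstGroupPhysical || op == OperationType::UnmapPhysical;
}

static_assert(IsSeparateHeapOperation(OperationType::UnmapPhysical));
static_assert(!IsSeparateHeapOperation(OperationType::MapGroup));

int main() {
    std::printf("Unmap separate heap? %d\n", IsSeparateHeapOperation(OperationType::Unmap));
}
```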
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 232ace197e..c470bb6da8 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -15,6 +15,7 @@
 #include "common/assert.h"
 #include "common/atomic_ops.h"
 #include "common/common_types.h"
+#include "common/heap_tracker.h"
 #include "common/logging/log.h"
 #include "common/page_table.h"
 #include "common/scope_exit.h"
@@ -59,11 +60,17 @@ struct Memory::Impl {
             current_page_table->fastmem_arena = nullptr;
         }
 
+#ifdef __ANDROID__
+        heap_tracker.emplace(system.DeviceMemory().buffer);
+        buffer = std::addressof(*heap_tracker);
+#else
         buffer = std::addressof(system.DeviceMemory().buffer);
+#endif
     }
 
     void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
-                         Common::PhysicalAddress target, Common::MemoryPermission perms) {
+                         Common::PhysicalAddress target, Common::MemoryPermission perms,
+                         bool separate_heap) {
         ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
         ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
         ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}", GetInteger(target));
@@ -71,18 +78,20 @@ struct Memory::Impl {
                  Common::PageType::Memory);
 
         if (current_page_table->fastmem_arena) {
-            buffer->Map(GetInteger(base), GetInteger(target) - DramMemoryMap::Base, size, perms);
+            buffer->Map(GetInteger(base), GetInteger(target) - DramMemoryMap::Base, size, perms,
+                        separate_heap);
         }
     }
 
-    void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) {
+    void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
+                     bool separate_heap) {
         ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
         ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
 
         MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0,
                  Common::PageType::Unmapped);
 
         if (current_page_table->fastmem_arena) {
-            buffer->Unmap(GetInteger(base), size);
+            buffer->Unmap(GetInteger(base), size, separate_heap);
         }
     }
 
@@ -1013,7 +1022,13 @@ struct Memory::Impl {
     std::array<Common::ScratchBuffer<u8>, Core::Hardware::NUM_CPU_CORES> scratch_buffers{};
     std::span<Core::GPUDirtyMemoryManager> gpu_dirty_managers;
     std::mutex sys_core_guard;
+
+    std::optional<Common::HeapTracker> heap_tracker;
+#ifdef __ANDROID__
+    Common::HeapTracker* buffer{};
+#else
     Common::HostMemory* buffer{};
+#endif
 };
 
 Memory::Memory(Core::System& system_) : system{system_} {
@@ -1031,12 +1046,14 @@ void Memory::SetCurrentPageTable(Kernel::KProcess& process) {
 }
 
 void Memory::MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
-                             Common::PhysicalAddress target, Common::MemoryPermission perms) {
-    impl->MapMemoryRegion(page_table, base, size, target, perms);
+                             Common::PhysicalAddress target, Common::MemoryPermission perms,
+                             bool separate_heap) {
+    impl->MapMemoryRegion(page_table, base, size, target, perms, separate_heap);
 }
 
-void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) {
-    impl->UnmapRegion(page_table, base, size);
+void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
+                         bool separate_heap) {
+    impl->UnmapRegion(page_table, base, size, separate_heap);
 }
 
 void Memory::ProtectRegion(Common::PageTable& page_table, Common::ProcessAddress vaddr, u64 size,
@@ -1199,15 +1216,35 @@ void Memory::MarkRegionDebug(Common::ProcessAddress vaddr, u64 size, bool debug)
 
 bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
     [[maybe_unused]] bool mapped = true;
-    u8* const ptr = impl->GetPointerImpl(GetInteger(vaddr),
-        [&] {
-            LOG_ERROR(HW_Memory, "Unmapped InvalidateNCE for {} bytes @ {:#x}", size, GetInteger(vaddr));
-            mapped = false;
-        },
-        [&] {
-            impl->system.GPU().InvalidateRegion(GetInteger(vaddr), size);
-        });
+    [[maybe_unused]] bool rasterizer = false;
+
+    u8* const ptr = impl->GetPointerImpl(
+        GetInteger(vaddr),
+        [&] {
+            LOG_ERROR(HW_Memory, "Unmapped InvalidateNCE for {} bytes @ {:#x}", size,
+                      GetInteger(vaddr));
+            mapped = false;
+        },
+        [&] { rasterizer = true; });
+    if (rasterizer) {
+        impl->InvalidateGPUMemory(ptr, size);
+    }
+
+#ifdef __ANDROID__
+    if (!rasterizer && mapped) {
+        impl->buffer->DeferredMapSeparateHeap(GetInteger(vaddr));
+    }
+#endif
+
     return mapped && ptr != nullptr;
 }
 
+bool Memory::InvalidateSeparateHeap(void* fault_address) {
+#ifdef __ANDROID__
+    return impl->buffer->DeferredMapSeparateHeap(static_cast<u8*>(fault_address));
+#else
+    return false;
+#endif
+}
+
 } // namespace Core::Memory
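Reviewer note: on Android `Impl::buffer` is a `Common::HeapTracker*`, elsewhere a `Common::HostMemory*`; the swap works without a virtual interface only because both types expose the same `Map`/`Unmap`/`Protect` surface and the choice is fixed at compile time. A reduced sketch of that pattern with toy types (`EagerBackend`, `TrackedBackend`, and the `USE_TRACKER` macro standing in for `__ANDROID__` are all invented):

```cpp
#include <cstdio>

// Two unrelated types with the same call surface; no virtual dispatch
// is needed because the selection happens in the preprocessor.
struct EagerBackend {
    void Map(int page) { std::printf("eager map %d\n", page); }
};

struct TrackedBackend {
    void Map(int page) { std::printf("tracked (deferred) map %d\n", page); }
};

struct MemoryImpl {
#ifdef USE_TRACKER // stands in for __ANDROID__
    TrackedBackend backend{};
    TrackedBackend* buffer = &backend;
#else
    EagerBackend backend{};
    EagerBackend* buffer = &backend;
#endif
};

int main() {
    MemoryImpl impl{};
    impl.buffer->Map(42); // identical call site either way
}
```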
diff --git a/src/core/memory.h b/src/core/memory.h
index 5a8f55680b..7167efbb84 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -92,7 +92,8 @@ public:
      * @param perms The permissions to map the memory with.
      */
     void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
-                         Common::PhysicalAddress target, Common::MemoryPermission perms);
+                         Common::PhysicalAddress target, Common::MemoryPermission perms,
+                         bool separate_heap);
 
     /**
      * Unmaps a region of the emulated process address space.
      *
@@ -101,7 +102,8 @@
      * @param base The address to begin unmapping at.
      * @param size The amount of bytes to unmap.
      */
-    void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size);
+    void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
+                     bool separate_heap);
 
     /**
      * Protects a region of the emulated process address space with the new permissions.
@@ -493,6 +495,8 @@ public:
 
     bool InvalidateNCE(Common::ProcessAddress vaddr, size_t size);
 
+    bool InvalidateSeparateHeap(void* fault_address);
+
 private:
     Core::System& system;
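Reviewer note: taken together, the flow is: guest code touches a non-resident separate-heap page, the SIGSEGV handler asks `Memory::InvalidateSeparateHeap` to resolve it, and `HeapTracker` maps the page, possibly evicting old residents first. A back-of-the-envelope check of the capacity math under the default kernel setting follows; this is a sketch, since the real value is read from `/proc/sys/vm/max_map_count` at runtime.

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
    // Default Linux vm.max_map_count, the fallback assumed by
    // GetMaxPermissibleResidentMapCount().
    const std::int64_t max_map_count = 65530;

    // Reserve 20000 maps for the rest of the process and split slop.
    const std::int64_t cap = std::max<std::int64_t>(max_map_count - 20000, 0);

    // A rebuild fires once residency exceeds the cap; it keeps half.
    const std::int64_t resident_at_rebuild = cap + 1;
    const std::int64_t desired = std::min(resident_at_rebuild, cap) / 2;
    const std::int64_t evicted = resident_at_rebuild - desired;

    std::printf("cap=%lld keep=%lld evict=%lld\n",
                static_cast<long long>(cap), static_cast<long long>(desired),
                static_cast<long long>(evicted));
    // Expected output: cap=45530 keep=22765 evict=22766
}
```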