
Revert "[core/memory] Remove defered heap allocation on Linux." (#2974)

Reviewed-on: https://git.eden-emu.dev/eden-emu/eden/pulls/2974
Reviewed-by: CamilleLaVey <camillelavey99@gmail.com>
Reviewed-by: Caio Oliveira <caiooliveirafarias0@gmail.com>
Reviewed-by: MaranBr <maranbr@eden-emu.dev>
Co-authored-by: PavelBARABANOV <pavelbarabanov94@gmail.com>
Co-committed-by: PavelBARABANOV <pavelbarabanov94@gmail.com>
Author: PavelBARABANOV
Committer: crueter
Commit: 42863027e2
src/common/heap_tracker.cpp               | 79
src/core/CMakeLists.txt                   |  1
src/core/arm/dynarmic/arm_dynarmic.cpp    | 52
src/core/arm/dynarmic/arm_dynarmic.h      | 20
src/core/arm/dynarmic/arm_dynarmic_32.cpp |  5
src/core/arm/dynarmic/arm_dynarmic_64.cpp |  5
src/core/memory.cpp                       | 15
src/core/memory.h                         |  2
8 files changed

src/common/heap_tracker.cpp (79 changed lines)

@@ -1,11 +1,10 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <fstream>
#include <vector>
#include "common/heap_tracker.h"
#include "common/logging/log.h"
#include "common/assert.h"
@@ -37,8 +36,6 @@ HeapTracker::~HeapTracker() = default;
void HeapTracker::Map(size_t virtual_offset, size_t host_offset, size_t length,
MemoryPermission perm, bool is_separate_heap) {
bool rebuild_required = false;
// When mapping other memory, map pages immediately.
if (!is_separate_heap) {
m_buffer.Map(virtual_offset, host_offset, length, perm, false);
@@ -60,29 +57,11 @@ void HeapTracker::Map(size_t virtual_offset, size_t host_offset, size_t length,
// Insert into mappings.
m_map_count++;
const auto it = m_mappings.insert(*map);
// Update tick before possible rebuild.
it->tick = m_tick++;
// Check if we need to rebuild.
if (m_resident_map_count >= m_max_resident_map_count) {
rebuild_required = true;
m_mappings.insert(*map);
}
// Map the area.
m_buffer.Map(it->vaddr, it->paddr, it->size, it->perm, false);
// This map is now resident.
it->is_resident = true;
m_resident_map_count++;
m_resident_mappings.insert(*it);
}
if (rebuild_required) {
// A rebuild was required, so perform it now.
this->RebuildSeparateHeapAddressSpace();
}
// Finally, map.
this->DeferredMapSeparateHeap(virtual_offset);
}
void HeapTracker::Unmap(size_t virtual_offset, size_t size, bool is_separate_heap) {
@@ -169,7 +148,6 @@ void HeapTracker::Protect(size_t virtual_offset, size_t size, MemoryPermission p
// Clamp to end.
next = (std::min)(next, end);
// Reprotect, if we need to.
if (should_protect) {
m_buffer.Protect(cur, next - cur, perm);
@@ -180,6 +158,51 @@ void HeapTracker::Protect(size_t virtual_offset, size_t size, MemoryPermission p
}
}
bool HeapTracker::DeferredMapSeparateHeap(u8* fault_address) {
if (m_buffer.IsInVirtualRange(fault_address)) {
return this->DeferredMapSeparateHeap(fault_address - m_buffer.VirtualBasePointer());
}
return false;
}
bool HeapTracker::DeferredMapSeparateHeap(size_t virtual_offset) {
bool rebuild_required = false;
{
std::scoped_lock lk{m_lock};
// Check to ensure this was a non-resident separate heap mapping.
const auto it = this->GetNearestHeapMapLocked(virtual_offset);
if (it == m_mappings.end() || it->is_resident) {
return false;
}
// Update tick before possible rebuild.
it->tick = m_tick++;
// Check if we need to rebuild.
if (m_resident_map_count > m_max_resident_map_count) {
rebuild_required = true;
}
// Map the area.
m_buffer.Map(it->vaddr, it->paddr, it->size, it->perm, false);
// This map is now resident.
it->is_resident = true;
m_resident_map_count++;
m_resident_mappings.insert(*it);
}
if (rebuild_required) {
// A rebuild was required, so perform it now.
this->RebuildSeparateHeapAddressSpace();
}
return true;
}
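The restored path above is fault-driven: Map() records a separate-heap mapping without touching host memory, the first guest access faults, and DeferredMapSeparateHeap makes just that one mapping resident. A compressed, self-contained sketch of the same idea, with hypothetical names (DeferredTracker, HostMap, kMaxResident) and a plain std::map in place of the intrusive trees the real tracker uses:

#include <cstddef>
#include <map>
#include <mutex>

// Hypothetical host-mapping primitive (the real code maps through m_buffer).
void HostMap(std::size_t, std::size_t, std::size_t) { /* mmap(MAP_FIXED) here */ }

struct Mapping {
    std::size_t vaddr{}, paddr{}, size{};
    std::size_t tick{};
    bool is_resident{};
};

class DeferredTracker {
public:
    // Record the mapping only; no host pages are touched yet.
    void Map(std::size_t vaddr, std::size_t paddr, std::size_t size) {
        std::scoped_lock lk{m_lock};
        m_mappings[vaddr] = Mapping{vaddr, paddr, size};
    }

    // Called from the SIGSEGV path: make the faulting mapping resident.
    bool OnFault(std::size_t fault_offset) {
        bool rebuild_required = false;
        {
            std::scoped_lock lk{m_lock};
            auto it = m_mappings.upper_bound(fault_offset);
            if (it == m_mappings.begin()) {
                return false;
            }
            --it;
            Mapping& m = it->second;
            if (fault_offset >= m.vaddr + m.size || m.is_resident) {
                return false; // not ours or already mapped: chain to the old handler
            }
            m.tick = m_tick++;                 // LRU-style timestamp
            HostMap(m.vaddr, m.paddr, m.size); // now actually map it
            m.is_resident = true;
            rebuild_required = ++m_resident_count > kMaxResident;
        }
        if (rebuild_required) {
            EvictHalf(); // bound the host mapping count (see the rebuild below)
        }
        return true; // the faulting instruction is retried and now succeeds
    }

private:
    static constexpr std::size_t kMaxResident = 4096; // assumed cap, illustrative
    void EvictHalf() { /* unmap oldest-tick mappings until ~half remain */ }
    std::mutex m_lock;
    std::map<std::size_t, Mapping> m_mappings;
    std::size_t m_tick{}, m_resident_count{};
};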
void HeapTracker::RebuildSeparateHeapAddressSpace() {
std::scoped_lock lk{m_rebuild_lock, m_lock};
@@ -190,8 +213,8 @@ void HeapTracker::RebuildSeparateHeapAddressSpace() {
// Despite being worse in theory, this has proven to be better in practice than more
// regularly dumping a smaller amount, because it significantly reduces average case
// lock contention.
const size_t desired_count = (std::min)(m_resident_map_count, m_max_resident_map_count) / 2;
const size_t evict_count = m_resident_map_count - desired_count;
std::size_t const desired_count = (std::min)(m_resident_map_count, m_max_resident_map_count) / 2;
std::size_t const evict_count = m_resident_map_count - desired_count;
auto it = m_resident_mappings.begin();
for (size_t i = 0; i < evict_count && it != m_resident_mappings.end(); i++) {
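The heuristic in the comment above is deliberate batch eviction: rather than evicting one mapping per fault once the cap is hit, a rebuild keeps min(resident, cap) / 2 mappings and drops the rest, so the rebuild lock is taken rarely. With a hypothetical cap of 4096, the fault that pushes the resident count to 4097 evicts 2049 mappings and leaves 2048 faults of headroom:

#include <algorithm>
#include <cstddef>

// How many mappings does one rebuild evict, given the resident count and cap?
std::size_t EvictCount(std::size_t resident, std::size_t cap) {
    const std::size_t desired = (std::min)(resident, cap) / 2;
    return resident - desired;
}

// EvictCount(4097, 4096) == 4097 - 2048 == 2049, leaving 2048 resident.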

src/core/CMakeLists.txt (1 changed line)

@@ -1231,6 +1231,7 @@ endif()
if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64)
target_sources(core PRIVATE
arm/dynarmic/arm_dynarmic.cpp
arm/dynarmic/arm_dynarmic.h
arm/dynarmic/arm_dynarmic_64.cpp
arm/dynarmic/arm_dynarmic_64.h

src/core/arm/dynarmic/arm_dynarmic.cpp (52 changed lines, new file)

@@ -0,0 +1,52 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#ifdef __linux__
#include "common/signal_chain.h"
#include "core/arm/dynarmic/arm_dynarmic.h"
#include "core/hle/kernel/k_process.h"
#include "core/memory.h"
namespace Core {
namespace {
thread_local Core::Memory::Memory* g_current_memory{};
std::once_flag g_registered{};
struct sigaction g_old_segv {};
void HandleSigSegv(int sig, siginfo_t* info, void* ctx) {
if (g_current_memory && g_current_memory->InvalidateSeparateHeap(info->si_addr)) {
return;
}
return g_old_segv.sa_sigaction(sig, info, ctx);
}
} // namespace
ScopedJitExecution::ScopedJitExecution(Kernel::KProcess* process) {
g_current_memory = std::addressof(process->GetMemory());
}
ScopedJitExecution::~ScopedJitExecution() {
g_current_memory = nullptr;
}
void ScopedJitExecution::RegisterHandler() {
std::call_once(g_registered, [] {
struct sigaction sa {};
sa.sa_sigaction = &HandleSigSegv;
sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
Common::SigAction(SIGSEGV, std::addressof(sa), std::addressof(g_old_segv));
});
}
} // namespace Core
#endif
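One contract worth noting in the handler above: HandleSigSegv forwards unresolved faults through g_old_segv.sa_sigaction, which assumes the previous handler was installed with SA_SIGINFO (Common::SigAction chains handlers, so this holds within the project). A standalone sketch of the chaining pattern that also covers the plain sa_handler case, with a hypothetical TryResolveFault standing in for the deferred-map lookup:

#include <signal.h>
#include <cstdlib>

static struct sigaction g_prev{};

// Hypothetical: true if the fault hit a deferred mapping we just made resident.
static bool TryResolveFault(void*) { return false; }

static void ChainedSegv(int sig, siginfo_t* info, void* ctx) {
    if (TryResolveFault(info->si_addr)) {
        return; // mapping installed; the kernel retries the faulting instruction
    }
    // Forward to whoever was installed before us; they may predate SA_SIGINFO.
    if (g_prev.sa_flags & SA_SIGINFO) {
        g_prev.sa_sigaction(sig, info, ctx);
    } else if (g_prev.sa_handler != SIG_DFL && g_prev.sa_handler != SIG_IGN) {
        g_prev.sa_handler(sig);
    } else {
        std::abort(); // genuine crash and nobody else to hand it to
    }
}

void InstallChainedHandler() {
    struct sigaction sa{};
    sa.sa_sigaction = &ChainedSegv;
    sa.sa_flags = SA_SIGINFO | SA_ONSTACK; // use the alt stack if one is set
    sigaction(SIGSEGV, &sa, &g_prev);      // remember the previous handler
}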

src/core/arm/dynarmic/arm_dynarmic.h (20 changed lines)

@@ -29,4 +29,24 @@ constexpr HaltReason TranslateHaltReason(Dynarmic::HaltReason hr) {
return static_cast<HaltReason>(hr);
}
#ifdef __linux__
class ScopedJitExecution {
public:
explicit ScopedJitExecution(Kernel::KProcess* process);
~ScopedJitExecution();
static void RegisterHandler();
};
#else
class ScopedJitExecution {
public:
explicit ScopedJitExecution(Kernel::KProcess* process) {}
~ScopedJitExecution() {}
static void RegisterHandler() {}
};
#endif
} // namespace Core
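The header pairs the real Linux class with an empty inline twin, so RunThread and StepThread can construct a ScopedJitExecution unconditionally and no call site needs its own #ifdef; off Linux the object compiles away. The same pattern in isolation, with generic names and the KProcess* parameter dropped for brevity:

#ifdef __linux__
class ScopedGuard {
public:
    ScopedGuard();   // publishes per-thread state for the signal handler
    ~ScopedGuard();  // clears it again
    static void RegisterHandler();
};
#else
class ScopedGuard {  // empty twin: call sites stay #ifdef-free
public:
    ScopedGuard() {}
    ~ScopedGuard() {}
    static void RegisterHandler() {}
};
#endif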

src/core/arm/dynarmic/arm_dynarmic_32.cpp (5 changed lines)

@@ -341,11 +341,15 @@ bool ArmDynarmic32::IsInThumbMode() const {
}
HaltReason ArmDynarmic32::RunThread(Kernel::KThread* thread) {
ScopedJitExecution sj(thread->GetOwnerProcess());
m_jit->ClearExclusiveState();
return TranslateHaltReason(m_jit->Run());
}
HaltReason ArmDynarmic32::StepThread(Kernel::KThread* thread) {
ScopedJitExecution sj(thread->GetOwnerProcess());
m_jit->ClearExclusiveState();
return TranslateHaltReason(m_jit->Step());
}
@@ -387,6 +391,7 @@ ArmDynarmic32::ArmDynarmic32(System& system, bool uses_wall_clock, Kernel::KProc
m_cp15(std::make_shared<DynarmicCP15>(*this)), m_core_index{core_index} {
auto& page_table_impl = process->GetPageTable().GetBasePageTable().GetImpl();
m_jit = MakeJit(&page_table_impl);
ScopedJitExecution::RegisterHandler();
}
ArmDynarmic32::~ArmDynarmic32() = default;

src/core/arm/dynarmic/arm_dynarmic_64.cpp (5 changed lines)

@@ -372,11 +372,15 @@ std::shared_ptr<Dynarmic::A64::Jit> ArmDynarmic64::MakeJit(Common::PageTable* pa
}
HaltReason ArmDynarmic64::RunThread(Kernel::KThread* thread) {
ScopedJitExecution sj(thread->GetOwnerProcess());
m_jit->ClearExclusiveState();
return TranslateHaltReason(m_jit->Run());
}
HaltReason ArmDynarmic64::StepThread(Kernel::KThread* thread) {
ScopedJitExecution sj(thread->GetOwnerProcess());
m_jit->ClearExclusiveState();
return TranslateHaltReason(m_jit->Step());
}
@@ -416,6 +420,7 @@ ArmDynarmic64::ArmDynarmic64(System& system, bool uses_wall_clock, Kernel::KProc
auto& page_table = process->GetPageTable().GetBasePageTable();
auto& page_table_impl = page_table.GetImpl();
m_jit = MakeJit(&page_table_impl, page_table.GetAddressSpaceWidth());
ScopedJitExecution::RegisterHandler();
}
ArmDynarmic64::~ArmDynarmic64() = default;
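Both cores bracket every Run and Step with the guard rather than setting the pointer once per thread: the thread_local is non-null only while JIT'd code is actually executing, so the handler can tell a deferred-map fault apart from any other stray SIGSEGV, and the destructor clears it on every exit path. The gate in isolation, with a stand-in Memory type (stub body) in place of Core::Memory::Memory:

struct Memory {  // stand-in for Core::Memory::Memory
    bool InvalidateSeparateHeap(void*) { return false; } // stub body
};

thread_local Memory* g_current_memory = nullptr;

class ScopedJitExecution {  // shape of the class from arm_dynarmic.h
public:
    explicit ScopedJitExecution(Memory* mem) { g_current_memory = mem; }
    ~ScopedJitExecution() { g_current_memory = nullptr; }
};

// In the handler: a fault is only a deferred-map candidate while some JIT
// slice is actually running on this thread.
bool HandlerCanResolve(void* fault_address) {
    return g_current_memory != nullptr &&
           g_current_memory->InvalidateSeparateHeap(fault_address);
}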

src/core/memory.cpp (15 changed lines)

@@ -1230,7 +1230,22 @@ bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
if (rasterizer) {
impl->InvalidateGPUMemory(ptr, size);
}
#ifdef __linux__
if (!rasterizer && mapped) {
impl->buffer->DeferredMapSeparateHeap(GetInteger(vaddr));
}
#endif
return mapped && ptr != nullptr;
}
bool Memory::InvalidateSeparateHeap(void* fault_address) {
#ifdef __linux__
return impl->buffer->DeferredMapSeparateHeap(static_cast<u8*>(fault_address));
#else
return false;
#endif
}
} // namespace Core::Memory
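End to end, the restored fault path runs from JIT'd guest code touching a not-yet-resident separate-heap page, into HandleSigSegv (arm_dynarmic.cpp), which consults the thread-local memory; Memory::InvalidateSeparateHeap then hands the host pointer to HeapTracker::DeferredMapSeparateHeap, which checks it lies in the emulated virtual range, translates it to a virtual offset, and maps it, after which the faulting instruction retries. A condensed sketch of the dispatch, with a hypothetical ResolveGuestFault wrapper (not the literal implementation):

#include "core/memory.h"

// Hypothetical wrapper, condensing the dispatch above.
bool ResolveGuestFault(Core::Memory::Memory& memory, void* fault_address) {
#ifdef __linux__
    // Inside: buffer->DeferredMapSeparateHeap(static_cast<u8*>(fault_address)),
    // which first verifies the pointer is in the emulated virtual range.
    return memory.InvalidateSeparateHeap(fault_address);
#else
    (void)memory;
    (void)fault_address;
    return false; // no handler is installed off Linux, so never resolve
#endif
}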

src/core/memory.h (2 changed lines)

@@ -495,6 +495,8 @@ public:
bool InvalidateNCE(Common::ProcessAddress vaddr, size_t size);
bool InvalidateSeparateHeap(void* fault_address);
private:
Core::System& system;
