Browse Source

[port] NetBSD and improper ctor for SpinLock fixes

pull/3092/head
lizzie 4 weeks ago
committed by crueter
parent
commit
ef92e986bb
  1. 4
      src/core/arm/dynarmic/arm_dynarmic_32.cpp
  2. 2
      src/core/arm/dynarmic/arm_dynarmic_64.cpp
  3. 2
      src/dynarmic/CMakeLists.txt
  4. 2
      src/dynarmic/src/dynarmic/backend/x64/a32_emit_x64.cpp
  5. 2
      src/dynarmic/src/dynarmic/backend/x64/a64_emit_x64.cpp
  6. 35
      src/dynarmic/src/dynarmic/common/spin_lock_x64.cpp

4
src/core/arm/dynarmic/arm_dynarmic_32.cpp

@@ -211,7 +211,7 @@ std::shared_ptr<Dynarmic::A32::Jit> ArmDynarmic32::MakeJit(Common::PageTable* pa
     config.enable_cycle_counting = !m_uses_wall_clock;
     // Code cache size
-#if defined(ARCHITECTURE_arm64) || defined(__sun__)
+#if defined(ARCHITECTURE_arm64) || defined(__sun__) || defined(__NetBSD__)
     config.code_cache_size = std::uint32_t(128_MiB);
 #else
     config.code_cache_size = std::uint32_t(512_MiB);
@@ -295,7 +295,7 @@ std::shared_ptr<Dynarmic::A32::Jit> ArmDynarmic32::MakeJit(Common::PageTable* pa
     // Curated optimizations
     case Settings::CpuAccuracy::Auto:
         config.unsafe_optimizations = true;
-#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__sun__) || defined(__HAIKU__) || defined(__DragonFly__)
+#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__sun__) || defined(__HAIKU__) || defined(__DragonFly__) || defined(__NetBSD__)
         config.fastmem_pointer = std::nullopt;
         config.fastmem_exclusive_access = false;
 #endif

2
src/core/arm/dynarmic/arm_dynarmic_64.cpp

@@ -270,7 +270,7 @@ std::shared_ptr<Dynarmic::A64::Jit> ArmDynarmic64::MakeJit(Common::PageTable* pa
     config.enable_cycle_counting = !m_uses_wall_clock;
     // Code cache size
-#if defined(ARCHITECTURE_arm64) || defined(__sun__)
+#if defined(ARCHITECTURE_arm64) || defined(__sun__) || defined(__NetBSD__)
     config.code_cache_size = std::uint32_t(128_MiB);
 #else
     config.code_cache_size = std::uint32_t(512_MiB);

2
src/dynarmic/CMakeLists.txt

@@ -18,7 +18,7 @@ endif()
 # Dynarmic project options
 option(DYNARMIC_ENABLE_CPU_FEATURE_DETECTION "Turning this off causes dynarmic to assume the host CPU doesn't support anything later than SSE3" ON)
-option(DYNARMIC_ENABLE_NO_EXECUTE_SUPPORT "Enables support for systems that require W^X" ${PLATFORM_OPENBSD})
+option(DYNARMIC_ENABLE_NO_EXECUTE_SUPPORT "Enables support for systems that require W^X" "${PLATFORM_OPENBSD} OR ${PLATFORM_NETBSD}")
 option(DYNARMIC_IGNORE_ASSERTS "Ignore asserts" OFF)
 option(DYNARMIC_TESTS_USE_UNICORN "Enable fuzzing tests against unicorn" OFF)

2
src/dynarmic/src/dynarmic/backend/x64/a32_emit_x64.cpp

@@ -87,9 +87,11 @@ A32EmitX64::A32EmitX64(BlockOfCode& code, A32::UserConfig conf, A32::Jit* jit_in
     code.PreludeComplete();
     ClearFastDispatchTable();
+    if (conf.fastmem_pointer.has_value()) {
         exception_handler.SetFastmemCallback([this](u64 rip_) {
             return FastmemCallback(rip_);
         });
+    }
 }
 A32EmitX64::~A32EmitX64() = default;

2
src/dynarmic/src/dynarmic/backend/x64/a64_emit_x64.cpp

@@ -61,9 +61,11 @@ A64EmitX64::A64EmitX64(BlockOfCode& code, A64::UserConfig conf, A64::Jit* jit_in
     code.PreludeComplete();
     ClearFastDispatchTable();
+    if (conf.fastmem_pointer.has_value()) {
         exception_handler.SetFastmemCallback([this](u64 rip_) {
             return FastmemCallback(rip_);
         });
+    }
 }
 A64EmitX64::~A64EmitX64() = default;

35
src/dynarmic/src/dynarmic/common/spin_lock_x64.cpp

@@ -7,7 +7,7 @@
  */
 #include <mutex>
+#include <optional>
 #include <xbyak/xbyak.h>
 #include "dynarmic/backend/x64/abi.h"
@@ -42,43 +42,46 @@ void EmitSpinLockUnlock(Xbyak::CodeGenerator& code, Xbyak::Reg64 ptr, Xbyak::Reg
 }
 namespace {
 struct SpinLockImpl {
-    void Initialize();
+    void Initialize() noexcept;
+    static void GlobalInitialize() noexcept;
     Xbyak::CodeGenerator code = Xbyak::CodeGenerator(4096, default_cg_mode);
-    void (*lock)(volatile int*);
-    void (*unlock)(volatile int*);
+    void (*lock)(volatile int*) = nullptr;
+    void (*unlock)(volatile int*) = nullptr;
 };
 std::once_flag flag;
-SpinLockImpl impl;
-void SpinLockImpl::Initialize() {
-    const Xbyak::Reg64 ABI_PARAM1 = Backend::X64::HostLocToReg64(Backend::X64::ABI_PARAM1);
+/// @brief Bear in mind that initializing the variable as-is on ctor time will trigger bugs
+/// because some OSes do not prepare mprotect() properly at static ctor time
+/// We can't really do anything about it, so just live with this fact
+std::optional<SpinLockImpl> impl;
+void SpinLockImpl::Initialize() noexcept {
+    Xbyak::Reg64 const ABI_PARAM1 = Backend::X64::HostLocToReg64(Backend::X64::ABI_PARAM1);
     code.align();
     lock = code.getCurr<void (*)(volatile int*)>();
     EmitSpinLockLock(code, ABI_PARAM1, code.eax);
     code.ret();
     code.align();
     unlock = code.getCurr<void (*)(volatile int*)>();
     EmitSpinLockUnlock(code, ABI_PARAM1, code.eax);
     code.ret();
 }
+void SpinLockImpl::GlobalInitialize() noexcept {
+    impl.emplace();
+    impl->Initialize();
+}
 } // namespace
 void SpinLock::Lock() noexcept {
-    std::call_once(flag, &SpinLockImpl::Initialize, impl);
-    impl.lock(&storage);
+    std::call_once(flag, &SpinLockImpl::GlobalInitialize);
+    impl->lock(&storage);
 }
 void SpinLock::Unlock() noexcept {
-    std::call_once(flag, &SpinLockImpl::Initialize, impl);
-    impl.unlock(&storage);
+    std::call_once(flag, &SpinLockImpl::GlobalInitialize);
+    impl->unlock(&storage);
 }
 } // namespace Dynarmic
Loading…
Cancel
Save