diff --git a/src/dynarmic/src/dynarmic/backend/x64/a32_emit_x64.cpp b/src/dynarmic/src/dynarmic/backend/x64/a32_emit_x64.cpp
index 80f0f9cc2f..049a58432e 100644
--- a/src/dynarmic/src/dynarmic/backend/x64/a32_emit_x64.cpp
+++ b/src/dynarmic/src/dynarmic/backend/x64/a32_emit_x64.cpp
@@ -114,6 +114,8 @@ A32EmitX64::BlockDescriptor A32EmitX64::Emit(IR::Block& block) {
     // Start emitting.
     code.align();
     const u8* const entrypoint = code.getCurr();
+    code.mov(code.qword[rsp + ABI_SHADOW_SPACE + offsetof(StackLayout, abi_base_pointer)], rbp);
+    code.lea(rbp, code.ptr[rsp + ABI_SHADOW_SPACE + offsetof(StackLayout, abi_base_pointer) - 8]);

     EmitCondPrelude(ctx);

@@ -146,15 +148,14 @@ A32EmitX64::BlockDescriptor A32EmitX64::Emit(IR::Block& block) {

     reg_alloc.AssertNoMoreUses();

-    if (conf.enable_cycle_counting) {
+    if (conf.enable_cycle_counting)
         EmitAddCycles(block.CycleCount());
-    }
+    code.mov(rbp, code.qword[rsp + ABI_SHADOW_SPACE + offsetof(StackLayout, abi_base_pointer)]);
     EmitTerminal(block.GetTerminal(), ctx.Location().SetSingleStepping(false), ctx.IsSingleStep());
     code.int3();

-    for (auto& deferred_emit : ctx.deferred_emits) {
+    for (auto& deferred_emit : ctx.deferred_emits)
         deferred_emit();
-    }
     code.int3();

     const size_t size = size_t(code.getCurr() - entrypoint);
diff --git a/src/dynarmic/src/dynarmic/backend/x64/a64_emit_x64.cpp b/src/dynarmic/src/dynarmic/backend/x64/a64_emit_x64.cpp
index 832cfdcce2..7c43ea1ab2 100644
--- a/src/dynarmic/src/dynarmic/backend/x64/a64_emit_x64.cpp
+++ b/src/dynarmic/src/dynarmic/backend/x64/a64_emit_x64.cpp
@@ -88,6 +88,8 @@ A64EmitX64::BlockDescriptor A64EmitX64::Emit(IR::Block& block) noexcept {
     // Start emitting.
     code.align();
     const auto* const entrypoint = code.getCurr();
+    code.mov(code.qword[rsp + ABI_SHADOW_SPACE + offsetof(StackLayout, abi_base_pointer)], rbp);
+    code.lea(rbp, code.ptr[rsp + ABI_SHADOW_SPACE + offsetof(StackLayout, abi_base_pointer) - 8]);

     DEBUG_ASSERT(block.GetCondition() == IR::Cond::AL);
     typedef void (EmitX64::*EmitHandlerFn)(EmitContext& context, IR::Inst* inst);
@@ -139,16 +141,13 @@ finish_this_inst:
     }

     reg_alloc.AssertNoMoreUses();
-
-    if (conf.enable_cycle_counting) {
+    if (conf.enable_cycle_counting)
         EmitAddCycles(block.CycleCount());
-    }
+    code.mov(rbp, code.qword[rsp + ABI_SHADOW_SPACE + offsetof(StackLayout, abi_base_pointer)]);
     EmitTerminal(block.GetTerminal(), ctx.Location().SetSingleStepping(false), ctx.IsSingleStep());
     code.int3();
-
-    for (auto& deferred_emit : ctx.deferred_emits) {
+    for (auto& deferred_emit : ctx.deferred_emits)
         deferred_emit();
-    }
     code.int3();

     const size_t size = size_t(code.getCurr() - entrypoint);
diff --git a/src/dynarmic/src/dynarmic/backend/x64/exception_handler_windows.cpp b/src/dynarmic/src/dynarmic/backend/x64/exception_handler_windows.cpp
index 3ae553bccd..bae397ff2b 100644
--- a/src/dynarmic/src/dynarmic/backend/x64/exception_handler_windows.cpp
+++ b/src/dynarmic/src/dynarmic/backend/x64/exception_handler_windows.cpp
@@ -1,4 +1,4 @@
-// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
+// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
 // SPDX-License-Identifier: GPL-3.0-or-later

 /* This file is part of the dynarmic project.
@@ -176,7 +176,7 @@ struct ExceptionHandler::Impl final {

         code.align(16);
         const u8* exception_handler_without_cb = code.getCurr();
-        code.mov(code.eax, static_cast<u32>(ExceptionContinueSearch));
+        code.mov(code.eax, u32(ExceptionContinueSearch));
         code.ret();

         code.align(16);
@@ -192,20 +192,18 @@ struct ExceptionHandler::Impl final {
         code.lea(code.rsp, code.ptr[code.rsp - 8]);
         code.mov(code.ABI_PARAM1, std::bit_cast<u64>(&cb));
         code.mov(code.ABI_PARAM2, code.ABI_PARAM3);
-        code.CallLambda(
-            [](const std::function<FakeCall(u64)>& cb_, PCONTEXT ctx) {
-                FakeCall fc = cb_(ctx->Rip);
-
-                ctx->Rsp -= sizeof(u64);
-                *std::bit_cast<u64*>(ctx->Rsp) = fc.ret_rip;
-                ctx->Rip = fc.call_rip;
-            });
+        code.CallLambda([](const std::function<FakeCall(u64)>& cb_, PCONTEXT ctx) {
+            FakeCall fc = cb_(ctx->Rip);
+            ctx->Rsp -= sizeof(u64);
+            *std::bit_cast<u64*>(ctx->Rsp) = fc.ret_rip;
+            ctx->Rip = fc.call_rip;
+        });
         code.add(code.rsp, 8);
-        code.mov(code.eax, static_cast<u32>(ExceptionContinueExecution));
+        code.mov(code.eax, u32(ExceptionContinueExecution));
         code.ret();

-        exception_handler_without_cb_offset = static_cast<ULONG>(exception_handler_without_cb - code.getCode());
-        exception_handler_with_cb_offset = static_cast<ULONG>(exception_handler_with_cb - code.getCode());
+        exception_handler_without_cb_offset = ULONG(exception_handler_without_cb - code.getCode());
+        exception_handler_with_cb_offset = ULONG(exception_handler_with_cb - code.getCode());

         code.align(16);
         UNWIND_INFO* unwind_info = static_cast<UNWIND_INFO*>(code.AllocateFromCodeSpace(sizeof(UNWIND_INFO)));
diff --git a/src/dynarmic/src/dynarmic/backend/x64/reg_alloc.cpp b/src/dynarmic/src/dynarmic/backend/x64/reg_alloc.cpp
index 5c5ed25131..2cfa14ae18 100644
--- a/src/dynarmic/src/dynarmic/backend/x64/reg_alloc.cpp
+++ b/src/dynarmic/src/dynarmic/backend/x64/reg_alloc.cpp
@@ -417,7 +417,8 @@ HostLoc RegAlloc::SelectARegister(std::bitset<32> desired_locations) const noexc
         // While R13 and R14 are technically available, we avoid allocating for them
         // at all costs, because theoretically skipping them is better than spilling
         // all over the place - i also fixes bugs with high reg pressure
-        } else if (i >= HostLoc::R13 && i <= HostLoc::R15) {
+        // %rbp must not be trashed, so skip it as well
+        } else if (i == HostLoc::RBP || (i >= HostLoc::R13 && i <= HostLoc::R15)) {
             // skip, do not touch
         // Intel recommends to reuse registers as soon as they're overwritable (DO NOT SPILL)
         } else if (loc_info.IsEmpty()) {
diff --git a/src/dynarmic/src/dynarmic/backend/x64/stack_layout.h b/src/dynarmic/src/dynarmic/backend/x64/stack_layout.h
index 50737f12eb..43a3fc7ab2 100644
--- a/src/dynarmic/src/dynarmic/backend/x64/stack_layout.h
+++ b/src/dynarmic/src/dynarmic/backend/x64/stack_layout.h
@@ -1,4 +1,4 @@
-// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
+// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
 // SPDX-License-Identifier: GPL-3.0-or-later

 /* This file is part of the dynarmic project.
@@ -22,14 +22,13 @@ constexpr size_t SpillCount = 64;
 #endif

 struct alignas(16) StackLayout {
+    // Needs alignment for VMOV and XMM spills
+    alignas(16) std::array<std::array<u64, 2>, SpillCount> spill;
     s64 cycles_remaining;
     s64 cycles_to_run;
-
-    std::array<std::array<u64, 2>, SpillCount> spill;
-
     u32 save_host_MXCSR;
-
     bool check_bit;
+    u64 abi_base_pointer;
 };

 #ifdef _MSC_VER
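Reviewer note (not part of the patch): a minimal standalone C++ sketch of the address arithmetic the new block prologue and epilogue rely on. The struct mirrors the patched StackLayout; ABI_SHADOW_SPACE = 32 (the Win64 shadow area) is an assumption for illustration and is not taken from the patch. The prologue stores rbp at rsp + ABI_SHADOW_SPACE + offsetof(StackLayout, abi_base_pointer) and then points rbp 8 bytes below that slot; the epilogue reloads rbp from the same slot before the terminal is emitted.

    // Standalone sketch; ABI_SHADOW_SPACE value is an assumption (Win64 shadow space).
    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    using u32 = std::uint32_t;
    using u64 = std::uint64_t;
    using s64 = std::int64_t;

    constexpr std::size_t SpillCount = 64;
    constexpr std::size_t ABI_SHADOW_SPACE = 32;  // assumption: Win64 value

    // Mirrors the patched StackLayout from stack_layout.h.
    struct alignas(16) StackLayout {
        alignas(16) std::array<std::array<u64, 2>, SpillCount> spill;
        s64 cycles_remaining;
        s64 cycles_to_run;
        u32 save_host_MXCSR;
        bool check_bit;
        u64 abi_base_pointer;
    };

    int main() {
        // The emitted prologue is effectively:
        //   mov [rsp + save_slot], rbp      ; spill the host frame pointer
        //   lea rbp, [rsp + save_slot - 8]  ; rbp points just below the saved value
        // and the epilogue restores rbp with a mov from the same slot.
        const std::size_t save_slot = ABI_SHADOW_SPACE + offsetof(StackLayout, abi_base_pointer);
        std::printf("rbp saved at [rsp + %zu]; block rbp = rsp + %zu\n", save_slot, save_slot - 8);
        return 0;
    }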