
[dynarmic] replace mcl::bit_cast with std one (#2866)

Don't merge till it works on steamdeck :)
Signed-off-by: lizzie <lizzie@eden-emu.dev>

Reviewed-on: https://git.eden-emu.dev/eden-emu/eden/pulls/2866
Reviewed-by: Maufeat <sahyno1996@gmail.com>
Reviewed-by: MaranBr <maranbr@eden-emu.dev>
Reviewed-by: crueter <crueter@eden-emu.dev>
Reviewed-by: Caio Oliveira <caiooliveirafarias0@gmail.com>
Co-authored-by: lizzie <lizzie@eden-emu.dev>
Co-committed-by: lizzie <lizzie@eden-emu.dev>
lizzie authored 2 months ago · committed by crueter · commit b50f8c620b
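The change itself is mechanical: every conversion between object or function pointers and u64 that previously went through mcl::bit_cast now uses the C++20 std::bit_cast from <bit>, and the mcl/bit_cast.hpp include is dropped. A minimal sketch of the pattern, assuming a 64-bit host (the helper name below is illustrative, not from the patch):

    #include <bit>
    #include <cstdint>

    static_assert(sizeof(void*) == sizeof(std::uint64_t), "the JIT assumes a 64-bit host");

    // std::bit_cast enforces at compile time what mcl::bit_cast enforced via memcpy:
    // source and destination must have the same size and be trivially copyable.
    inline std::uint64_t pointer_to_u64(const void* p) noexcept {
        return std::bit_cast<std::uint64_t>(p);
    }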
20 changed files:

  1. src/dynarmic/src/dynarmic/backend/arm64/a32_address_space.cpp (20 changed lines)
  2. src/dynarmic/src/dynarmic/backend/arm64/a64_address_space.cpp (28 changed lines)
  3. src/dynarmic/src/dynarmic/backend/arm64/address_space.cpp (9 changed lines)
  4. src/dynarmic/src/dynarmic/backend/arm64/devirtualize.h (20 changed lines)
  5. src/dynarmic/src/dynarmic/backend/arm64/emit_arm64_a64.cpp (11 changed lines)
  6. src/dynarmic/src/dynarmic/backend/arm64/emit_arm64_memory.cpp (9 changed lines)
  7. src/dynarmic/src/dynarmic/backend/arm64/emit_arm64_vector_floating_point.cpp (4 changed lines)
  8. src/dynarmic/src/dynarmic/backend/arm64/reg_alloc.cpp (4 changed lines)
  9. src/dynarmic/src/dynarmic/backend/exception_handler_macos.cpp (8 changed lines)
  10. src/dynarmic/src/dynarmic/backend/exception_handler_posix.cpp (10 changed lines)
  11. src/dynarmic/src/dynarmic/backend/x64/a32_interface.cpp (4 changed lines)
  12. src/dynarmic/src/dynarmic/backend/x64/a64_interface.cpp (4 changed lines)
  13. src/dynarmic/src/dynarmic/backend/x64/devirtualize.h (16 changed lines)
  14. src/dynarmic/src/dynarmic/backend/x64/emit_x64_memory.cpp.inc (32 changed lines)
  15. src/dynarmic/src/dynarmic/backend/x64/emit_x64_memory.h (11 changed lines)
  16. src/dynarmic/src/dynarmic/backend/x64/exception_handler_windows.cpp (8 changed lines)
  17. src/dynarmic/src/dynarmic/backend/x64/perf_map.h (7 changed lines)
  18. src/dynarmic/src/dynarmic/backend/x64/reg_alloc.cpp (2 changed lines)
  19. src/dynarmic/src/dynarmic/common/llvm_disassemble.cpp (4 changed lines)
  20. src/dynarmic/src/dynarmic/ir/ir_emitter.h (8 changed lines)

src/dynarmic/src/dynarmic/backend/arm64/a32_address_space.cpp (20 changed lines)

@@ -93,9 +93,9 @@ static void* EmitExclusiveReadCallTrampoline(oaknut::CodeGenerator& code, const
 code.align(8);
 code.l(l_this);
-code.dx(mcl::bit_cast<u64>(&conf));
+code.dx(std::bit_cast<u64>(&conf));
 code.l(l_addr);
-code.dx(mcl::bit_cast<u64>(Common::FptrCast(fn)));
+code.dx(std::bit_cast<u64>(Common::FptrCast(fn)));
 return target;
 }
@@ -151,9 +151,9 @@ static void* EmitExclusiveWriteCallTrampoline(oaknut::CodeGenerator& code, const
 code.align(8);
 code.l(l_this);
-code.dx(mcl::bit_cast<u64>(&conf));
+code.dx(std::bit_cast<u64>(&conf));
 code.l(l_addr);
-code.dx(mcl::bit_cast<u64>(Common::FptrCast(fn)));
+code.dx(std::bit_cast<u64>(Common::FptrCast(fn)));
 return target;
 }
@@ -219,7 +219,7 @@ void A32AddressSpace::EmitPrelude() {
 code.MOV(Xstate, X1);
 code.MOV(Xhalt, X2);
 if (conf.page_table) {
-code.MOV(Xpagetable, mcl::bit_cast<u64>(conf.page_table));
+code.MOV(Xpagetable, std::bit_cast<u64>(conf.page_table));
 }
 if (conf.fastmem_pointer) {
 code.MOV(Xfastmem, *conf.fastmem_pointer);
@@ -258,7 +258,7 @@ void A32AddressSpace::EmitPrelude() {
 code.MOV(Xstate, X1);
 code.MOV(Xhalt, X2);
 if (conf.page_table) {
-code.MOV(Xpagetable, mcl::bit_cast<u64>(conf.page_table));
+code.MOV(Xpagetable, std::bit_cast<u64>(conf.page_table));
 }
 if (conf.fastmem_pointer) {
 code.MOV(Xfastmem, *conf.fastmem_pointer);
@@ -317,9 +317,9 @@ void A32AddressSpace::EmitPrelude() {
 code.align(8);
 code.l(l_this);
-code.dx(mcl::bit_cast<u64>(this));
+code.dx(std::bit_cast<u64>(this));
 code.l(l_addr);
-code.dx(mcl::bit_cast<u64>(Common::FptrCast(fn)));
+code.dx(std::bit_cast<u64>(Common::FptrCast(fn)));
 }
 prelude_info.return_from_run_code = code.xptr<void*>();
@@ -347,7 +347,7 @@ void A32AddressSpace::EmitPrelude() {
 code.align(8);
 code.l(l_return_to_dispatcher);
-code.dx(mcl::bit_cast<u64>(prelude_info.return_to_dispatcher));
+code.dx(std::bit_cast<u64>(prelude_info.return_to_dispatcher));
 prelude_info.end_of_prelude = code.offset();
@@ -369,7 +369,7 @@ EmitConfig A32AddressSpace::GetEmitConfig() {
 .check_halt_on_memory_access = conf.check_halt_on_memory_access,
-.page_table_pointer = mcl::bit_cast<u64>(conf.page_table),
+.page_table_pointer = std::bit_cast<u64>(conf.page_table),
 .page_table_address_space_bits = 32,
 .page_table_pointer_mask_bits = conf.page_table_pointer_mask_bits,
 .silently_mirror_page_table = true,

src/dynarmic/src/dynarmic/backend/arm64/a64_address_space.cpp (28 changed lines)

@@ -92,9 +92,9 @@ static void* EmitExclusiveReadCallTrampoline(oaknut::CodeGenerator& code, const
 code.align(8);
 code.l(l_this);
-code.dx(mcl::bit_cast<u64>(&conf));
+code.dx(std::bit_cast<u64>(&conf));
 code.l(l_addr);
-code.dx(mcl::bit_cast<u64>(Common::FptrCast(fn)));
+code.dx(std::bit_cast<u64>(Common::FptrCast(fn)));
 return target;
 }
@@ -150,9 +150,9 @@ static void* EmitExclusiveWriteCallTrampoline(oaknut::CodeGenerator& code, const
 code.align(8);
 code.l(l_this);
-code.dx(mcl::bit_cast<u64>(&conf));
+code.dx(std::bit_cast<u64>(&conf));
 code.l(l_addr);
-code.dx(mcl::bit_cast<u64>(Common::FptrCast(fn)));
+code.dx(std::bit_cast<u64>(Common::FptrCast(fn)));
 return target;
 }
@@ -235,9 +235,9 @@ static void* EmitExclusiveRead128CallTrampoline(oaknut::CodeGenerator& code, con
 code.align(8);
 code.l(l_this);
-code.dx(mcl::bit_cast<u64>(&conf));
+code.dx(std::bit_cast<u64>(&conf));
 code.l(l_addr);
-code.dx(mcl::bit_cast<u64>(Common::FptrCast(fn)));
+code.dx(std::bit_cast<u64>(Common::FptrCast(fn)));
 return target;
 }
@@ -317,9 +317,9 @@ static void* EmitExclusiveWrite128CallTrampoline(oaknut::CodeGenerator& code, co
 code.align(8);
 code.l(l_this);
-code.dx(mcl::bit_cast<u64>(&conf));
+code.dx(std::bit_cast<u64>(&conf));
 code.l(l_addr);
-code.dx(mcl::bit_cast<u64>(Common::FptrCast(fn)));
+code.dx(std::bit_cast<u64>(Common::FptrCast(fn)));
 return target;
 }
@@ -396,7 +396,7 @@ void A64AddressSpace::EmitPrelude() {
 code.MOV(Xstate, X1);
 code.MOV(Xhalt, X2);
 if (conf.page_table) {
-code.MOV(Xpagetable, mcl::bit_cast<u64>(conf.page_table));
+code.MOV(Xpagetable, std::bit_cast<u64>(conf.page_table));
 }
 if (conf.fastmem_pointer) {
 code.MOV(Xfastmem, *conf.fastmem_pointer);
@@ -434,7 +434,7 @@ void A64AddressSpace::EmitPrelude() {
 code.MOV(Xstate, X1);
 code.MOV(Xhalt, X2);
 if (conf.page_table) {
-code.MOV(Xpagetable, mcl::bit_cast<u64>(conf.page_table));
+code.MOV(Xpagetable, std::bit_cast<u64>(conf.page_table));
 }
 if (conf.fastmem_pointer) {
 code.MOV(Xfastmem, *conf.fastmem_pointer);
@@ -492,9 +492,9 @@ void A64AddressSpace::EmitPrelude() {
 code.align(8);
 code.l(l_this);
-code.dx(mcl::bit_cast<u64>(this));
+code.dx(std::bit_cast<u64>(this));
 code.l(l_addr);
-code.dx(mcl::bit_cast<u64>(Common::FptrCast(fn)));
+code.dx(std::bit_cast<u64>(Common::FptrCast(fn)));
 }
 prelude_info.return_from_run_code = code.xptr<void*>();
@@ -522,7 +522,7 @@ void A64AddressSpace::EmitPrelude() {
 code.align(8);
 code.l(l_return_to_dispatcher);
-code.dx(mcl::bit_cast<u64>(prelude_info.return_to_dispatcher));
+code.dx(std::bit_cast<u64>(prelude_info.return_to_dispatcher));
 prelude_info.end_of_prelude = code.offset();
@@ -544,7 +544,7 @@ EmitConfig A64AddressSpace::GetEmitConfig() {
 .check_halt_on_memory_access = conf.check_halt_on_memory_access,
-.page_table_pointer = mcl::bit_cast<u64>(conf.page_table),
+.page_table_pointer = std::bit_cast<u64>(conf.page_table),
 .page_table_address_space_bits = conf.page_table_address_space_bits,
 .page_table_pointer_mask_bits = conf.page_table_pointer_mask_bits,
 .silently_mirror_page_table = conf.silently_mirror_page_table,

src/dynarmic/src/dynarmic/backend/arm64/address_space.cpp (9 changed lines)

@@ -1,3 +1,6 @@
+// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
 /* This file is part of the dynarmic project.
 * Copyright (c) 2022 MerryMage
 * SPDX-License-Identifier: 0BSD
@@ -5,7 +8,7 @@
 #include <cstdio>
-#include <mcl/bit_cast.hpp>
+#include <bit>
 #include "dynarmic/backend/arm64/a64_address_space.h"
 #include "dynarmic/backend/arm64/a64_jitstate.h"
@@ -99,7 +102,7 @@ void AddressSpace::ClearCache() {
 void AddressSpace::DumpDisassembly() const {
 for (u32* ptr = mem.ptr(); ptr < code.xptr<u32*>(); ptr++) {
-std::printf("%s", Common::DisassembleAArch64(*ptr, mcl::bit_cast<u64>(ptr)).c_str());
+std::printf("%s", Common::DisassembleAArch64(*ptr, std::bit_cast<u64>(ptr)).c_str());
 }
 }
@@ -316,7 +319,7 @@ void AddressSpace::RelinkForDescriptor(IR::LocationDescriptor target_descriptor,
 FakeCall AddressSpace::FastmemCallback(u64 host_pc) {
 {
-const auto host_ptr = mcl::bit_cast<CodePtr>(host_pc);
+const auto host_ptr = std::bit_cast<CodePtr>(host_pc);
 const auto entry_point = ReverseGetEntryPoint(host_ptr);
 if (!entry_point) {

src/dynarmic/src/dynarmic/backend/arm64/devirtualize.h (20 changed lines)

@@ -8,12 +8,20 @@
 #pragma once
-#include <mcl/bit_cast.hpp>
+#include <bit>
 #include "dynarmic/common/common_types.h"
 #include <mcl/type_traits/function_info.hpp>
 namespace Dynarmic::Backend::Arm64 {
+namespace impl {
+template<typename T, typename P> inline T bit_cast_pointee(const P source_ptr) noexcept {
+std::aligned_storage_t<sizeof(T), alignof(T)> dest;
+std::memcpy(&dest, std::bit_cast<void*>(source_ptr), sizeof(T));
+return reinterpret_cast<T&>(dest);
+}
+};
 struct DevirtualizedCall {
 u64 fn_ptr;
 u64 this_ptr;
@@ -23,7 +31,7 @@ struct DevirtualizedCall {
 template<auto mfp>
 DevirtualizedCall DevirtualizeWindows(mcl::class_type<decltype(mfp)>* this_) {
 static_assert(sizeof(mfp) == 8);
-return DevirtualizedCall{mcl::bit_cast<u64>(mfp), reinterpret_cast<u64>(this_)};
+return DevirtualizedCall{std::bit_cast<u64>(mfp), reinterpret_cast<u64>(this_)};
 }
 // https://github.com/ARM-software/abi-aa/blob/main/cppabi64/cppabi64.rst#representation-of-pointer-to-member-function
@@ -34,16 +42,16 @@ DevirtualizedCall DevirtualizeDefault(mcl::class_type<decltype(mfp)>* this_) {
 u64 ptr;
 // LSB is discriminator for if function is virtual. Other bits are this adjustment.
 u64 adj;
-} mfp_struct = mcl::bit_cast<MemberFunctionPointer>(mfp);
+} mfp_struct = std::bit_cast<MemberFunctionPointer>(mfp);
 static_assert(sizeof(MemberFunctionPointer) == 16);
 static_assert(sizeof(MemberFunctionPointer) == sizeof(mfp));
 u64 fn_ptr = mfp_struct.ptr;
-u64 this_ptr = mcl::bit_cast<u64>(this_) + (mfp_struct.adj >> 1);
+u64 this_ptr = std::bit_cast<u64>(this_) + (mfp_struct.adj >> 1);
 if (mfp_struct.adj & 1) {
-u64 vtable = mcl::bit_cast_pointee<u64>(this_ptr);
-fn_ptr = mcl::bit_cast_pointee<u64>(vtable + fn_ptr);
+u64 vtable = impl::bit_cast_pointee<u64>(this_ptr);
+fn_ptr = impl::bit_cast_pointee<u64>(vtable + fn_ptr);
 }
 return DevirtualizedCall{fn_ptr, this_ptr};
 }
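std::bit_cast only reinterprets a value's own bytes; it cannot read through an integer address the way mcl::bit_cast_pointee did, so the patch adds a local impl::bit_cast_pointee built on std::memcpy (shown above). A hedged, stand-alone sketch of what that helper does (names are illustrative; simplified in that the real helper uses aligned storage so T need not be default-constructible):

    #include <bit>
    #include <cstdint>
    #include <cstring>

    // Read a T out of the memory that the integer `addr` points at, without ever
    // forming a T* (sidesteps strict-aliasing concerns).
    template<typename T>
    T read_pointee(std::uint64_t addr) noexcept {
        T out;
        std::memcpy(&out, std::bit_cast<const void*>(addr), sizeof(T));
        return out;
    }

    // e.g. fetching the vtable pointer stored at the start of a polymorphic object:
    //   std::uint64_t vtable = read_pointee<std::uint64_t>(this_ptr);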

src/dynarmic/src/dynarmic/backend/arm64/emit_arm64_a64.cpp (11 changed lines)

@@ -1,9 +1,12 @@
+// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
 /* This file is part of the dynarmic project.
 * Copyright (c) 2022 MerryMage
 * SPDX-License-Identifier: 0BSD
 */
-#include <mcl/bit_cast.hpp>
+#include <bit>
 #include <oaknut/oaknut.hpp>
 #include "dynarmic/backend/arm64/a64_jitstate.h"
@@ -495,7 +498,7 @@ template<>
 void EmitIR<IR::Opcode::A64GetTPIDR>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
 auto Xvalue = ctx.reg_alloc.WriteX(inst);
 RegAlloc::Realize(Xvalue);
-code.MOV(Xscratch0, mcl::bit_cast<u64>(ctx.conf.tpidr_el0));
+code.MOV(Xscratch0, std::bit_cast<u64>(ctx.conf.tpidr_el0));
 code.LDR(Xvalue, Xscratch0);
 }
@@ -503,7 +506,7 @@ template<>
 void EmitIR<IR::Opcode::A64GetTPIDRRO>(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst) {
 auto Xvalue = ctx.reg_alloc.WriteX(inst);
 RegAlloc::Realize(Xvalue);
-code.MOV(Xscratch0, mcl::bit_cast<u64>(ctx.conf.tpidrro_el0));
+code.MOV(Xscratch0, std::bit_cast<u64>(ctx.conf.tpidrro_el0));
 code.LDR(Xvalue, Xscratch0);
 }
@@ -512,7 +515,7 @@ void EmitIR<IR::Opcode::A64SetTPIDR>(oaknut::CodeGenerator& code, EmitContext& c
 auto args = ctx.reg_alloc.GetArgumentInfo(inst);
 auto Xvalue = ctx.reg_alloc.ReadX(args[0]);
 RegAlloc::Realize(Xvalue);
-code.MOV(Xscratch0, mcl::bit_cast<u64>(ctx.conf.tpidr_el0));
+code.MOV(Xscratch0, std::bit_cast<u64>(ctx.conf.tpidr_el0));
 code.STR(Xvalue, Xscratch0);
 }

src/dynarmic/src/dynarmic/backend/arm64/emit_arm64_memory.cpp (9 changed lines)

@@ -1,3 +1,6 @@
+// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
 /* This file is part of the dynarmic project.
 * Copyright (c) 2022 MerryMage
 * SPDX-License-Identifier: 0BSD
@@ -8,7 +11,7 @@
 #include <optional>
 #include <utility>
-#include <mcl/bit_cast.hpp>
+#include <bit>
 #include <oaknut/oaknut.hpp>
 #include "dynarmic/backend/arm64/abi.h"
@@ -548,7 +551,7 @@ void FastmemEmitReadMemory(oaknut::CodeGenerator& code, EmitContext& ctx, IR::In
 FastmemPatchInfo{
 .marker = marker,
 .fc = FakeCall{
-.call_pc = mcl::bit_cast<u64>(code.xptr<void*>()),
+.call_pc = std::bit_cast<u64>(code.xptr<void*>()),
 },
 .recompile = ctx.conf.recompile_on_fastmem_failure,
 });
@@ -598,7 +601,7 @@ void FastmemEmitWriteMemory(oaknut::CodeGenerator& code, EmitContext& ctx, IR::I
 FastmemPatchInfo{
 .marker = marker,
 .fc = FakeCall{
-.call_pc = mcl::bit_cast<u64>(code.xptr<void*>()),
+.call_pc = std::bit_cast<u64>(code.xptr<void*>()),
 },
 .recompile = ctx.conf.recompile_on_fastmem_failure,
 });

src/dynarmic/src/dynarmic/backend/arm64/emit_arm64_vector_floating_point.cpp (4 changed lines)

@@ -6,7 +6,7 @@
 * SPDX-License-Identifier: 0BSD
 */
-#include <mcl/bit_cast.hpp>
+#include <bit>
 #include <mcl/mp/metavalue/lift_value.hpp>
 #include <mcl/mp/typelist/cartesian_product.hpp>
 #include <mcl/mp/typelist/get.hpp>
@@ -271,7 +271,7 @@ static void EmitTwoOpFallbackWithoutRegAlloc(oaknut::CodeGenerator& code, EmitCo
 ABI_PushRegisters(code, ABI_CALLER_SAVE & ~(1ull << Qresult.index()), stack_size);
-code.MOV(Xscratch0, mcl::bit_cast<u64>(fn));
+code.MOV(Xscratch0, std::bit_cast<u64>(fn));
 code.ADD(X0, SP, 0 * 16);
 code.ADD(X1, SP, 1 * 16);
 code.MOV(X2, fpcr);

src/dynarmic/src/dynarmic/backend/arm64/reg_alloc.cpp (4 changed lines)

@@ -14,7 +14,7 @@
 #include "dynarmic/common/assert.h"
 #include <mcl/bit/bit_field.hpp>
-#include <mcl/bit_cast.hpp>
+#include <bit>
 #include <mcl/mp/metavalue/lift_value.hpp>
 #include "dynarmic/common/common_types.h"
@@ -246,7 +246,7 @@ void RegAlloc::AssertNoMoreUses() const {
 }
 void RegAlloc::EmitVerboseDebuggingOutput() {
-code.MOV(X19, mcl::bit_cast<u64>(&PrintVerboseDebuggingOutputLine)); // Non-volatile register
+code.MOV(X19, std::bit_cast<u64>(&PrintVerboseDebuggingOutputLine)); // Non-volatile register
 const auto do_location = [&](HostLocInfo& info, HostLocType type, size_t index) {
 using namespace oaknut::util;

src/dynarmic/src/dynarmic/backend/exception_handler_macos.cpp (8 changed lines)

@@ -19,7 +19,7 @@
 #include <fmt/format.h>
 #include "dynarmic/common/assert.h"
-#include <mcl/bit_cast.hpp>
+#include <bit>
 #include <mcl/macro/architecture.hpp>
 #include "dynarmic/common/common_types.h"
@@ -146,7 +146,7 @@ kern_return_t MachHandler::HandleRequest(x86_thread_state64_t* ts) {
 FakeCall fc = iter->cb(ts->__rip);
 ts->__rsp -= sizeof(u64);
-*mcl::bit_cast<u64*>(ts->__rsp) = fc.ret_rip;
+*std::bit_cast<u64*>(ts->__rsp) = fc.ret_rip;
 ts->__rip = fc.call_rip;
 return KERN_SUCCESS;
@@ -271,13 +271,13 @@ ExceptionHandler::~ExceptionHandler() = default;
 #if defined(ARCHITECTURE_x86_64)
 void ExceptionHandler::Register(X64::BlockOfCode& code) {
-const u64 code_begin = mcl::bit_cast<u64>(code.getCode());
+const u64 code_begin = std::bit_cast<u64>(code.getCode());
 const u64 code_end = code_begin + code.GetTotalCodeSize();
 impl = std::make_unique<Impl>(code_begin, code_end);
 }
 #elif defined(ARCHITECTURE_arm64)
 void ExceptionHandler::Register(oaknut::CodeBlock& mem, std::size_t size) {
-const u64 code_begin = mcl::bit_cast<u64>(mem.ptr());
+const u64 code_begin = std::bit_cast<u64>(mem.ptr());
 const u64 code_end = code_begin + size;
 impl = std::make_unique<Impl>(code_begin, code_end);
 }

src/dynarmic/src/dynarmic/backend/exception_handler_posix.cpp (10 changed lines)

@@ -27,7 +27,7 @@
 #else
 # error "Invalid architecture"
 #endif
-#include <mcl/bit_cast.hpp>
+#include <bit>
 namespace Dynarmic::Backend {
@@ -122,7 +122,7 @@ void SigHandler::SigAction(int sig, siginfo_t* info, void* raw_context) {
 if (auto const iter = sig_handler->FindCodeBlockInfo(CTX_RIP); iter != sig_handler->code_block_infos.end()) {
 FakeCall fc = iter->second.cb(CTX_RIP);
 CTX_RSP -= sizeof(u64);
-*mcl::bit_cast<u64*>(CTX_RSP) = fc.ret_rip;
+*std::bit_cast<u64*>(CTX_RSP) = fc.ret_rip;
 CTX_RIP = fc.call_rip;
 return;
 }
@@ -189,15 +189,15 @@ ExceptionHandler::~ExceptionHandler() = default;
 #if defined(MCL_ARCHITECTURE_X86_64)
 void ExceptionHandler::Register(X64::BlockOfCode& code) {
-impl = std::make_unique<Impl>(mcl::bit_cast<u64>(code.getCode()), code.GetTotalCodeSize());
+impl = std::make_unique<Impl>(std::bit_cast<u64>(code.getCode()), code.GetTotalCodeSize());
 }
 #elif defined(MCL_ARCHITECTURE_ARM64)
 void ExceptionHandler::Register(oaknut::CodeBlock& mem, std::size_t size) {
-impl = std::make_unique<Impl>(mcl::bit_cast<u64>(mem.ptr()), size);
+impl = std::make_unique<Impl>(std::bit_cast<u64>(mem.ptr()), size);
 }
 #elif defined(MCL_ARCHITECTURE_RISCV)
 void ExceptionHandler::Register(RV64::CodeBlock& mem, std::size_t size) {
-impl = std::make_unique<Impl>(mcl::bit_cast<u64>(mem.ptr<u64>()), size);
+impl = std::make_unique<Impl>(std::bit_cast<u64>(mem.ptr<u64>()), size);
 }
 #else
 # error "Invalid architecture"
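In each exception handler (Mach above, POSIX signals here, SEH below) the saved stack pointer from the fault context is reinterpreted as a u64* so a fake return address can be pushed before execution is redirected to the slow-path callback. A simplified sketch of that fix-up, assuming a 64-bit host and not the handler itself:

    #include <bit>
    #include <cstdint>

    // Model of the fastmem fault fix-up: `saved_rsp` is the RSP value captured in
    // the signal/Mach/SEH context. Push the address the patched call should return
    // to; the handler then points the saved RIP at the slow-path callback.
    inline void push_fake_return(std::uint64_t& saved_rsp, std::uint64_t ret_rip) noexcept {
        saved_rsp -= sizeof(std::uint64_t);
        *std::bit_cast<std::uint64_t*>(saved_rsp) = ret_rip;
    }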

src/dynarmic/src/dynarmic/backend/x64/a32_interface.cpp (4 changed lines)

@@ -13,7 +13,7 @@
 #include <boost/icl/interval_set.hpp>
 #include <fmt/format.h>
 #include "dynarmic/common/assert.h"
-#include <mcl/bit_cast.hpp>
+#include <bit>
 #include <mcl/scope_exit.hpp>
 #include "dynarmic/common/common_types.h"
@@ -47,7 +47,7 @@ static RunCodeCallbacks GenRunCodeCallbacks(A32::UserCallbacks* cb, CodePtr (*Lo
 static std::function<void(BlockOfCode&)> GenRCP(const A32::UserConfig& conf) {
 return [conf](BlockOfCode& code) {
 if (conf.page_table) {
-code.mov(code.r14, mcl::bit_cast<u64>(conf.page_table));
+code.mov(code.r14, std::bit_cast<u64>(conf.page_table));
 }
 if (conf.fastmem_pointer) {
 code.mov(code.r13, *conf.fastmem_pointer);

src/dynarmic/src/dynarmic/backend/x64/a64_interface.cpp (4 changed lines)

@@ -12,7 +12,7 @@
 #include <boost/icl/interval_set.hpp>
 #include "dynarmic/common/assert.h"
-#include <mcl/bit_cast.hpp>
+#include <bit>
 #include <mcl/scope_exit.hpp>
 #include "dynarmic/backend/x64/a64_emit_x64.h"
@@ -43,7 +43,7 @@ static RunCodeCallbacks GenRunCodeCallbacks(A64::UserCallbacks* cb, CodePtr (*Lo
 static std::function<void(BlockOfCode&)> GenRCP(const A64::UserConfig& conf) {
 return [conf](BlockOfCode& code) {
 if (conf.page_table) {
-code.mov(code.r14, mcl::bit_cast<u64>(conf.page_table));
+code.mov(code.r14, std::bit_cast<u64>(conf.page_table));
 }
 if (conf.fastmem_pointer) {
 code.mov(code.r13, *conf.fastmem_pointer);

src/dynarmic/src/dynarmic/backend/x64/devirtualize.h (16 changed lines)

@@ -11,7 +11,7 @@
 #include <cstring>
 #include <utility>
-#include <mcl/bit_cast.hpp>
+#include <bit>
 #include "dynarmic/common/common_types.h"
 #include <mcl/type_traits/function_info.hpp>
@@ -32,6 +32,12 @@ struct ThunkBuilder<R (C::*)(Args...), mfp> {
 }
 };
+template<typename T, typename P> inline T bit_cast_pointee(const P source_ptr) noexcept {
+std::aligned_storage_t<sizeof(T), alignof(T)> dest;
+std::memcpy(&dest, std::bit_cast<void*>(source_ptr), sizeof(T));
+return reinterpret_cast<T&>(dest);
+}
 } // namespace impl
 template<auto mfp>
@@ -42,7 +48,7 @@ ArgCallback DevirtualizeGeneric(mcl::class_type<decltype(mfp)>* this_) {
 template<auto mfp>
 ArgCallback DevirtualizeWindows(mcl::class_type<decltype(mfp)>* this_) {
 static_assert(sizeof(mfp) == 8);
-return ArgCallback{mcl::bit_cast<u64>(mfp), reinterpret_cast<u64>(this_)};
+return ArgCallback{std::bit_cast<u64>(mfp), reinterpret_cast<u64>(this_)};
 }
 template<auto mfp>
@@ -53,7 +59,7 @@ ArgCallback DevirtualizeItanium(mcl::class_type<decltype(mfp)>* this_) {
 u64 ptr;
 /// The required adjustment to `this`, prior to the call.
 u64 adj;
-} mfp_struct = mcl::bit_cast<MemberFunctionPointer>(mfp);
+} mfp_struct = std::bit_cast<MemberFunctionPointer>(mfp);
 static_assert(sizeof(MemberFunctionPointer) == 16);
 static_assert(sizeof(MemberFunctionPointer) == sizeof(mfp));
@@ -61,8 +67,8 @@ ArgCallback DevirtualizeItanium(mcl::class_type<decltype(mfp)>* this_) {
 u64 fn_ptr = mfp_struct.ptr;
 u64 this_ptr = reinterpret_cast<u64>(this_) + mfp_struct.adj;
 if (mfp_struct.ptr & 1) {
-u64 vtable = mcl::bit_cast_pointee<u64>(this_ptr);
-fn_ptr = mcl::bit_cast_pointee<u64>(vtable + fn_ptr - 1);
+u64 vtable = impl::bit_cast_pointee<u64>(this_ptr);
+fn_ptr = impl::bit_cast_pointee<u64>(vtable + fn_ptr - 1);
 }
 return ArgCallback{fn_ptr, this_ptr};
 }
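DevirtualizeItanium and the AArch64 DevirtualizeDefault above both rely on the Itanium C++ ABI layout of a pointer-to-member-function: two 64-bit words, a function pointer (or vtable offset) plus a this-adjustment, differing only in where the virtual-call discriminator bit lives (the low bit of ptr on x86-64, the low bit of adj on AArch64). std::bit_cast turns the decomposition into a size-checked one-liner. A hedged sketch, assuming the Itanium ABI (the static_assert fails on MSVC, where member pointers can be smaller; names are illustrative):

    #include <bit>
    #include <cstdint>

    struct Foo {
        int bar(int x) { return x + 1; }
    };

    // Itanium ABI representation of a pointer-to-member-function.
    struct MemberFunctionPointer {
        std::uint64_t ptr;  // function address, or vtable offset for virtual functions
        std::uint64_t adj;  // adjustment to apply to `this` before the call
    };
    static_assert(sizeof(&Foo::bar) == sizeof(MemberFunctionPointer));

    inline MemberFunctionPointer decompose(int (Foo::*mfp)(int)) noexcept {
        // std::bit_cast verifies the sizes match at compile time, which is what the
        // static_asserts in the real code also guard.
        return std::bit_cast<MemberFunctionPointer>(mfp);
    }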

src/dynarmic/src/dynarmic/backend/x64/emit_x64_memory.cpp.inc (32 changed lines)

@@ -102,10 +102,10 @@ void AxxEmitX64::EmitMemoryRead(AxxEmitContext& ctx, IR::Inst* inst) {
 code.call(wrapped_fn);
 fastmem_patch_info.emplace(
-mcl::bit_cast<u64>(location),
+std::bit_cast<u64>(location),
 FastmemPatchInfo{
-mcl::bit_cast<u64>(code.getCurr()),
-mcl::bit_cast<u64>(wrapped_fn),
+std::bit_cast<u64>(code.getCurr()),
+std::bit_cast<u64>(wrapped_fn),
 *fastmem_marker,
 conf.recompile_on_fastmem_failure,
 });
@@ -189,10 +189,10 @@ void AxxEmitX64::EmitMemoryWrite(AxxEmitContext& ctx, IR::Inst* inst) {
 code.call(wrapped_fn);
 fastmem_patch_info.emplace(
-mcl::bit_cast<u64>(location),
+std::bit_cast<u64>(location),
 FastmemPatchInfo{
-mcl::bit_cast<u64>(code.getCurr()),
-mcl::bit_cast<u64>(wrapped_fn),
+std::bit_cast<u64>(code.getCurr()),
+std::bit_cast<u64>(wrapped_fn),
 *fastmem_marker,
 conf.recompile_on_fastmem_failure,
 });
@@ -349,7 +349,7 @@ void AxxEmitX64::EmitExclusiveReadMemoryInline(AxxEmitContext& ctx, IR::Inst* in
 EmitExclusiveLock(code, conf, tmp, tmp2.cvt32());
 code.mov(code.byte[code.ABI_JIT_PTR + offsetof(AxxJitState, exclusive_state)], u8(1));
-code.mov(tmp, mcl::bit_cast<u64>(GetExclusiveMonitorAddressPointer(conf.global_monitor, conf.processor_id)));
+code.mov(tmp, std::bit_cast<u64>(GetExclusiveMonitorAddressPointer(conf.global_monitor, conf.processor_id)));
 code.mov(qword[tmp], vaddr);
 const auto fastmem_marker = ShouldFastmem(ctx, inst);
@@ -362,10 +362,10 @@ void AxxEmitX64::EmitExclusiveReadMemoryInline(AxxEmitContext& ctx, IR::Inst* in
 const auto location = EmitReadMemoryMov<bitsize>(code, value_idx, src_ptr, ordered);
 fastmem_patch_info.emplace(
-mcl::bit_cast<u64>(location),
+std::bit_cast<u64>(location),
 FastmemPatchInfo{
-mcl::bit_cast<u64>(code.getCurr()),
-mcl::bit_cast<u64>(wrapped_fn),
+std::bit_cast<u64>(code.getCurr()),
+std::bit_cast<u64>(wrapped_fn),
 *fastmem_marker,
 conf.recompile_on_exclusive_fastmem_failure,
 });
@@ -383,7 +383,7 @@ void AxxEmitX64::EmitExclusiveReadMemoryInline(AxxEmitContext& ctx, IR::Inst* in
 code.call(wrapped_fn);
 }
-code.mov(tmp, mcl::bit_cast<u64>(GetExclusiveMonitorValuePointer(conf.global_monitor, conf.processor_id)));
+code.mov(tmp, std::bit_cast<u64>(GetExclusiveMonitorValuePointer(conf.global_monitor, conf.processor_id)));
 EmitWriteMemoryMov<bitsize>(code, tmp, value_idx, false);
 EmitExclusiveUnlock(code, conf, tmp, tmp2.cvt32());
@@ -434,14 +434,14 @@ void AxxEmitX64::EmitExclusiveWriteMemoryInline(AxxEmitContext& ctx, IR::Inst* i
 code.movzx(tmp.cvt32(), code.byte[code.ABI_JIT_PTR + offsetof(AxxJitState, exclusive_state)]);
 code.test(tmp.cvt8(), tmp.cvt8());
 code.je(*end, code.T_NEAR);
-code.mov(tmp, mcl::bit_cast<u64>(GetExclusiveMonitorAddressPointer(conf.global_monitor, conf.processor_id)));
+code.mov(tmp, std::bit_cast<u64>(GetExclusiveMonitorAddressPointer(conf.global_monitor, conf.processor_id)));
 code.cmp(qword[tmp], vaddr);
 code.jne(*end, code.T_NEAR);
 EmitExclusiveTestAndClear(code, conf, vaddr, tmp, rax);
 code.mov(code.byte[code.ABI_JIT_PTR + offsetof(AxxJitState, exclusive_state)], u8(0));
-code.mov(tmp, mcl::bit_cast<u64>(GetExclusiveMonitorValuePointer(conf.global_monitor, conf.processor_id)));
+code.mov(tmp, std::bit_cast<u64>(GetExclusiveMonitorValuePointer(conf.global_monitor, conf.processor_id)));
 if constexpr (bitsize == 128) {
 code.mov(rax, qword[tmp + 0]);
@@ -499,10 +499,10 @@ void AxxEmitX64::EmitExclusiveWriteMemoryInline(AxxEmitContext& ctx, IR::Inst* i
 code.call(wrapped_fn);
 fastmem_patch_info.emplace(
-mcl::bit_cast<u64>(location),
+std::bit_cast<u64>(location),
 FastmemPatchInfo{
-mcl::bit_cast<u64>(code.getCurr()),
-mcl::bit_cast<u64>(wrapped_fn),
+std::bit_cast<u64>(code.getCurr()),
+std::bit_cast<u64>(wrapped_fn),
 *fastmem_marker,
 conf.recompile_on_exclusive_fastmem_failure,
 });
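Each fastmem_patch_info.emplace call above keys the patch map by the host address of the patched access and stores the slow-path thunk address, both as u64. A sketch of that bookkeeping pattern (container and field names are hypothetical, not dynarmic's):

    #include <bit>
    #include <cstdint>
    #include <unordered_map>

    struct PatchInfo {
        std::uint64_t callback;  // address of the slow-path wrapper to call on a fault
        std::uint64_t marker;    // identifies which emitted access was patched
        bool recompile;          // whether to recompile the block without fastmem
    };

    // Keyed by the host address of the patched load/store -- the same
    // pointer-to-u64 conversion the commit routes through std::bit_cast.
    std::unordered_map<std::uint64_t, PatchInfo> fastmem_patches;

    inline void record_patch(const void* location, const void* callback,
                             std::uint64_t marker, bool recompile) {
        fastmem_patches.emplace(std::bit_cast<std::uint64_t>(location),
                                PatchInfo{std::bit_cast<std::uint64_t>(callback), marker, recompile});
    }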

src/dynarmic/src/dynarmic/backend/x64/emit_x64_memory.h (11 changed lines)

@@ -1,9 +1,12 @@
+// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
 /* This file is part of the dynarmic project.
 * Copyright (c) 2022 MerryMage
 * SPDX-License-Identifier: 0BSD
 */
-#include <mcl/bit_cast.hpp>
+#include <bit>
 #include <xbyak/xbyak.h>
 #include "dynarmic/backend/x64/a32_emit_x64.h"
@@ -342,7 +345,7 @@ void EmitExclusiveLock(BlockOfCode& code, const UserConfig& conf, Xbyak::Reg64 p
 return;
 }
-code.mov(pointer, mcl::bit_cast<u64>(GetExclusiveMonitorLockPointer(conf.global_monitor)));
+code.mov(pointer, std::bit_cast<u64>(GetExclusiveMonitorLockPointer(conf.global_monitor)));
 EmitSpinLockLock(code, pointer, tmp);
 }
@@ -352,7 +355,7 @@ void EmitExclusiveUnlock(BlockOfCode& code, const UserConfig& conf, Xbyak::Reg64
 return;
 }
-code.mov(pointer, mcl::bit_cast<u64>(GetExclusiveMonitorLockPointer(conf.global_monitor)));
+code.mov(pointer, std::bit_cast<u64>(GetExclusiveMonitorLockPointer(conf.global_monitor)));
 EmitSpinLockUnlock(code, pointer, tmp);
 }
@@ -369,7 +372,7 @@ void EmitExclusiveTestAndClear(BlockOfCode& code, const UserConfig& conf, Xbyak:
 continue;
 }
 Xbyak::Label ok;
-code.mov(pointer, mcl::bit_cast<u64>(GetExclusiveMonitorAddressPointer(conf.global_monitor, processor_index)));
+code.mov(pointer, std::bit_cast<u64>(GetExclusiveMonitorAddressPointer(conf.global_monitor, processor_index)));
 code.cmp(qword[pointer], vaddr);
 code.jne(ok, code.T_NEAR);
 code.mov(qword[pointer], tmp);

src/dynarmic/src/dynarmic/backend/x64/exception_handler_windows.cpp (8 changed lines)

@@ -13,7 +13,7 @@
 #include <vector>
 #include "dynarmic/common/assert.h"
-#include <mcl/bit_cast.hpp>
+#include <bit>
 #include "dynarmic/common/common_types.h"
 #include "dynarmic/backend/exception_handler.h"
@@ -184,20 +184,20 @@ struct ExceptionHandler::Impl final {
 // Our 3rd argument is a PCONTEXT.
 // If not within our codeblock, ignore this exception.
-code.mov(code.rax, Safe::Negate(mcl::bit_cast<u64>(code.getCode())));
+code.mov(code.rax, Safe::Negate(std::bit_cast<u64>(code.getCode())));
 code.add(code.rax, code.qword[code.ABI_PARAM3 + Xbyak::RegExp(offsetof(CONTEXT, Rip))]);
 code.cmp(code.rax, static_cast<u32>(code.GetTotalCodeSize()));
 code.ja(exception_handler_without_cb);
 code.lea(code.rsp, code.ptr[code.rsp - 8]);
-code.mov(code.ABI_PARAM1, mcl::bit_cast<u64>(&cb));
+code.mov(code.ABI_PARAM1, std::bit_cast<u64>(&cb));
 code.mov(code.ABI_PARAM2, code.ABI_PARAM3);
 code.CallLambda(
 [](const std::function<FakeCall(u64)>& cb_, PCONTEXT ctx) {
 FakeCall fc = cb_(ctx->Rip);
 ctx->Rsp -= sizeof(u64);
-*mcl::bit_cast<u64*>(ctx->Rsp) = fc.ret_rip;
+*std::bit_cast<u64*>(ctx->Rsp) = fc.ret_rip;
 ctx->Rip = fc.call_rip;
 });
 code.add(code.rsp, 8);

src/dynarmic/src/dynarmic/backend/x64/perf_map.h (7 changed lines)

@@ -1,3 +1,6 @@
+// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
 /* This file is part of the dynarmic project.
 * Copyright (c) 2018 MerryMage
 * SPDX-License-Identifier: 0BSD
@@ -7,7 +10,7 @@
 #include <string_view>
-#include <mcl/bit_cast.hpp>
+#include <bit>
 namespace Dynarmic::Backend::X64 {
@@ -17,7 +20,7 @@ void PerfMapRegister(const void* start, const void* end, std::string_view friend
 template<typename T>
 void PerfMapRegister(T start, const void* end, std::string_view friendly_name) {
-detail::PerfMapRegister(mcl::bit_cast<const void*>(start), end, friendly_name);
+detail::PerfMapRegister(std::bit_cast<const void*>(start), end, friendly_name);
 }
 void PerfMapClear();

src/dynarmic/src/dynarmic/backend/x64/reg_alloc.cpp (2 changed lines)

@@ -15,7 +15,7 @@
 #include <fmt/ostream.h>
 #include "dynarmic/common/assert.h"
-#include <mcl/bit_cast.hpp>
+#include <bit>
 #include <xbyak/xbyak.h>
 #include "dynarmic/backend/x64/abi.h"

src/dynarmic/src/dynarmic/common/llvm_disassemble.cpp (4 changed lines)

@@ -16,7 +16,7 @@
 #endif
 #include "dynarmic/common/assert.h"
-#include <mcl/bit_cast.hpp>
+#include <bit>
 #include "dynarmic/common/common_types.h"
 #include "dynarmic/common/llvm_disassemble.h"
@@ -53,7 +53,7 @@ std::string DisassembleX64(const void* begin, const void* end) {
 LLVMDisasmDispose(llvm_ctx);
 #else
 result += fmt::format("(recompile with DYNARMIC_USE_LLVM=ON to disassemble the generated x86_64 code)\n");
-result += fmt::format("start: {:016x}, end: {:016x}\n", mcl::bit_cast<u64>(begin), mcl::bit_cast<u64>(end));
+result += fmt::format("start: {:016x}, end: {:016x}\n", std::bit_cast<u64>(begin), std::bit_cast<u64>(end));
 #endif
 return result;

src/dynarmic/src/dynarmic/ir/ir_emitter.h (8 changed lines)

@@ -2931,19 +2931,19 @@ public:
 }
 void CallHostFunction(void (*fn)(void)) {
-Inst(Opcode::CallHostFunction, Imm64(mcl::bit_cast<u64>(fn)), Value{}, Value{}, Value{});
+Inst(Opcode::CallHostFunction, Imm64(std::bit_cast<u64>(fn)), Value{}, Value{}, Value{});
 }
 void CallHostFunction(void (*fn)(u64), const U64& arg1) {
-Inst(Opcode::CallHostFunction, Imm64(mcl::bit_cast<u64>(fn)), arg1, Value{}, Value{});
+Inst(Opcode::CallHostFunction, Imm64(std::bit_cast<u64>(fn)), arg1, Value{}, Value{});
 }
 void CallHostFunction(void (*fn)(u64, u64), const U64& arg1, const U64& arg2) {
-Inst(Opcode::CallHostFunction, Imm64(mcl::bit_cast<u64>(fn)), arg1, arg2, Value{});
+Inst(Opcode::CallHostFunction, Imm64(std::bit_cast<u64>(fn)), arg1, arg2, Value{});
 }
 void CallHostFunction(void (*fn)(u64, u64, u64), const U64& arg1, const U64& arg2, const U64& arg3) {
-Inst(Opcode::CallHostFunction, Imm64(mcl::bit_cast<u64>(fn)), arg1, arg2, arg3);
+Inst(Opcode::CallHostFunction, Imm64(std::bit_cast<u64>(fn)), arg1, arg2, arg3);
 }
 void SetTerm(const Terminal& terminal) {
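The IR emitter stores host function pointers as 64-bit immediates; at dispatch time the backend casts them back before calling. A minimal sketch of that round trip on a 64-bit host (host_hook is a placeholder, not a dynarmic API):

    #include <bit>
    #include <cstdint>

    static void host_hook(std::uint64_t arg) { (void)arg; }

    int main() {
        // Pack the function pointer into the kind of 64-bit immediate the IR carries...
        const auto imm = std::bit_cast<std::uint64_t>(&host_hook);
        // ...and recover a callable pointer from it when the JIT-ed code dispatches.
        const auto fn = std::bit_cast<void (*)(std::uint64_t)>(imm);
        fn(42);
        return 0;
    }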
