
remove mcl/architecture dependency

Signed-off-by: lizzie <lizzie@eden-emu.dev>
Branch: pull/2775/head
Author: lizzie (4 months ago)
Committed by: crueter
Commit: d6d8be0a6e
 src/dynarmic/src/dynarmic/backend/exception_handler.h           | 20
 src/dynarmic/src/dynarmic/backend/exception_handler_macos.cpp   |  1
 src/dynarmic/src/dynarmic/backend/exception_handler_macos_mig.c |  2
 src/dynarmic/src/dynarmic/backend/exception_handler_posix.cpp   |  6
 src/dynarmic/src/dynarmic/backend/exception_handler_windows.cpp |  2
 src/dynarmic/src/dynarmic/backend/x64/emit_x64_memory.cpp.inc   | 52
 src/dynarmic/tests/test_generator.cpp                           |  1
 7 files changed, 84 lines changed
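Note: the commit swaps mcl's MCL_ARCHITECTURE_* macros for plain ARCHITECTURE_* macros, but the diff does not show where the new macros are defined. A minimal sketch of one way to provide them from standard compiler builtins; the header name is hypothetical, and the real build may define them via CMake compile definitions instead:

    // architecture.h -- hypothetical stand-in for <mcl/macro/architecture.hpp>.
    // Maps well-known compiler builtins onto the plain ARCHITECTURE_* macros
    // that the diff below switches to.
    #pragma once

    #if defined(__x86_64__) || defined(_M_X64)
    #    define ARCHITECTURE_x86_64 1
    #elif defined(__aarch64__) || defined(_M_ARM64)
    #    define ARCHITECTURE_arm64 1
    #elif defined(__riscv) && (__riscv_xlen == 64)
    #    define ARCHITECTURE_riscv64 1
    #endif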

src/dynarmic/src/dynarmic/backend/exception_handler.h | 20

@@ -11,19 +11,17 @@
 #include <functional>
 #include <memory>
 #include <optional>
-#include <mcl/macro/architecture.hpp>
 #include "dynarmic/common/common_types.h"
-#if defined(MCL_ARCHITECTURE_X86_64)
+#if defined(ARCHITECTURE_x86_64)
 namespace Dynarmic::Backend::X64 {
 class BlockOfCode;
 } // namespace Dynarmic::Backend::X64
-#elif defined(MCL_ARCHITECTURE_ARM64)
+#elif defined(ARCHITECTURE_arm64)
 namespace oaknut {
 class CodeBlock;
 } // namespace oaknut
-#elif defined(MCL_ARCHITECTURE_RISCV)
+#elif defined(ARCHITECTURE_riscv64)
 namespace Dynarmic::Backend::RV64 {
 class CodeBlock;
 } // namespace Dynarmic::Backend::RV64
@@ -33,16 +31,16 @@ class CodeBlock;
 namespace Dynarmic::Backend {
-#if defined(MCL_ARCHITECTURE_X86_64)
+#if defined(ARCHITECTURE_x86_64)
 struct FakeCall {
     u64 call_rip;
     u64 ret_rip;
 };
-#elif defined(MCL_ARCHITECTURE_ARM64)
+#elif defined(ARCHITECTURE_arm64)
 struct FakeCall {
     u64 call_pc;
 };
-#elif defined(MCL_ARCHITECTURE_RISCV)
+#elif defined(ARCHITECTURE_riscv64)
 struct FakeCall {
 };
 #else
@@ -54,11 +52,11 @@ public:
     ExceptionHandler();
     ~ExceptionHandler();
-#if defined(MCL_ARCHITECTURE_X86_64)
+#if defined(ARCHITECTURE_x86_64)
     void Register(X64::BlockOfCode& code);
-#elif defined(MCL_ARCHITECTURE_ARM64)
+#elif defined(ARCHITECTURE_arm64)
     void Register(oaknut::CodeBlock& mem, std::size_t mem_size);
-#elif defined(MCL_ARCHITECTURE_RISCV)
+#elif defined(ARCHITECTURE_riscv64)
     void Register(RV64::CodeBlock& mem, std::size_t mem_size);
 #else
 #    error "Invalid architecture"
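Note: the per-architecture Register overloads above are the entry points into the handler. A minimal caller sketch for the x86_64 case, assuming only what this header declares (the helper name is hypothetical):

    #include "dynarmic/backend/exception_handler.h"

    #if defined(ARCHITECTURE_x86_64)
    // Hypothetical helper: attach fault handling to a freshly emitted block.
    void AttachHandler(Dynarmic::Backend::ExceptionHandler& handler,
                       Dynarmic::Backend::X64::BlockOfCode& code) {
        handler.Register(code);  // the x86_64 overload takes only the code block
    }
    #endif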

src/dynarmic/src/dynarmic/backend/exception_handler_macos.cpp | 1

@@ -20,7 +20,6 @@
 #include <fmt/format.h>
 #include "dynarmic/common/assert.h"
 #include <numeric>
-#include <mcl/macro/architecture.hpp>
 #include "dynarmic/common/common_types.h"
 #include "dynarmic/backend/exception_handler.h"

src/dynarmic/src/dynarmic/backend/exception_handler_macos_mig.c | 2

@@ -3,8 +3,6 @@
  * SPDX-License-Identifier: 0BSD
  */
-#include <mcl/macro/architecture.hpp>
 #if defined(ARCHITECTURE_x86_64)
 #    include "dynarmic/backend/x64/mig/mach_exc_server.c"
 #elif defined(ARCHITECTURE_arm64)

src/dynarmic/src/dynarmic/backend/exception_handler_posix.cpp | 6

@@ -187,15 +187,15 @@ private:
 ExceptionHandler::ExceptionHandler() = default;
 ExceptionHandler::~ExceptionHandler() = default;
-#if defined(MCL_ARCHITECTURE_X86_64)
+#if defined(ARCHITECTURE_x86_64)
 void ExceptionHandler::Register(X64::BlockOfCode& code) {
     impl = std::make_unique<Impl>(std::bit_cast<u64>(code.getCode()), code.GetTotalCodeSize());
 }
-#elif defined(MCL_ARCHITECTURE_ARM64)
+#elif defined(ARCHITECTURE_arm64)
 void ExceptionHandler::Register(oaknut::CodeBlock& mem, std::size_t size) {
     impl = std::make_unique<Impl>(std::bit_cast<u64>(mem.ptr()), size);
 }
-#elif defined(MCL_ARCHITECTURE_RISCV)
+#elif defined(ARCHITECTURE_riscv64)
 void ExceptionHandler::Register(RV64::CodeBlock& mem, std::size_t size) {
     impl = std::make_unique<Impl>(std::bit_cast<u64>(mem.ptr<u64>()), size);
 }

src/dynarmic/src/dynarmic/backend/exception_handler_windows.cpp | 2

@@ -6,8 +6,6 @@
  * SPDX-License-Identifier: 0BSD
  */
-#include <mcl/macro/architecture.hpp>
 #if defined(ARCHITECTURE_x86_64)
 #    include "dynarmic/backend/x64/exception_handler_windows.cpp"
 #elif defined(ARCHITECTURE_arm64)

src/dynarmic/src/dynarmic/backend/x64/emit_x64_memory.cpp.inc | 52

@@ -16,15 +16,10 @@ using Vector = std::array<u64, 2>;
 }
 std::optional<AxxEmitX64::DoNotFastmemMarker> AxxEmitX64::ShouldFastmem(AxxEmitContext& ctx, IR::Inst* inst) const {
-    if (!conf.fastmem_pointer || !exception_handler.SupportsFastmem()) {
+    if (!conf.fastmem_pointer || !exception_handler.SupportsFastmem())
         return std::nullopt;
-    }
     const auto marker = std::make_tuple(ctx.Location(), inst->GetName());
-    if (do_not_fastmem.count(marker) > 0) {
-        return std::nullopt;
-    }
-    return marker;
+    return do_not_fastmem.count(marker) <= 0 ? marker : std::nullopt;
 }
 FakeCall AxxEmitX64::FastmemCallback(u64 rip_) {
@@ -59,16 +54,12 @@ void AxxEmitX64::EmitMemoryRead(AxxEmitContext& ctx, IR::Inst* inst) {
     // Neither fastmem nor page table: Use callbacks
     if constexpr (bitsize == 128) {
         ctx.reg_alloc.HostCall(nullptr, {}, args[1]);
-        if (ordered) {
-            code.mfence();
-        }
+        if (ordered) code.mfence();
         code.CallFunction(memory_read_128);
         ctx.reg_alloc.DefineValue(inst, xmm1);
     } else {
         ctx.reg_alloc.HostCall(inst, {}, args[1]);
-        if (ordered) {
-            code.mfence();
-        }
+        if (ordered) code.mfence();
         Devirtualize<callback>(conf.callbacks).EmitCall(code);
         code.ZeroExtendFrom(bitsize, code.ABI_RETURN);
     }
@@ -154,9 +145,7 @@ void AxxEmitX64::EmitMemoryWrite(AxxEmitContext& ctx, IR::Inst* inst) {
         ctx.reg_alloc.HostCall(nullptr, {}, args[1], args[2]);
         Devirtualize<callback>(conf.callbacks).EmitCall(code);
     }
-    if (ordered) {
-        code.mfence();
-    }
+    if (ordered) code.mfence();
     EmitCheckMemoryAbort(ctx, inst);
     return;
 }
@@ -296,11 +285,9 @@ void AxxEmitX64::EmitExclusiveWriteMemory(AxxEmitContext& ctx, IR::Inst* inst) {
         code.CallLambda(
             [](AxxUserConfig& conf, Axx::VAddr vaddr, T value) -> u32 {
                 return conf.global_monitor->DoExclusiveOperation<T>(conf.processor_id, vaddr,
-                    [&](T expected) -> bool {
-                        return (conf.callbacks->*callback)(vaddr, value, expected);
-                    })
-                    ? 0
-                    : 1;
+                           [&](T expected) -> bool {
+                               return (conf.callbacks->*callback)(vaddr, value, expected);
+                           }) ? 0 : 1;
             });
         if (ordered) {
             code.mfence();
@@ -312,11 +299,9 @@ void AxxEmitX64::EmitExclusiveWriteMemory(AxxEmitContext& ctx, IR::Inst* inst) {
         code.CallLambda(
             [](AxxUserConfig& conf, Axx::VAddr vaddr, Vector& value) -> u32 {
                 return conf.global_monitor->DoExclusiveOperation<Vector>(conf.processor_id, vaddr,
-                    [&](Vector expected) -> bool {
-                        return (conf.callbacks->*callback)(vaddr, value, expected);
-                    })
-                    ? 0
-                    : 1;
+                           [&](Vector expected) -> bool {
+                               return (conf.callbacks->*callback)(vaddr, value, expected);
+                           }) ? 0 : 1;
             });
         if (ordered) {
             code.mfence();
@@ -476,25 +461,20 @@ void AxxEmitX64::EmitExclusiveWriteMemoryInline(AxxEmitContext& ctx, IR::Inst* i
     const auto location = code.getCurr();
     if constexpr (bitsize == 128) {
-        code.lock();
-        code.cmpxchg16b(ptr[dest_ptr]);
+        code.lock(); code.cmpxchg16b(ptr[dest_ptr]);
     } else {
         switch (bitsize) {
         case 8:
-            code.lock();
-            code.cmpxchg(code.byte[dest_ptr], value.cvt8());
+            code.lock(); code.cmpxchg(code.byte[dest_ptr], value.cvt8());
             break;
         case 16:
-            code.lock();
-            code.cmpxchg(word[dest_ptr], value.cvt16());
+            code.lock(); code.cmpxchg(word[dest_ptr], value.cvt16());
             break;
         case 32:
-            code.lock();
-            code.cmpxchg(dword[dest_ptr], value.cvt32());
+            code.lock(); code.cmpxchg(dword[dest_ptr], value.cvt32());
             break;
         case 64:
-            code.lock();
-            code.cmpxchg(qword[dest_ptr], value.cvt64());
+            code.lock(); code.cmpxchg(qword[dest_ptr], value.cvt64());
             break;
         default:
             UNREACHABLE();
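Note: the condensed sequences above pair an x86 lock prefix with cmpxchg, the hardware compare-and-swap behind the exclusive write, and emit mfence as a full barrier on ordered accesses. A rough standalone C++ analogue of that path, assuming nothing about dynarmic's internals (all names hypothetical):

    #include <atomic>
    #include <cstdint>

    // Hypothetical model of the exclusive-write path: succeed only if the
    // location still holds the value observed at the exclusive load.
    // Returns 0 on success, 1 on failure, matching the 0/1 convention above.
    std::uint32_t ExclusiveWrite(std::atomic<std::uint64_t>& mem,
                                 std::uint64_t expected,
                                 std::uint64_t desired) {
        // compare_exchange_strong lowers to lock cmpxchg on x86-64; seq_cst
        // ordering corresponds to the fenced ("ordered") emitter variant.
        return mem.compare_exchange_strong(expected, desired,
                                           std::memory_order_seq_cst) ? 0 : 1;
    }

On x86-64 a lock-prefixed instruction is itself a full barrier, which is why the emitter needs no extra fence around the cmpxchg and reserves mfence for the ordered plain accesses.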

src/dynarmic/tests/test_generator.cpp | 1

@@ -17,7 +17,6 @@
 #include <vector>
 #include <mcl/bit/swap.hpp>
-#include <mcl/macro/architecture.hpp>
 #include "dynarmic/common/common_types.h"
 #include "./A32/testenv.h"
