
[dynarmic] fix multicore macOS issue

Signed-off-by: lizzie <lizzie@eden-emu.dev>
lizzie/multicore-macos-fix1
lizzie 3 days ago
parent commit 535a80e8e3
  1. src/core/cpu_manager.cpp (76 changes)
  2. src/core/cpu_manager.h (1 change)
  3. src/core/hle/kernel/physical_core.cpp (72 changes)
  4. src/dynarmic/src/dynarmic/backend/arm64/a32_interface.cpp (43 changes)
  5. src/dynarmic/src/dynarmic/backend/arm64/a64_interface.cpp (51 changes)
  6. src/dynarmic/src/dynarmic/backend/riscv64/a32_interface.cpp (28 changes)
  7. src/dynarmic/src/dynarmic/backend/x64/a32_interface.cpp (42 changes)
  8. src/dynarmic/src/dynarmic/backend/x64/a64_interface.cpp (44 changes)
  9. src/dynarmic/src/dynarmic/interface/A32/a32.h (10 changes)
  10. src/dynarmic/src/dynarmic/interface/A64/a64.h (6 changes)

src/core/cpu_manager.cpp (76 changes)

@@ -26,15 +26,42 @@ CpuManager::~CpuManager() = default;
void CpuManager::Initialize() {
num_cores = is_multicore ? Core::Hardware::NUM_CPU_CORES : 1;
gpu_barrier = std::make_unique<Common::Barrier>(num_cores + 1);
for (std::size_t core = 0; core < num_cores; core++)
core_data[core].host_thread = std::jthread([this, core](std::stop_token token) { RunThread(token, core); });
for (std::size_t core = 0; core < num_cores; core++) {
core_data[core].host_thread = std::jthread([this, core](std::stop_token token) {
// Initialization
system.RegisterCoreThread(core);
Common::SetCurrentThreadName(("CPUCore_" + std::to_string(core)).c_str());
Common::SetCurrentThreadPriority(Common::ThreadPriority::Critical);
#ifdef __ANDROID__
// Aimed specifically for Snapdragon 8 Elite devices
// This kills performance on desktop, but boosts perf for UMA devices
// like the S8E. Mediatek and Mali likely won't suffer.
Common::PinCurrentThreadToPerformanceCore(core);
#endif
auto& data = core_data[core];
data.host_context = Common::Fiber::ThreadToFiber();
// Running
if (gpu_barrier->Sync(token)) {
if (!is_async_gpu && !is_multicore) {
system.GPU().ObtainContext();
}
auto& kernel = system.Kernel();
auto& scheduler = *kernel.CurrentScheduler();
auto* thread = scheduler.GetSchedulerCurrentThread();
Kernel::SetCurrentThread(kernel, thread);
Common::Fiber::YieldTo(data.host_context, *thread->GetHostContext());
}
// Cleanup
data.host_context->Exit();
});
}
}
void CpuManager::Shutdown() {
for (std::size_t core = 0; core < num_cores; core++) {
if (core_data[core].host_thread.joinable()) {
core_data[core].host_thread.request_stop();
core_data[core].host_thread.join();
for (auto it = core_data.begin(); it != core_data.end(); ++it) {
if (it->host_thread.joinable()) {
it->host_thread.request_stop();
it->host_thread.join();
}
}
}
@@ -183,41 +210,4 @@ void CpuManager::ShutdownThread() {
UNREACHABLE();
}
void CpuManager::RunThread(std::stop_token token, std::size_t core) {
/// Initialization
system.RegisterCoreThread(core);
std::string name = is_multicore ? ("CPUCore_" + std::to_string(core)) : std::string{"CPUThread"};
Common::SetCurrentThreadName(name.c_str());
Common::SetCurrentThreadPriority(Common::ThreadPriority::Critical);
#ifdef __ANDROID__
// Aimed specifically for Snapdragon 8 Elite devices
// This kills performance on desktop, but boosts perf for UMA devices
// like the S8E. Mediatek and Mali likely won't suffer.
Common::PinCurrentThreadToPerformanceCore(core);
#endif
auto& data = core_data[core];
data.host_context = Common::Fiber::ThreadToFiber();
// Cleanup
SCOPE_EXIT {
data.host_context->Exit();
};
// Running
if (!gpu_barrier->Sync(token)) {
return;
}
if (!is_async_gpu && !is_multicore) {
system.GPU().ObtainContext();
}
auto& kernel = system.Kernel();
auto& scheduler = *kernel.CurrentScheduler();
auto* thread = scheduler.GetSchedulerCurrentThread();
Kernel::SetCurrentThread(kernel, thread);
Common::Fiber::YieldTo(data.host_context, *thread->GetHostContext());
}
} // namespace Core
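
Taken together, the cpu_manager.cpp hunks fold the former RunThread helper (deleted above, and dropped from cpu_manager.h below) into the lambda handed to std::jthread, so each core thread does its initialization, barrier sync, run loop, and fiber cleanup in one scope; Shutdown likewise iterates core_data directly. A minimal sketch of that shape, with std::barrier standing in for Common::Barrier and prints standing in for the fiber switch (all names illustrative, not Eden's API):

#include <barrier>
#include <cstdio>
#include <stop_token>
#include <thread>
#include <vector>

int main() {
    constexpr std::size_t num_cores = 4;
    std::barrier sync(num_cores + 1); // workers plus the launching thread
    std::vector<std::jthread> workers;
    for (std::size_t core = 0; core < num_cores; ++core) {
        workers.emplace_back([&sync, core](std::stop_token token) {
            // Initialization happens on the owning thread itself.
            std::printf("core %zu: init\n", core);
            sync.arrive_and_wait(); // stand-in for gpu_barrier->Sync(token)
            // Running: proceed only if no stop was requested.
            if (!token.stop_requested())
                std::printf("core %zu: run\n", core); // stand-in for Fiber::YieldTo
            // Cleanup also runs here, on the same thread.
            std::printf("core %zu: exit\n", core);
        });
    }
    sync.arrive_and_wait(); // release the workers
    for (auto& w : workers)
        w.request_stop(); // jthread joins automatically on destruction
}

One observable difference the diff preserves: the single-core path now names its thread CPUCore_0 rather than CPUThread, since the name is computed unconditionally inside the per-core lambda.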

src/core/cpu_manager.h (1 change)

@@ -84,7 +84,6 @@ private:
void GuestActivate();
void HandleInterrupt();
void ShutdownThread();
void RunThread(std::stop_token stop_token, std::size_t core);
struct CoreData {
std::shared_ptr<Common::Fiber> host_context;

src/core/hle/kernel/physical_core.cpp (72 changes)

@@ -119,10 +119,9 @@ void PhysicalCore::RunThread(Kernel::KThread* thread) {
// Since scheduling may occur here, we cannot use any cached
// state after returning from calls we make.
// Notify the debugger and go to sleep if a breakpoint was hit,
// or if the thread is unable to continue for any reason.
if (breakpoint || (prefetch_abort && may_abort)) {
// Notify the debugger and go to sleep if a breakpoint was hit,
// or if the thread is unable to continue for any reason.
if (breakpoint) {
interface->RewindBreakpointInstruction();
}
@@ -133,26 +132,19 @@ void PhysicalCore::RunThread(Kernel::KThread* thread) {
}
thread->RequestSuspend(SuspendType::Debug);
return;
}
// Notify the debugger and go to sleep on data abort.
if (data_abort) {
} else if (data_abort) {
// Notify the debugger and go to sleep on data abort.
if (system.DebuggerEnabled()) {
system.GetDebugger().NotifyThreadWatchpoint(thread, *interface->HaltedWatchpoint());
}
thread->RequestSuspend(SuspendType::Debug);
return;
}
// Handle system calls.
if (supervisor_call) {
// Perform call.
} else if (supervisor_call) {
// Handle system calls: Perform call.
Svc::Call(system, interface->GetSvcNumber());
return;
}
// Handle external interrupt sources.
if (interrupt || m_is_single_core) {
} else if (interrupt || m_is_single_core) {
// Handle external interrupt sources.
return;
}
}
@@ -160,16 +152,14 @@ void PhysicalCore::RunThread(Kernel::KThread* thread) {
void PhysicalCore::LoadContext(const KThread* thread) {
auto* const process = thread->GetOwnerProcess();
if (!process) {
if (process) {
// Kernel threads do not run on emulated CPU cores.
return;
}
auto* interface = process->GetArmInterface(m_core_index);
if (interface) {
interface->SetContext(thread->GetContext());
interface->SetTpidrroEl0(GetInteger(thread->GetTlsAddress()));
interface->SetWatchpointArray(&process->GetWatchpoints());
auto* interface = process->GetArmInterface(m_core_index);
if (interface) {
interface->SetContext(thread->GetContext());
interface->SetTpidrroEl0(GetInteger(thread->GetTlsAddress()));
interface->SetWatchpointArray(&process->GetWatchpoints());
}
}
}
@@ -179,14 +169,12 @@ void PhysicalCore::LoadSvcArguments(const KProcess& process, std::span<const uin
void PhysicalCore::SaveContext(KThread* thread) const {
auto* const process = thread->GetOwnerProcess();
if (!process) {
if (process) {
// Kernel threads do not run on emulated CPU cores.
return;
}
auto* interface = process->GetArmInterface(m_core_index);
if (interface) {
interface->GetContext(thread->GetContext());
auto* interface = process->GetArmInterface(m_core_index);
if (interface) {
interface->GetContext(thread->GetContext());
}
}
}
@@ -206,13 +194,11 @@ void PhysicalCore::CloneFpuStatus(KThread* dst) const {
void PhysicalCore::LogBacktrace() {
auto* process = GetCurrentProcessPointer(m_kernel);
if (!process) {
return;
}
auto* interface = process->GetArmInterface(m_core_index);
if (interface) {
interface->LogBacktrace(process);
if (process) {
auto* interface = process->GetArmInterface(m_core_index);
if (interface) {
interface->LogBacktrace(process);
}
}
}
@@ -240,12 +226,10 @@ void PhysicalCore::Interrupt() {
m_on_interrupt.notify_one();
// If there is no thread running, we are done.
if (arm_interface == nullptr) {
return;
if (arm_interface != nullptr) {
// Interrupt the CPU.
arm_interface->SignalInterrupt(thread);
}
// Interrupt the CPU.
arm_interface->SignalInterrupt(thread);
}
void PhysicalCore::ClearInterrupt() {
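
The physical_core.cpp hunks are a control-flow cleanup with no intended behavior change: chains of early-return guards become a single if/else-if ladder, and negative guards such as if (!process) return; become positive nesting. A compressed sketch of the transformation (event names illustrative, not the kernel's types):

enum class Event { Breakpoint, DataAbort, Svc, Interrupt };

// Before: one guard clause per case, each with its own return.
void DispatchOld(Event e) {
    if (e == Event::Breakpoint) { /* suspend for debugger */ return; }
    if (e == Event::DataAbort) { /* suspend for debugger */ return; }
    if (e == Event::Svc) { /* perform the call */ return; }
    if (e == Event::Interrupt) { /* yield to interrupt handling */ return; }
}

// After: a single ladder with one exit path; the cases stay mutually exclusive.
void DispatchNew(Event e) {
    if (e == Event::Breakpoint) {
        /* suspend for debugger */
    } else if (e == Event::DataAbort) {
        /* suspend for debugger */
    } else if (e == Event::Svc) {
        /* perform the call */
    } else if (e == Event::Interrupt) {
        /* yield to interrupt handling */
    }
}

The same inversion appears in LoadContext, SaveContext, LogBacktrace, and Interrupt.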

src/dynarmic/src/dynarmic/backend/arm64/a32_interface.cpp (43 changes)

@@ -32,45 +32,31 @@ struct Jit::Impl final {
, core(conf) {}
HaltReason Run() {
ASSERT(!jit_interface->is_executing);
PerformRequestedCacheInvalidation(static_cast<HaltReason>(Atomic::Load(&halt_reason)));
jit_interface->is_executing = true;
SCOPE_EXIT {
jit_interface->is_executing = false;
};
ASSERT(!is_executing);
PerformRequestedCacheInvalidation(HaltReason(Atomic::Load(&halt_reason)));
is_executing = true;
HaltReason hr = core.Run(current_address_space, current_state, &halt_reason);
PerformRequestedCacheInvalidation(hr);
is_executing = false;
return hr;
}
HaltReason Step() {
ASSERT(!jit_interface->is_executing);
PerformRequestedCacheInvalidation(static_cast<HaltReason>(Atomic::Load(&halt_reason)));
jit_interface->is_executing = true;
SCOPE_EXIT {
jit_interface->is_executing = false;
};
ASSERT(!is_executing);
PerformRequestedCacheInvalidation(HaltReason(Atomic::Load(&halt_reason)));
is_executing = true;
HaltReason hr = core.Step(current_address_space, current_state, &halt_reason);
PerformRequestedCacheInvalidation(hr);
is_executing = false;
return hr;
}
void ClearCache() {
std::unique_lock lock{invalidation_mutex};
invalidate_entire_cache = true;
HaltExecution(HaltReason::CacheInvalidation);
}
void InvalidateCacheRange(std::uint32_t start_address, std::size_t length) {
std::unique_lock lock{invalidation_mutex};
invalid_cache_ranges.add(boost::icl::discrete_interval<u32>::closed(start_address, static_cast<u32>(start_address + length - 1)));
HaltExecution(HaltReason::CacheInvalidation);
}
@@ -80,12 +66,12 @@ struct Jit::Impl final {
}
void HaltExecution(HaltReason hr) {
Atomic::Or(&halt_reason, static_cast<u32>(hr));
Atomic::Or(&halt_reason, u32(hr));
Atomic::Barrier();
}
void ClearHalt(HaltReason hr) {
Atomic::And(&halt_reason, ~static_cast<u32>(hr));
Atomic::And(&halt_reason, ~u32(hr));
Atomic::Barrier();
}
@@ -132,21 +118,15 @@ struct Jit::Impl final {
private:
void PerformRequestedCacheInvalidation(HaltReason hr) {
if (Has(hr, HaltReason::CacheInvalidation)) {
std::unique_lock lock{invalidation_mutex};
ClearHalt(HaltReason::CacheInvalidation);
if (invalidate_entire_cache) {
current_address_space.ClearCache();
invalidate_entire_cache = false;
invalid_cache_ranges.clear();
return;
}
if (!invalid_cache_ranges.empty()) {
current_address_space.InvalidateCacheRanges(invalid_cache_ranges);
invalid_cache_ranges.clear();
return;
}
@@ -160,10 +140,9 @@ private:
A32Core core;
volatile u32 halt_reason = 0;
std::mutex invalidation_mutex;
boost::icl::interval_set<u32> invalid_cache_ranges;
bool invalidate_entire_cache = false;
bool is_executing = false;
};
Jit::Jit(UserConfig conf)
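
This hunk establishes the pattern repeated across all five backend interfaces below: the executing flag stops being reached through jit_interface (the public Jit object) and becomes a private is_executing member of Impl, and the SCOPE_EXIT guard becomes an explicit store after the core call returns. A reduced sketch (types simplified; RunCore stands in for core.Run):

#include <cassert>

struct Impl {
    bool is_executing = false;

    int Run() {
        assert(!is_executing);    // formerly ASSERT(!jit_interface->is_executing)
        is_executing = true;
        const int hr = RunCore(); // stand-in for core.Run(current_address_space, ...)
        is_executing = false;     // explicit reset replaces the SCOPE_EXIT guard
        return hr;
    }

    int RunCore() { return 0; }   // placeholder for the dispatcher call
};

Note the tradeoff: SCOPE_EXIT reset the flag on every exit path, including exceptions, while the explicit store only runs on normal return. In exchange, no mutable state remains in the header-visible object, which is what the a32.h and a64.h hunks at the end remove.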

src/dynarmic/src/dynarmic/backend/arm64/a64_interface.cpp (51 changes)

@@ -27,50 +27,37 @@ using namespace Backend::Arm64;
struct Jit::Impl final {
Impl(Jit*, A64::UserConfig conf)
: conf(conf)
, current_address_space(conf)
, core(conf) {}
: conf(conf)
, current_address_space(conf)
, core(conf)
{}
HaltReason Run() {
ASSERT(!is_executing);
PerformRequestedCacheInvalidation(static_cast<HaltReason>(Atomic::Load(&halt_reason)));
is_executing = true;
SCOPE_EXIT {
is_executing = false;
};
PerformRequestedCacheInvalidation(HaltReason(Atomic::Load(&halt_reason)));
HaltReason hr = core.Run(current_address_space, current_state, &halt_reason);
PerformRequestedCacheInvalidation(hr);
is_executing = false;
return hr;
}
HaltReason Step() {
ASSERT(!is_executing);
PerformRequestedCacheInvalidation(static_cast<HaltReason>(Atomic::Load(&halt_reason)));
is_executing = true;
SCOPE_EXIT {
is_executing = false;
};
PerformRequestedCacheInvalidation(HaltReason(Atomic::Load(&halt_reason)));
HaltReason hr = core.Step(current_address_space, current_state, &halt_reason);
PerformRequestedCacheInvalidation(hr);
is_executing = false;
return hr;
}
void ClearCache() {
std::unique_lock lock{invalidation_mutex};
invalidate_entire_cache = true;
HaltExecution(HaltReason::CacheInvalidation);
}
void InvalidateCacheRange(std::uint64_t start_address, std::size_t length) {
std::unique_lock lock{invalidation_mutex};
invalid_cache_ranges.add(boost::icl::discrete_interval<u64>::closed(start_address, start_address + length - 1));
HaltExecution(HaltReason::CacheInvalidation);
}
@@ -80,11 +67,13 @@ struct Jit::Impl final {
}
void HaltExecution(HaltReason hr) {
Atomic::Or(&halt_reason, static_cast<u32>(hr));
Atomic::Or(&halt_reason, u32(hr));
Atomic::Barrier();
}
void ClearHalt(HaltReason hr) {
Atomic::And(&halt_reason, ~static_cast<u32>(hr));
Atomic::And(&halt_reason, ~u32(hr));
Atomic::Barrier();
}
std::uint64_t PC() const {
@@ -147,10 +136,6 @@ struct Jit::Impl final {
current_state.exclusive_state = false;
}
bool IsExecuting() const {
return is_executing;
}
std::string Disassemble() const {
return {};
}
@@ -158,21 +143,15 @@ struct Jit::Impl final {
private:
void PerformRequestedCacheInvalidation(HaltReason hr) {
if (Has(hr, HaltReason::CacheInvalidation)) {
std::unique_lock lock{invalidation_mutex};
ClearHalt(HaltReason::CacheInvalidation);
if (invalidate_entire_cache) {
current_address_space.ClearCache();
invalidate_entire_cache = false;
invalid_cache_ranges.clear();
return;
}
if (!invalid_cache_ranges.empty()) {
current_address_space.InvalidateCacheRanges(invalid_cache_ranges);
invalid_cache_ranges.clear();
return;
}
@@ -185,8 +164,6 @@ private:
A64Core core;
volatile u32 halt_reason = 0;
std::mutex invalidation_mutex;
boost::icl::interval_set<u64> invalid_cache_ranges;
bool invalidate_entire_cache = false;
bool is_executing = false;
@@ -307,10 +284,6 @@ void Jit::ClearExclusiveState() {
impl->ClearExclusiveState();
}
bool Jit::IsExecuting() const {
return impl->IsExecuting();
}
std::string Jit::Disassemble() const {
return impl->Disassemble();
}
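
HaltExecution and ClearHalt keep their semantics under the new u32(hr) spelling: halt_reason is a bitmask that requesters OR bits into and the owner clears with AND-NOT. A self-contained sketch using std::atomic in place of dynarmic's Atomic:: helpers (bit values illustrative, not dynarmic's actual enumerators):

#include <atomic>
#include <cstdint>

enum class HaltReason : std::uint32_t {
    Step = 1u << 0,
    CacheInvalidation = 1u << 1,
    UserDefined1 = 1u << 2,
};

std::atomic<std::uint32_t> halt_reason{0};

void HaltExecution(HaltReason hr) {
    // Request: set the bit; fetch_or is an atomic read-modify-write.
    halt_reason.fetch_or(static_cast<std::uint32_t>(hr));
}

void ClearHalt(HaltReason hr) {
    // Acknowledge: clear only that bit, leaving other pending requests intact.
    halt_reason.fetch_and(~static_cast<std::uint32_t>(hr));
}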

src/dynarmic/src/dynarmic/backend/riscv64/a32_interface.cpp (28 changes)

@@ -32,41 +32,29 @@ struct Jit::Impl final {
, core(conf) {}
HaltReason Run() {
ASSERT(!jit_interface->is_executing);
jit_interface->is_executing = true;
SCOPE_EXIT {
jit_interface->is_executing = false;
};
ASSERT(!is_executing);
is_executing = true;
HaltReason hr = core.Run(current_address_space, current_state, &halt_reason);
RequestCacheInvalidation();
is_executing = false;
return hr;
}
HaltReason Step() {
ASSERT(!jit_interface->is_executing);
jit_interface->is_executing = true;
SCOPE_EXIT {
jit_interface->is_executing = false;
};
ASSERT(!is_executing);
is_executing = true;
UNIMPLEMENTED();
RequestCacheInvalidation();
is_executing = false;
return HaltReason{};
}
void ClearCache() {
std::unique_lock lock{invalidation_mutex};
invalidate_entire_cache = true;
HaltExecution(HaltReason::CacheInvalidation);
}
void InvalidateCacheRange(u32 start_address, size_t length) {
std::unique_lock lock{invalidation_mutex};
invalid_cache_ranges.add(boost::icl::discrete_interval<u32>::closed(start_address, static_cast<u32>(start_address + length - 1)));
HaltExecution(HaltReason::CacheInvalidation);
}
@@ -132,12 +120,10 @@ private:
A32JitState current_state{};
A32AddressSpace current_address_space;
A32Core core;
volatile u32 halt_reason = 0;
std::mutex invalidation_mutex;
boost::icl::interval_set<u32> invalid_cache_ranges;
bool invalidate_entire_cache = false;
bool is_executing = false;
};
Jit::Jit(UserConfig conf)

src/dynarmic/src/dynarmic/backend/x64/a32_interface.cpp (42 changes)

@@ -75,14 +75,9 @@ struct Jit::Impl {
~Impl() = default;
HaltReason Run() {
ASSERT(!jit_interface->is_executing);
ASSERT(!is_executing);
PerformRequestedCacheInvalidation(static_cast<HaltReason>(Atomic::Load(&jit_state.halt_reason)));
jit_interface->is_executing = true;
SCOPE_EXIT {
jit_interface->is_executing = false;
};
is_executing = true;
const CodePtr current_codeptr = [this] {
// RSB optimization
const u32 new_rsb_ptr = (jit_state.rsb_ptr - 1) & A32JitState::RSBPtrMask;
@@ -93,44 +88,34 @@ struct Jit::Impl {
return GetCurrentBlock();
}();
const HaltReason hr = block_of_code.RunCode(&jit_state, current_codeptr);
PerformRequestedCacheInvalidation(hr);
is_executing = false;
return hr;
}
HaltReason Step() {
ASSERT(!jit_interface->is_executing);
PerformRequestedCacheInvalidation(static_cast<HaltReason>(Atomic::Load(&jit_state.halt_reason)));
jit_interface->is_executing = true;
SCOPE_EXIT {
jit_interface->is_executing = false;
};
ASSERT(!is_executing);
PerformRequestedCacheInvalidation(HaltReason(Atomic::Load(&jit_state.halt_reason)));
is_executing = true;
const HaltReason hr = block_of_code.StepCode(&jit_state, GetCurrentSingleStep());
PerformRequestedCacheInvalidation(hr);
is_executing = false;
return hr;
}
void ClearCache() {
std::unique_lock lock{invalidation_mutex};
invalidate_entire_cache = true;
HaltExecution(HaltReason::CacheInvalidation);
}
void InvalidateCacheRange(std::uint32_t start_address, std::size_t length) {
std::unique_lock lock{invalidation_mutex};
invalid_cache_ranges.add(boost::icl::discrete_interval<u32>::closed(start_address, static_cast<u32>(start_address + length - 1)));
invalid_cache_ranges.add(boost::icl::discrete_interval<u32>::closed(start_address, u32(start_address + length - 1)));
HaltExecution(HaltReason::CacheInvalidation);
}
void Reset() {
ASSERT(!jit_interface->is_executing);
ASSERT(!is_executing);
jit_state = {};
}
@@ -223,14 +208,10 @@ private:
void PerformRequestedCacheInvalidation(HaltReason hr) {
if (Has(hr, HaltReason::CacheInvalidation)) {
std::unique_lock lock{invalidation_mutex};
ClearHalt(HaltReason::CacheInvalidation);
if (!invalidate_entire_cache && invalid_cache_ranges.empty()) {
return;
}
jit_state.ResetRSB();
if (invalidate_entire_cache) {
block_of_code.ClearCache();
@@ -251,11 +232,10 @@ private:
// Keep it here, you don't wanna mess with the fuckery that's initializer lists
const A32::UserConfig conf;
Jit* jit_interface;
// Requests made during execution to invalidate the cache are queued up here.
bool invalidate_entire_cache = false;
boost::icl::interval_set<u32> invalid_cache_ranges;
std::mutex invalidation_mutex;
bool invalidate_entire_cache = false;
bool is_executing = false;
};
Jit::Jit(UserConfig conf)
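
Every backend shares the invalidation handshake visible in this file: ClearCache and InvalidateCacheRange queue work under invalidation_mutex and raise HaltReason::CacheInvalidation, and PerformRequestedCacheInvalidation drains the queue on the JIT thread once it observes that bit. A simplified sketch of the shape, with a set of closed ranges standing in for boost::icl::interval_set (names illustrative):

#include <cstddef>
#include <cstdint>
#include <mutex>
#include <set>
#include <utility>

struct InvalidationQueue {
    std::mutex mutex;
    std::set<std::pair<std::uint32_t, std::uint32_t>> ranges; // closed [lo, hi]
    bool invalidate_entire_cache = false;

    // Called from any thread; pairs with HaltExecution(HaltReason::CacheInvalidation).
    void InvalidateRange(std::uint32_t start, std::size_t length) {
        std::unique_lock lock{mutex};
        ranges.emplace(start, std::uint32_t(start + length - 1));
    }

    // Called from the JIT thread after it halts with the bit set.
    void Drain() {
        std::unique_lock lock{mutex};
        if (invalidate_entire_cache) {
            // A full flush supersedes any queued ranges.
            invalidate_entire_cache = false;
            ranges.clear();
        } else if (!ranges.empty()) {
            // Otherwise flush just the queued ranges.
            ranges.clear();
        }
    }
};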

src/dynarmic/src/dynarmic/backend/x64/a64_interface.cpp (44 changes)

@@ -75,14 +75,8 @@ public:
HaltReason Run() {
ASSERT(!is_executing);
PerformRequestedCacheInvalidation(static_cast<HaltReason>(Atomic::Load(&jit_state.halt_reason)));
is_executing = true;
SCOPE_EXIT {
this->is_executing = false;
};
// TODO: Check code alignment
const CodePtr current_code_ptr = [this] {
// RSB optimization
const u32 new_rsb_ptr = (jit_state.rsb_ptr - 1) & A64JitState::RSBPtrMask;
@@ -94,37 +88,28 @@ public:
}();
const HaltReason hr = block_of_code.RunCode(&jit_state, current_code_ptr);
PerformRequestedCacheInvalidation(hr);
is_executing = false;
return hr;
}
HaltReason Step() {
ASSERT(!is_executing);
PerformRequestedCacheInvalidation(static_cast<HaltReason>(Atomic::Load(&jit_state.halt_reason)));
PerformRequestedCacheInvalidation(HaltReason(Atomic::Load(&jit_state.halt_reason)));
is_executing = true;
SCOPE_EXIT {
this->is_executing = false;
};
const HaltReason hr = block_of_code.StepCode(&jit_state, GetCurrentSingleStep());
PerformRequestedCacheInvalidation(hr);
is_executing = false;
return hr;
}
void ClearCache() {
std::unique_lock lock{invalidation_mutex};
invalidate_entire_cache = true;
HaltExecution(HaltReason::CacheInvalidation);
}
void InvalidateCacheRange(u64 start_address, size_t length) {
std::unique_lock lock{invalidation_mutex};
const auto end_address = static_cast<u64>(start_address + length - 1);
const auto end_address = u64(start_address + length - 1);
const auto range = boost::icl::discrete_interval<u64>::closed(start_address, end_address);
invalid_cache_ranges.add(range);
HaltExecution(HaltReason::CacheInvalidation);
@@ -136,11 +121,11 @@ public:
}
void HaltExecution(HaltReason hr) {
Atomic::Or(&jit_state.halt_reason, static_cast<u32>(hr));
Atomic::Or(&jit_state.halt_reason, u32(hr));
}
void ClearHalt(HaltReason hr) {
Atomic::And(&jit_state.halt_reason, ~static_cast<u32>(hr));
Atomic::And(&jit_state.halt_reason, ~u32(hr));
}
u64 GetSP() const {
@@ -228,10 +213,6 @@ public:
jit_state.exclusive_state = 0;
}
bool IsExecuting() const {
return is_executing;
}
std::string Disassemble() const {
const size_t size = reinterpret_cast<const char*>(block_of_code.getCurr()) - reinterpret_cast<const char*>(block_of_code.GetCodeBegin());
auto const* p = reinterpret_cast<const char*>(block_of_code.GetCodeBegin());
@@ -280,14 +261,10 @@ private:
void PerformRequestedCacheInvalidation(HaltReason hr) {
if (Has(hr, HaltReason::CacheInvalidation)) {
std::unique_lock lock{invalidation_mutex};
ClearHalt(HaltReason::CacheInvalidation);
if (!invalidate_entire_cache && invalid_cache_ranges.empty()) {
return;
}
jit_state.ResetRSB();
if (invalidate_entire_cache) {
block_of_code.ClearCache();
@@ -306,10 +283,9 @@ private:
BlockOfCode block_of_code;
A64EmitX64 emitter;
Optimization::PolyfillOptions polyfill_options;
bool is_executing = false;
bool invalidate_entire_cache = false;
boost::icl::interval_set<u64> invalid_cache_ranges;
std::mutex invalidation_mutex;
bool invalidate_entire_cache = false;
bool is_executing = false;
};
Jit::Jit(UserConfig conf)
@@ -421,10 +397,6 @@ void Jit::ClearExclusiveState() {
impl->ClearExclusiveState();
}
bool Jit::IsExecuting() const {
return impl->IsExecuting();
}
std::string Jit::Disassemble() const {
return impl->Disassemble();
}

src/dynarmic/src/dynarmic/interface/A32/a32.h (10 changes)

@@ -84,21 +84,11 @@ public:
/// Clears exclusive state for this core.
void ClearExclusiveState();
/**
* Returns true if Jit::Run was called but hasn't returned yet.
* i.e.: We're in a callback.
*/
bool IsExecuting() const {
return is_executing;
}
/// @brief Disassemble the instructions following the current pc and return
/// the resulting instructions as a vector of their string representations.
std::string Disassemble() const;
private:
bool is_executing = false;
struct Impl;
std::unique_ptr<Impl> impl;
};

src/dynarmic/src/dynarmic/interface/A64/a64.h (6 changes)

@@ -116,12 +116,6 @@ public:
/// Clears exclusive state for this core.
void ClearExclusiveState();
/**
* Returns true if Jit::Run was called but hasn't returned yet.
* i.e.: We're in a callback.
*/
bool IsExecuting() const;
/// @brief Disassemble the instructions following the current pc and return
/// the resulting instructions as a vector of their string representations.
std::string Disassemble() const;
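
With IsExecuting() and its backing bool removed from both public headers, what remains is a pure pimpl facade: all mutable JIT state is owned by the backend-local Impl, matching the backend changes above. A reduced sketch of the resulting surface (members listed in comments are assumptions drawn from the diffs above):

#include <memory>
#include <string>

class Jit {
public:
    void ClearExclusiveState();
    std::string Disassemble() const;
    // IsExecuting() removed: callers can no longer observe mid-run state.
private:
    struct Impl;                // defined per backend in its own .cpp
    std::unique_ptr<Impl> impl; // owns is_executing, halt_reason, caches, ...
};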
