|
|
|
@ -16,31 +16,28 @@ |
|
|
|
|
|
|
|
namespace Core { |
|
|
|
|
|
|
|
CpuManager::CpuManager(System& system_) |
|
|
|
: pause_barrier{std::make_unique<Common::Barrier>(1)}, system{system_} {} |
|
|
|
CpuManager::CpuManager(System& system_) : system{system_} {} |
|
|
|
CpuManager::~CpuManager() = default; |
|
|
|
|
|
|
|
void CpuManager::ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager, |
|
|
|
std::size_t core) { |
|
|
|
cpu_manager.RunThread(stop_token, core); |
|
|
|
cpu_manager.RunThread(core); |
|
|
|
} |
|
|
|
|
|
|
|
void CpuManager::Initialize() { |
|
|
|
running_mode = true; |
|
|
|
if (is_multicore) { |
|
|
|
for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { |
|
|
|
num_cores = is_multicore ? Core::Hardware::NUM_CPU_CORES : 1; |
|
|
|
|
|
|
|
for (std::size_t core = 0; core < num_cores; core++) { |
|
|
|
core_data[core].host_thread = std::jthread(ThreadStart, std::ref(*this), core); |
|
|
|
} |
|
|
|
pause_barrier = std::make_unique<Common::Barrier>(Core::Hardware::NUM_CPU_CORES + 1); |
|
|
|
} else { |
|
|
|
core_data[0].host_thread = std::jthread(ThreadStart, std::ref(*this), 0); |
|
|
|
pause_barrier = std::make_unique<Common::Barrier>(2); |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
void CpuManager::Shutdown() { |
|
|
|
running_mode = false; |
|
|
|
Pause(false); |
|
|
|
for (std::size_t core = 0; core < num_cores; core++) { |
|
|
|
if (core_data[core].host_thread.joinable()) { |
|
|
|
core_data[core].host_thread.join(); |
|
|
|
} |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
std::function<void(void*)> CpuManager::GetGuestThreadStartFunc() { |
|
|
|
@ -51,8 +48,8 @@ std::function<void(void*)> CpuManager::GetIdleThreadStartFunc() { |
|
|
|
return IdleThreadFunction; |
|
|
|
} |
|
|
|
|
|
|
|
std::function<void(void*)> CpuManager::GetSuspendThreadStartFunc() { |
|
|
|
return SuspendThreadFunction; |
|
|
|
std::function<void(void*)> CpuManager::GetShutdownThreadStartFunc() { |
|
|
|
return ShutdownThreadFunction; |
|
|
|
} |
|
|
|
|
|
|
|
void CpuManager::GuestThreadFunction(void* cpu_manager_) { |
|
|
|
@ -82,17 +79,12 @@ void CpuManager::IdleThreadFunction(void* cpu_manager_) { |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
void CpuManager::SuspendThreadFunction(void* cpu_manager_) { |
|
|
|
CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_); |
|
|
|
if (cpu_manager->is_multicore) { |
|
|
|
cpu_manager->MultiCoreRunSuspendThread(); |
|
|
|
} else { |
|
|
|
cpu_manager->SingleCoreRunSuspendThread(); |
|
|
|
} |
|
|
|
void CpuManager::ShutdownThreadFunction(void* cpu_manager) {
    // Trampoline: recover the CpuManager instance from the opaque pointer and
    // forward to the member implementation.
    auto* const manager = static_cast<CpuManager*>(cpu_manager);
    manager->ShutdownThread();
}
|
|
|
|
|
|
|
void* CpuManager::GetStartFuncParamater() { |
|
|
|
return static_cast<void*>(this); |
|
|
|
void* CpuManager::GetStartFuncParameter() { |
|
|
|
return this; |
|
|
|
} |
|
|
|
|
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
@ -134,21 +126,6 @@ void CpuManager::MultiCoreRunIdleThread() { |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
void CpuManager::MultiCoreRunSuspendThread() { |
|
|
|
auto& kernel = system.Kernel(); |
|
|
|
kernel.CurrentScheduler()->OnThreadStart(); |
|
|
|
while (true) { |
|
|
|
auto core = kernel.CurrentPhysicalCoreIndex(); |
|
|
|
auto& scheduler = *kernel.CurrentScheduler(); |
|
|
|
Kernel::KThread* current_thread = scheduler.GetCurrentThread(); |
|
|
|
current_thread->DisableDispatch(); |
|
|
|
|
|
|
|
Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context); |
|
|
|
ASSERT(core == kernel.CurrentPhysicalCoreIndex()); |
|
|
|
scheduler.RescheduleCurrentCore(); |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
/// SingleCore ///
|
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
@ -194,21 +171,6 @@ void CpuManager::SingleCoreRunIdleThread() { |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
void CpuManager::SingleCoreRunSuspendThread() { |
|
|
|
auto& kernel = system.Kernel(); |
|
|
|
kernel.CurrentScheduler()->OnThreadStart(); |
|
|
|
while (true) { |
|
|
|
auto core = kernel.GetCurrentHostThreadID(); |
|
|
|
auto& scheduler = *kernel.CurrentScheduler(); |
|
|
|
Kernel::KThread* current_thread = scheduler.GetCurrentThread(); |
|
|
|
current_thread->DisableDispatch(); |
|
|
|
|
|
|
|
Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[0].host_context); |
|
|
|
ASSERT(core == kernel.GetCurrentHostThreadID()); |
|
|
|
scheduler.RescheduleCurrentCore(); |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
void CpuManager::PreemptSingleCore(bool from_running_enviroment) { |
|
|
|
{ |
|
|
|
auto& kernel = system.Kernel(); |
|
|
|
@ -241,24 +203,16 @@ void CpuManager::PreemptSingleCore(bool from_running_enviroment) { |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
void CpuManager::Pause(bool paused) { |
|
|
|
std::scoped_lock lk{pause_lock}; |
|
|
|
|
|
|
|
if (pause_state == paused) { |
|
|
|
return; |
|
|
|
} |
|
|
|
|
|
|
|
// Set the new state
|
|
|
|
pause_state.store(paused); |
|
|
|
|
|
|
|
// Wake up any waiting threads
|
|
|
|
pause_state.notify_all(); |
|
|
|
void CpuManager::ShutdownThread() { |
|
|
|
auto& kernel = system.Kernel(); |
|
|
|
auto core = is_multicore ? kernel.CurrentPhysicalCoreIndex() : 0; |
|
|
|
auto* current_thread = kernel.GetCurrentEmuThread(); |
|
|
|
|
|
|
|
// Wait for all threads to successfully change state before returning
|
|
|
|
pause_barrier->Sync(); |
|
|
|
Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context); |
|
|
|
UNREACHABLE(); |
|
|
|
} |
|
|
|
|
|
|
|
void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) { |
|
|
|
void CpuManager::RunThread(std::size_t core) { |
|
|
|
/// Initialization
|
|
|
|
system.RegisterCoreThread(core); |
|
|
|
std::string name; |
|
|
|
@ -272,8 +226,6 @@ void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) { |
|
|
|
Common::SetCurrentThreadPriority(Common::ThreadPriority::High); |
|
|
|
auto& data = core_data[core]; |
|
|
|
data.host_context = Common::Fiber::ThreadToFiber(); |
|
|
|
const bool sc_sync = !is_async_gpu && !is_multicore; |
|
|
|
bool sc_sync_first_use = sc_sync; |
|
|
|
|
|
|
|
// Cleanup
|
|
|
|
SCOPE_EXIT({ |
|
|
|
@ -281,32 +233,13 @@ void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) { |
|
|
|
MicroProfileOnThreadExit(); |
|
|
|
}); |
|
|
|
|
|
|
|
/// Running
|
|
|
|
while (running_mode) { |
|
|
|
if (pause_state.load(std::memory_order_relaxed)) { |
|
|
|
// Wait for caller to acknowledge pausing
|
|
|
|
pause_barrier->Sync(); |
|
|
|
|
|
|
|
// Wait until unpaused
|
|
|
|
pause_state.wait(true, std::memory_order_relaxed); |
|
|
|
|
|
|
|
// Wait for caller to acknowledge unpausing
|
|
|
|
pause_barrier->Sync(); |
|
|
|
} |
|
|
|
|
|
|
|
if (sc_sync_first_use) { |
|
|
|
// Running
|
|
|
|
if (!is_async_gpu && !is_multicore) { |
|
|
|
system.GPU().ObtainContext(); |
|
|
|
sc_sync_first_use = false; |
|
|
|
} |
|
|
|
|
|
|
|
// Emulation was stopped
|
|
|
|
if (stop_token.stop_requested()) { |
|
|
|
return; |
|
|
|
} |
|
|
|
|
|
|
|
auto current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread(); |
|
|
|
Common::Fiber::YieldTo(data.host_context, *current_thread->GetHostContext()); |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
} // namespace Core
|