@@ -710,10 +710,7 @@ void KScheduler::Unload(KThread* thread) {
 }
 
 void KScheduler::Reload(KThread* thread) {
-    LOG_TRACE(Kernel, "core {}, reload thread {}", core_id, thread ? thread->GetName() : "nullptr");
-
-    if (thread) {
-        ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");
+    LOG_TRACE(Kernel, "core {}, reload thread {}", core_id, thread->GetName());
 
     Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
     cpu_core.LoadContext(thread->GetContext32());
@@ -722,11 +719,10 @@ void KScheduler::Reload(KThread* thread) {
     cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
     cpu_core.ClearExclusiveState();
-    }
 }
 
 void KScheduler::SwitchContextStep2() {
     // Load context of new thread
-    Reload(current_thread.load());
+    Reload(GetCurrentThread());
 
     RescheduleCurrentCore();
 }
@@ -735,13 +731,17 @@ void KScheduler::ScheduleImpl() {
     KThread* previous_thread = GetCurrentThread();
     KThread* next_thread = state.highest_priority_thread;
 
-    state.needs_scheduling = false;
+    state.needs_scheduling.store(false);
 
     // We never want to schedule a null thread, so use the idle thread if we don't have a next.
     if (next_thread == nullptr) {
        next_thread = idle_thread;
    }
 
+    if (next_thread->GetCurrentCore() != core_id) {
+        next_thread->SetCurrentCore(core_id);
+    }
+
     // We never want to schedule a dummy thread, as these are only used by host threads for locking.
     if (next_thread->GetThreadType() == ThreadType::Dummy) {
         ASSERT_MSG(false, "Dummy threads should never be scheduled!");
@@ -755,14 +755,8 @@ void KScheduler::ScheduleImpl() {
         return;
     }
 
-    if (next_thread->GetCurrentCore() != core_id) {
-        next_thread->SetCurrentCore(core_id);
-    }
-
-    current_thread.store(next_thread);
-
     // Update the CPU time tracking variables.
     KProcess* const previous_process = system.Kernel().CurrentProcess();
     UpdateLastContextSwitchTime(previous_thread, previous_process);
 
     // Save context for previous thread
@@ -770,6 +764,10 @@ void KScheduler::ScheduleImpl() {
     std::shared_ptr<Common::Fiber>* old_context;
     old_context = &previous_thread->GetHostContext();
 
+    // Set the new thread.
+    current_thread.store(next_thread);
+
     guard.Unlock();
 
     Common::Fiber::YieldTo(*old_context, *switch_fiber);
@@ -797,8 +795,8 @@ void KScheduler::SwitchToCurrent() {
         do {
             auto next_thread = current_thread.load();
             if (next_thread != nullptr) {
-                next_thread->context_guard.Lock();
-                if (next_thread->GetRawState() != ThreadState::Runnable) {
+                const auto locked = next_thread->context_guard.TryLock();
+                if (state.needs_scheduling.load()) {
                     next_thread->context_guard.Unlock();
                     break;
                 }
@@ -806,6 +804,9 @@ void KScheduler::SwitchToCurrent() {
                     next_thread->context_guard.Unlock();
                     break;
                 }
+                if (!locked) {
+                    continue;
+                }
             }
             auto thread = next_thread ? next_thread : idle_thread;
             Common::Fiber::YieldTo(switch_fiber, *thread->GetHostContext());