@@ -11,6 +11,7 @@
 #include <mutex>
 #include <optional>
 #include <vector>
+#include <array>
 
 #include "common/assert.h"
 #include "common/bit_util.h"
@@ -67,24 +68,29 @@ static void ResetThreadContext64(Kernel::Svc::ThreadContext& ctx, u64 stack_top,
 } // namespace
 
 namespace Kernel {
 
 namespace {
 
 struct ThreadLocalRegion {
     static constexpr std::size_t MessageBufferSize = 0x100;
     std::array<u32, MessageBufferSize / sizeof(u32)> message_buffer;
     std::atomic_uint16_t disable_count;
     std::atomic_uint16_t interrupt_flag;
+    std::atomic_uint8_t cache_maintenance_flag;
+    std::atomic_int64_t thread_cpu_time;
 };
 
+static_assert(offsetof(ThreadLocalRegion, disable_count) == 0x100);
+static_assert(offsetof(ThreadLocalRegion, interrupt_flag) == 0x102);
+static_assert(offsetof(ThreadLocalRegion, cache_maintenance_flag) == 0x104);
+static_assert(offsetof(ThreadLocalRegion, thread_cpu_time) == 0x108);
+
 class ThreadQueueImplForKThreadSleep final : public KThreadQueueWithoutEndWait {
 public:
     explicit ThreadQueueImplForKThreadSleep(KernelCore& kernel)
         : KThreadQueueWithoutEndWait(kernel) {}
 };
 
 class ThreadQueueImplForKThreadSetProperty final : public KThreadQueue {
 public:
     explicit ThreadQueueImplForKThreadSetProperty(KernelCore& kernel, KThread::WaiterList* wl)
         : KThreadQueue(kernel), m_wait_list(wl) {}
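
Note on the hunk above (commentary, not part of the patch): the four static_asserts pin the guest-visible ABI of the thread-local region. The 0x100-byte message buffer fills offsets 0x000-0x0FF, the two u16 flags sit at 0x100 and 0x102, the new u8 cache_maintenance_flag lands at 0x104, and the natural 8-byte alignment of the 64-bit counter pushes thread_cpu_time to 0x108, leaving three padding bytes. A minimal standalone sketch of the same layout check, using only standard C++ types:

    #include <array>
    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    // Mirror of ThreadLocalRegion from the patch, reduced to standard types.
    struct Tlr {
        std::array<std::uint32_t, 0x100 / sizeof(std::uint32_t)> message_buffer; // 0x000..0x0FF
        std::atomic_uint16_t disable_count;         // 0x100
        std::atomic_uint16_t interrupt_flag;        // 0x102
        std::atomic_uint8_t cache_maintenance_flag; // 0x104
        std::atomic_int64_t thread_cpu_time;        // 0x108, after 3 padding bytes
    };

    static_assert(offsetof(Tlr, disable_count) == 0x100);
    static_assert(offsetof(Tlr, interrupt_flag) == 0x102);
    static_assert(offsetof(Tlr, cache_maintenance_flag) == 0x104);
    static_assert(offsetof(Tlr, thread_cpu_time) == 0x108);

Asserting the offsets in the source keeps a future reordering of the struct from silently breaking guest code that reads these fields by offset.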
@@ -96,17 +102,17 @@ public:
         KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
     }
 
 private:
     KThread::WaiterList* m_wait_list{};
 };
 
 } // namespace
 
 KThread::KThread(KernelCore& kernel)
     : KAutoObjectWithSlabHeapAndContainer{kernel}, m_activity_pause_lock{kernel} {}
 
 KThread::~KThread() = default;
 
 Result KThread::Initialize(KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top,
                            s32 prio, s32 virt_core, KProcess* owner, ThreadType type) {
     // Assert parameters are valid.
     ASSERT((type == ThreadType::Main) || (type == ThreadType::Dummy) ||
@@ -254,9 +260,9 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top,
     }
 
     R_SUCCEED();
 }
 
 Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
                                  KProcessAddress user_stack_top, s32 prio, s32 core,
                                  KProcess* owner, ThreadType type,
                                  std::function<void()>&& init_func) {
@@ -267,9 +273,9 @@ Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
     thread->m_host_context = std::make_shared<Common::Fiber>(std::move(init_func));
 
     R_SUCCEED();
 }
 
 Result KThread::InitializeDummyThread(KThread* thread, KProcess* owner) {
     // Initialize the thread.
     R_TRY(thread->Initialize({}, {}, {}, DummyThreadPriority, 3, owner, ThreadType::Dummy));
 
@@ -277,34 +283,34 @@ Result KThread::InitializeDummyThread(KThread* thread, KProcess* owner) {
     thread->m_stack_parameters.disable_count = 0;
 
     R_SUCCEED();
 }
 
 Result KThread::InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core) {
     R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {},
                               ThreadType::Main, system.GetCpuManager().GetGuestActivateFunc()));
 }
 
 Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
     R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {},
                               ThreadType::Main, system.GetCpuManager().GetIdleThreadStartFunc()));
 }
 
 Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread,
                                              KThreadFunction func, uintptr_t arg, s32 virt_core) {
     R_RETURN(InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr,
                               ThreadType::HighPriority,
                               system.GetCpuManager().GetShutdownThreadStartFunc()));
 }
 
 Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func,
                                      uintptr_t arg, KProcessAddress user_stack_top, s32 prio,
                                      s32 virt_core, KProcess* owner) {
     system.Kernel().GlobalSchedulerContext().AddThread(thread);
     R_RETURN(InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
                               ThreadType::User, system.GetCpuManager().GetGuestThreadFunc()));
 }
 
 Result KThread::InitializeServiceThread(Core::System& system, KThread* thread,
                                         std::function<void()>&& func, s32 prio, s32 virt_core,
                                         KProcess* owner) {
     system.Kernel().GlobalSchedulerContext().AddThread(thread);
@@ -321,9 +327,9 @@ Result KThread::InitializeServiceThread(Core::System& system, KThread* thread,
 
     R_RETURN(InitializeThread(thread, {}, {}, {}, prio, virt_core, owner, ThreadType::HighPriority,
                               std::move(func2)));
 }
 
 void KThread::PostDestroy(uintptr_t arg) {
     KProcess* owner = reinterpret_cast<KProcess*>(arg & ~1ULL);
     const bool resource_limit_release_hint = (arg & 1);
     const s64 hint_value = (resource_limit_release_hint ? 0 : 1);
@@ -331,9 +337,9 @@ void KThread::PostDestroy(uintptr_t arg) {
         owner->GetResourceLimit()->Release(LimitableResource::ThreadCountMax, 1, hint_value);
         owner->Close();
     }
 }
 
 void KThread::Finalize() {
     // If the thread has an owner process, unregister it.
     if (m_parent != nullptr) {
         m_parent->UnregisterThread(this);
@@ -387,22 +393,22 @@ void KThread::Finalize() {
 
     // Perform inherited finalization.
     KSynchronizationObject::Finalize();
 }
 
 bool KThread::IsSignaled() const {
     return m_signaled;
 }
 
 void KThread::OnTimer() {
     ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
 
     // If we're waiting, cancel the wait.
     if (this->GetState() == ThreadState::Waiting) {
         m_wait_queue->CancelWait(this, ResultTimedOut, false);
     }
 }
 
 void KThread::StartTermination() {
     ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
 
     // Release user exception and unpin, if relevant.
@@ -426,9 +432,9 @@ void KThread::StartTermination() {
 
     // Register terminated dpc flag.
     this->RegisterDpc(DpcFlag::Terminated);
 }
 
 void KThread::FinishTermination() {
     // Ensure that the thread is not executing on any core.
     if (m_parent != nullptr) {
         for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) {
@@ -448,14 +454,14 @@ void KThread::FinishTermination() {
 
     // Close the thread.
     this->Close();
 }
 
 void KThread::DoWorkerTaskImpl() {
     // Finish the termination that was begun by Exit().
     this->FinishTermination();
 }
 
 void KThread::Pin(s32 current_core) {
     ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
 
     // Set ourselves as pinned.
@@ -497,9 +503,9 @@ void KThread::Pin(s32 current_core) {
 
     // TODO(bunnei): Update our SVC access permissions.
     ASSERT(m_parent != nullptr);
 }
 
 void KThread::Unpin() {
     ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
 
     // Set ourselves as unpinned.
@@ -550,9 +556,9 @@ void KThread::Unpin() {
          it = m_pinned_waiter_list.erase(it)) {
         it->EndWait(ResultSuccess);
     }
 }
 
 u16 KThread::GetUserDisableCount() const {
     if (!this->IsUserThread()) {
         // We only emulate TLS for user threads
         return {};
@@ -560,9 +566,9 @@ u16 KThread::GetUserDisableCount() const {
 
     auto& memory = this->GetOwnerProcess()->GetMemory();
     return memory.Read16(m_tls_address + offsetof(ThreadLocalRegion, disable_count));
 }
 
 void KThread::SetInterruptFlag() {
     if (!this->IsUserThread()) {
         // We only emulate TLS for user threads
         return;
@@ -570,9 +576,9 @@ void KThread::SetInterruptFlag() {
 
     auto& memory = this->GetOwnerProcess()->GetMemory();
     memory.Write16(m_tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 1);
 }
 
 void KThread::ClearInterruptFlag() {
     if (!this->IsUserThread()) {
         // We only emulate TLS for user threads
         return;
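
These accessors share one pattern: the thread-local region is only emulated for user threads, and every access goes through the owning process's guest memory at m_tls_address plus an offsetof into ThreadLocalRegion. A read-side counterpart for the new counter would presumably look like the sketch below; the helper is hypothetical (it is not in this patch) and assumes Memory::Read64 mirrors the Read16 used by GetUserDisableCount:

    // Hypothetical read-back helper, mirroring GetUserDisableCount() above.
    s64 KThread::GetTlsThreadCpuTime() const {
        if (!this->IsUserThread()) {
            // We only emulate TLS for user threads
            return 0;
        }

        auto& memory = this->GetOwnerProcess()->GetMemory();
        return static_cast<s64>(
            memory.Read64(m_tls_address + offsetof(ThreadLocalRegion, thread_cpu_time)));
    }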
@@ -580,9 +586,22 @@ void KThread::ClearInterruptFlag() {
 
     auto& memory = this->GetOwnerProcess()->GetMemory();
     memory.Write16(m_tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 0);
 }
 
+void KThread::UpdateTlsThreadCpuTime(s64 switch_tick) {
+    if (!this->IsUserThread()) {
+        return;
+    }
+    if (m_tls_address == 0) {
+        return;
+    }
+
+    const s64 value = this->GetCpuTime() - switch_tick;
+    auto& memory = this->GetOwnerProcess()->GetMemory();
+    memory.Write64(m_tls_address + offsetof(ThreadLocalRegion, thread_cpu_time), static_cast<u64>(value));
+}
+
 Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
     KScopedSchedulerLock sl{m_kernel};
 
     // Get the virtual mask.
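
The new UpdateTlsThreadCpuTime above only publishes a value: it guards against kernel threads and a not-yet-mapped TLS block, then writes GetCpuTime() minus the supplied switch tick as a u64 at offset 0x108. Something on the scheduling path still has to invoke it. A plausible call site is sketched below; the hook name, signature, and m_last_switch_tick bookkeeping are assumptions for illustration, not code from this patch:

    // Sketch of a scheduler-side hook that would publish CPU time on context switch.
    void KScheduler::OnThreadSwitchedOut(KThread* prev, s64 now_tick) {
        // Publish the outgoing thread's CPU time into its thread-local region so
        // the guest can read it without an SVC round-trip.
        if (prev != nullptr) {
            prev->UpdateTlsThreadCpuTime(m_last_switch_tick);
        }
        m_last_switch_tick = now_tick;
    }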
@@ -590,9 +609,9 @@ Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
     *out_affinity_mask = m_virtual_affinity_mask;
 
     R_SUCCEED();
 }
 
 Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
     KScopedSchedulerLock sl{m_kernel};
     ASSERT(m_num_core_migration_disables >= 0);
 
@@ -606,9 +625,9 @@ Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask)
     }
 
     R_SUCCEED();
 }
 
 Result KThread::SetCoreMask(s32 core_id, u64 v_affinity_mask) {
     ASSERT(m_parent != nullptr);
     ASSERT(v_affinity_mask != 0);
     KScopedLightLock lk(m_activity_pause_lock);
@@ -718,9 +737,9 @@ Result KThread::SetCoreMask(s32 core_id, u64 v_affinity_mask) {
     }
 
     R_SUCCEED();
 }
 
 void KThread::SetBasePriority(s32 value) {
     ASSERT(Svc::HighestThreadPriority <= value && value <= Svc::LowestThreadPriority);
 
     KScopedSchedulerLock sl{m_kernel};
@@ -730,13 +749,13 @@ void KThread::SetBasePriority(s32 value) {
 
     // Perform a priority restoration.
     RestorePriority(m_kernel, this);
 }
 
 KThread* KThread::GetLockOwner() const {
     return m_waiting_lock_info != nullptr ? m_waiting_lock_info->GetOwner() : nullptr;
 }
 
 void KThread::IncreaseBasePriority(s32 priority) {
     ASSERT(Svc::HighestThreadPriority <= priority && priority <= Svc::LowestThreadPriority);
     ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
     ASSERT(!this->GetStackParameters().is_pinned);
@@ -748,9 +767,9 @@ void KThread::IncreaseBasePriority(s32 priority) {
         // Perform a priority restoration.
         RestorePriority(m_kernel, this);
     }
 }
 
 void KThread::RequestSuspend(SuspendType type) {
     KScopedSchedulerLock sl{m_kernel};
 
     // Note the request in our flags.
@@ -759,9 +778,9 @@ void KThread::RequestSuspend(SuspendType type) {
 
     // Try to perform the suspend.
     this->TrySuspend();
 }
 
 void KThread::Resume(SuspendType type) {
     KScopedSchedulerLock sl{m_kernel};
 
     // Clear the request in our flags.
@@ -770,9 +789,9 @@ void KThread::Resume(SuspendType type) {
 
     // Update our state.
     this->UpdateState();
 }
 
 void KThread::WaitCancel() {
     KScopedSchedulerLock sl{m_kernel};
 
     // Check if we're waiting and cancellable.
@@ -783,9 +802,9 @@ void KThread::WaitCancel() {
         // Otherwise, note that we cancelled a wait.
         m_wait_cancelled = true;
     }
 }
 
 void KThread::TrySuspend() {
     ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
     ASSERT(this->IsSuspendRequested());
 
@@ -797,9 +816,9 @@ void KThread::TrySuspend() {
 
     // Perform the suspend.
     this->UpdateState();
 }
 
 void KThread::UpdateState() {
     ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
 
     // Set our suspend flags in state.
@@ -812,9 +831,9 @@ void KThread::UpdateState() {
     if (new_state != old_state) {
         KScheduler::OnThreadStateChanged(m_kernel, this, old_state);
     }
 }
 
 void KThread::Continue() {
     ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
 
     // Clear our suspend flags in state.
@@ -823,17 +842,17 @@ void KThread::Continue() {
 
     // Note the state change in scheduler.
     KScheduler::OnThreadStateChanged(m_kernel, this, old_state);
 }
 
 void KThread::CloneFpuStatus() {
     // We shouldn't reach here when starting kernel threads.
     ASSERT(this->GetOwnerProcess() != nullptr);
     ASSERT(this->GetOwnerProcess() == GetCurrentProcessPointer(m_kernel));
 
     m_kernel.CurrentPhysicalCore().CloneFpuStatus(this);
 }
 
 Result KThread::SetActivity(Svc::ThreadActivity activity) {
     // Lock ourselves.
     KScopedLightLock lk(m_activity_pause_lock);
 
@@ -904,9 +923,9 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) {
     }
 
     R_SUCCEED();
 }
 
 Result KThread::GetThreadContext3(Svc::ThreadContext* out) {
     // Lock ourselves.
     KScopedLightLock lk{m_activity_pause_lock};
 
@@ -935,9 +954,9 @@ Result KThread::GetThreadContext3(Svc::ThreadContext* out) {
     }
 
     R_SUCCEED();
 }
 
 void KThread::AddHeldLock(LockWithPriorityInheritanceInfo* lock_info) {
     ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
 
     // Set ourselves as the lock's owner.
@@ -945,9 +964,9 @@ void KThread::AddHeldLock(LockWithPriorityInheritanceInfo* lock_info) {
 
     // Add the lock to our held list.
     m_held_lock_info_list.push_front(*lock_info);
 }
 
 KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(KProcessAddress address_key,
                                                                 bool is_kernel_address_key) {
     ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
 
@@ -960,9 +979,9 @@ KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(KProcessAddress
     }
 
     return nullptr;
 }
 
 void KThread::AddWaiterImpl(KThread* thread) {
     ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
     ASSERT(thread->GetConditionVariableTree() == nullptr);
 
@@ -989,9 +1008,9 @@ void KThread::AddWaiterImpl(KThread* thread) {
 
     // Add the thread as waiter to the lock info.
     lock_info->AddWaiter(thread);
 }
 
 void KThread::RemoveWaiterImpl(KThread* thread) {
     ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
 
     // Keep track of how many kernel waiters we have.
@@ -1009,9 +1028,9 @@ void KThread::RemoveWaiterImpl(KThread* thread) {
         m_held_lock_info_list.erase(m_held_lock_info_list.iterator_to(*lock_info));
         LockWithPriorityInheritanceInfo::Free(m_kernel, lock_info);
     }
 }
 
 void KThread::RestorePriority(KernelCore& kernel, KThread* thread) {
     ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
 
     while (thread != nullptr) {
@@ -1061,18 +1080,18 @@ void KThread::RestorePriority(KernelCore& kernel, KThread* thread) {
         // Continue inheriting priority.
         thread = lock_owner;
     }
 }
 
 void KThread::AddWaiter(KThread* thread) {
     this->AddWaiterImpl(thread);
 
     // If the thread has a higher priority than us, we should inherit.
     if (thread->GetPriority() < this->GetPriority()) {
         RestorePriority(m_kernel, this);
     }
 }
 
 void KThread::RemoveWaiter(KThread* thread) {
     this->RemoveWaiterImpl(thread);
 
     // If our priority is the same as the thread's (and we've inherited), we may need to restore to
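
AddWaiter and RemoveWaiter are the two edges of priority inheritance: whenever the waiter set of a held lock changes, RestorePriority recomputes the owner's effective priority and walks up the chain of lock owners. Under the kernel's convention that a numerically lower value is a higher priority, the invariant it maintains is sketched below (a toy model for illustration, not emulator code):

    #include <algorithm>
    #include <vector>

    // Toy model: a thread's effective priority is the best (smallest) of its own
    // base priority and the effective priorities of all threads waiting on it.
    struct Thread {
        int base_priority;
        std::vector<const Thread*> waiters;

        int EffectivePriority() const {
            int prio = base_priority;
            for (const Thread* w : waiters) {
                prio = std::min(prio, w->EffectivePriority());
            }
            return prio;
        }
    };

RestorePriority re-establishes exactly this after every waiter add or remove, then propagates to the owner of whatever lock this thread itself is waiting on.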
@@ -1081,9 +1100,9 @@ void KThread::RemoveWaiter(KThread* thread) {
         this->GetPriority() < this->GetBasePriority()) {
         RestorePriority(m_kernel, this);
     }
 }
 
 KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, KProcessAddress key,
                                     bool is_kernel_address_key_) {
     ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
 
@@ -1142,9 +1161,9 @@ KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, KProcessAddress key,
 
     // Return the next lock owner.
     return next_lock_owner;
 }
 
 Result KThread::Run() {
     while (true) {
         KScopedSchedulerLock lk{m_kernel};
 
@@ -1177,9 +1196,9 @@ Result KThread::Run() {
 
         R_SUCCEED();
     }
 }
 
 void KThread::Exit() {
     ASSERT(this == GetCurrentThreadPointer(m_kernel));
 
     // Release the thread resource hint, running thread count from parent.
@@ -1208,9 +1227,9 @@ void KThread::Exit() {
     }
 
     UNREACHABLE_MSG("KThread::Exit() would return");
 }
 
 Result KThread::Terminate() {
     ASSERT(this != GetCurrentThreadPointer(m_kernel));
 
     // Request the thread terminate if it hasn't already.
@@ -1223,9 +1242,9 @@ Result KThread::Terminate() {
     }
 
     R_SUCCEED();
 }
 
 ThreadState KThread::RequestTerminate() {
     ASSERT(this != GetCurrentThreadPointer(m_kernel));
 
     KScopedSchedulerLock sl{m_kernel};
@@ -1278,9 +1297,9 @@ ThreadState KThread::RequestTerminate() {
     }
 
     return this->GetState();
 }
 
 Result KThread::Sleep(s64 timeout) {
     ASSERT(!KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
     ASSERT(this == GetCurrentThreadPointer(m_kernel));
     ASSERT(timeout > 0);
@@ -1304,18 +1323,18 @@ Result KThread::Sleep(s64 timeout) {
     }
 
     R_SUCCEED();
 }
 
 void KThread::RequestDummyThreadWait() {
     ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
     ASSERT(this->IsDummyThread());
 
     // We will block when the scheduler lock is released.
     std::scoped_lock lock{m_dummy_thread_mutex};
     m_dummy_thread_runnable = false;
 }
 
 void KThread::DummyThreadBeginWait() {
     if (!this->IsDummyThread() || m_kernel.IsPhantomModeForSingleCore()) {
         // Occurs in single core mode.
         return;
@@ -1324,9 +1343,9 @@ void KThread::DummyThreadBeginWait() {
     // Block until runnable is no longer false.
     std::unique_lock lock{m_dummy_thread_mutex};
     m_dummy_thread_cv.wait(lock, [this] { return m_dummy_thread_runnable; });
 }
 
 void KThread::DummyThreadEndWait() {
     ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
     ASSERT(this->IsDummyThread());
 
@@ -1336,17 +1355,17 @@ void KThread::DummyThreadEndWait() {
         m_dummy_thread_runnable = true;
     }
     m_dummy_thread_cv.notify_one();
 }
 
 void KThread::BeginWait(KThreadQueue* queue) {
     // Set our state as waiting.
     this->SetState(ThreadState::Waiting);
 
     // Set our wait queue.
     m_wait_queue = queue;
 }
 
 void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result) {
     // Lock the scheduler.
     KScopedSchedulerLock sl(m_kernel);
 
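
The dummy-thread wait in the context above is a textbook condition-variable handoff: RequestDummyThreadWait clears the runnable flag under the mutex, DummyThreadBeginWait blocks on a predicate (so spurious wakeups are harmless), and DummyThreadEndWait sets the flag under the mutex but notifies after releasing it. The same shape in isolation, as a minimal standalone sketch:

    #include <condition_variable>
    #include <mutex>

    class WaitFlag {
    public:
        void RequestWait() {
            std::scoped_lock lk{m_mutex};
            m_runnable = false;
        }
        void BeginWait() {
            std::unique_lock lk{m_mutex};
            // Predicate form re-checks the flag, so a spurious wakeup cannot
            // release the waiter early.
            m_cv.wait(lk, [this] { return m_runnable; });
        }
        void EndWait() {
            {
                std::scoped_lock lk{m_mutex};
                m_runnable = true;
            }
            m_cv.notify_one(); // Notify outside the lock, as the patch context does.
        }

    private:
        std::mutex m_mutex;
        std::condition_variable m_cv;
        bool m_runnable = true;
    };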
@@ -1354,9 +1373,9 @@ void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result) {
     if (this->GetState() == ThreadState::Waiting) {
         m_wait_queue->NotifyAvailable(this, signaled_object, wait_result);
     }
 }
 
 void KThread::EndWait(Result wait_result) {
     // Lock the scheduler.
     KScopedSchedulerLock sl(m_kernel);
 
@@ -1370,9 +1389,9 @@ void KThread::EndWait(Result wait_result) {
 
         m_wait_queue->EndWait(this, wait_result);
     }
 }
 
 void KThread::CancelWait(Result wait_result, bool cancel_timer_task) {
     // Lock the scheduler.
     KScopedSchedulerLock sl(m_kernel);
 
@@ -1380,9 +1399,9 @@ void KThread::CancelWait(Result wait_result, bool cancel_timer_task) {
     if (this->GetState() == ThreadState::Waiting) {
         m_wait_queue->CancelWait(this, wait_result, cancel_timer_task);
     }
 }
 
 void KThread::SetState(ThreadState state) {
     KScopedSchedulerLock sl{m_kernel};
 
     // Clear debugging state
@@ -1395,41 +1414,41 @@ void KThread::SetState(ThreadState state) {
     if (m_thread_state.load(std::memory_order_relaxed) != old_state) {
         KScheduler::OnThreadStateChanged(m_kernel, this, old_state);
     }
 }
 
 std::shared_ptr<Common::Fiber>& KThread::GetHostContext() {
     return m_host_context;
 }
 
 void SetCurrentThread(KernelCore& kernel, KThread* thread) {
     kernel.SetCurrentEmuThread(thread);
 }
 
 KThread* GetCurrentThreadPointer(KernelCore& kernel) {
     return kernel.GetCurrentEmuThread();
 }
 
 KThread& GetCurrentThread(KernelCore& kernel) {
     return *GetCurrentThreadPointer(kernel);
 }
 
 KProcess* GetCurrentProcessPointer(KernelCore& kernel) {
     return GetCurrentThread(kernel).GetOwnerProcess();
 }
 
 KProcess& GetCurrentProcess(KernelCore& kernel) {
     return *GetCurrentProcessPointer(kernel);
 }
 
 s32 GetCurrentCoreId(KernelCore& kernel) {
     return GetCurrentThread(kernel).GetCurrentCore();
 }
 
 Core::Memory::Memory& GetCurrentMemory(KernelCore& kernel) {
     return GetCurrentProcess(kernel).GetMemory();
 }
 
 KScopedDisableDispatch::~KScopedDisableDispatch() {
     // If we are shutting down the kernel, none of this is relevant anymore.
     if (m_kernel.IsShuttingDown()) {
         return;
@@ -1446,6 +1465,5 @@ KScopedDisableDispatch::~KScopedDisableDispatch() {
     } else {
         GetCurrentThread(m_kernel).EnableDispatch();
     }
 }
-}
 
 } // namespace Kernel