@@ -7,10 +7,12 @@
 #include "common/assert.h"
 #include "common/common_funcs.h"
 #include "common/logging/log.h"
+#include "core/core.h"
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/resource_limit.h"
+#include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/thread.h"
 #include "core/hle/kernel/vm_manager.h"
 #include "core/memory.h"
@@ -128,6 +130,91 @@ void Process::Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size) {
     Kernel::SetupMainThread(kernel, entry_point, main_thread_priority, *this);
 }
 
+void Process::PrepareForTermination() {
+    status = ProcessStatus::Exited;
+
+    const auto stop_threads = [this](const std::vector<SharedPtr<Thread>>& thread_list) {
+        for (auto& thread : thread_list) {
+            if (thread->owner_process != this)
+                continue;
+
+            if (thread == GetCurrentThread())
+                continue;
+
+            // TODO(Subv): When are the other running/ready threads terminated?
+            ASSERT_MSG(thread->status == ThreadStatus::WaitSynchAny ||
+                           thread->status == ThreadStatus::WaitSynchAll,
+                       "Exiting processes with non-waiting threads is currently unimplemented");
+
+            thread->Stop();
+        }
+    };
+
+    auto& system = Core::System::GetInstance();
+    stop_threads(system.Scheduler(0)->GetThreadList());
+    stop_threads(system.Scheduler(1)->GetThreadList());
+    stop_threads(system.Scheduler(2)->GetThreadList());
+    stop_threads(system.Scheduler(3)->GetThreadList());
+}
+
+/**
+ * Finds a free location for the TLS section of a thread.
+ * @param tls_slots The TLS page array of the thread's owner process.
+ * Returns a tuple of (page, slot, alloc_needed) where:
+ * page: The index of the first allocated TLS page that has free slots.
+ * slot: The index of the first free slot in the indicated page.
+ * alloc_needed: Whether there's a need to allocate a new TLS page (All pages are full).
+ */
+static std::tuple<std::size_t, std::size_t, bool> FindFreeThreadLocalSlot(
+    const std::vector<std::bitset<8>>& tls_slots) {
+    // Iterate over all the allocated pages, and try to find one where not all slots are used.
+    for (std::size_t page = 0; page < tls_slots.size(); ++page) {
+        const auto& page_tls_slots = tls_slots[page];
+        if (!page_tls_slots.all()) {
+            // We found a page with at least one free slot, find which slot it is
+            for (std::size_t slot = 0; slot < page_tls_slots.size(); ++slot) {
+                if (!page_tls_slots.test(slot)) {
+                    return std::make_tuple(page, slot, false);
+                }
+            }
+        }
+    }
+
+    return std::make_tuple(0, 0, true);
+}
+
+VAddr Process::MarkNextAvailableTLSSlotAsUsed(Thread& thread) {
+    auto [available_page, available_slot, needs_allocation] = FindFreeThreadLocalSlot(tls_slots);
+
+    if (needs_allocation) {
+        tls_slots.emplace_back(0); // The page is completely available at the start
+        available_page = tls_slots.size() - 1;
+        available_slot = 0; // Use the first slot in the new page
+
+        // Allocate some memory from the end of the linear heap for this region.
+        auto& tls_memory = thread.GetTLSMemory();
+        tls_memory->insert(tls_memory->end(), Memory::PAGE_SIZE, 0);
+
+        vm_manager.RefreshMemoryBlockMappings(tls_memory.get());
+
+        vm_manager.MapMemoryBlock(Memory::TLS_AREA_VADDR + available_page * Memory::PAGE_SIZE,
+                                  tls_memory, 0, Memory::PAGE_SIZE, MemoryState::ThreadLocal);
+    }
+
+    tls_slots[available_page].set(available_slot);
+
+    return Memory::TLS_AREA_VADDR + available_page * Memory::PAGE_SIZE +
+           available_slot * Memory::TLS_ENTRY_SIZE;
+}
+
+void Process::FreeTLSSlot(VAddr tls_address) {
+    const VAddr tls_base = tls_address - Memory::TLS_AREA_VADDR;
+    const VAddr tls_page = tls_base / Memory::PAGE_SIZE;
+    const VAddr tls_slot = (tls_base % Memory::PAGE_SIZE) / Memory::TLS_ENTRY_SIZE;
+
+    tls_slots[tls_page].reset(tls_slot);
+}
+
 void Process::LoadModule(SharedPtr<CodeSet> module_, VAddr base_addr) {
     const auto MapSegment = [&](CodeSet::Segment& segment, VMAPermission permissions,
                                 MemoryState memory_state) {
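
For reference, a minimal standalone sketch (not part of the patch) of the TLS address arithmetic introduced above: MarkNextAvailableTLSSlotAsUsed packs a (page, slot) pair into an address, and FreeTLSSlot unpacks it again. The constant values below are assumptions standing in for Memory::TLS_AREA_VADDR, Memory::PAGE_SIZE and Memory::TLS_ENTRY_SIZE; only the arithmetic mirrors the diff. With 0x1000-byte pages and 0x200-byte entries there are eight slots per page, which is why each page is tracked with a std::bitset<8>.

// Sketch only: placeholder constants, round-trip logic taken from the diff above.
#include <cassert>
#include <cstdint>

constexpr std::uint64_t TLS_AREA_VADDR = 0x40000000; // placeholder base address
constexpr std::uint64_t PAGE_SIZE = 0x1000;          // assumed 4 KiB TLS pages
constexpr std::uint64_t TLS_ENTRY_SIZE = 0x200;      // assumed 512-byte TLS entries

// Compose a TLS address from a (page, slot) pair, as MarkNextAvailableTLSSlotAsUsed does.
constexpr std::uint64_t TlsAddress(std::uint64_t page, std::uint64_t slot) {
    return TLS_AREA_VADDR + page * PAGE_SIZE + slot * TLS_ENTRY_SIZE;
}

int main() {
    // Decompose the address again, as FreeTLSSlot does, and check the round trip.
    const std::uint64_t address = TlsAddress(2, 5);
    const std::uint64_t base = address - TLS_AREA_VADDR;
    assert(base / PAGE_SIZE == 2);                    // page index
    assert((base % PAGE_SIZE) / TLS_ENTRY_SIZE == 5); // slot index within the page
    return 0;
}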