hle: kernel: k_memory_layout: Derive memory regions based on board layout.
6 changed files with 1033 additions and 56 deletions

src/core/CMakeLists.txt (2)
src/core/hle/kernel/k_memory_layout.board.nintendo_nx.cpp (199)
src/core/hle/kernel/k_memory_layout.cpp (183)
src/core/hle/kernel/k_memory_layout.h (384)
src/core/hle/kernel/kernel.cpp (319)
src/core/hle/kernel/kernel.h (2)
src/core/hle/kernel/k_memory_layout.board.nintendo_nx.cpp
@@ -0,0 +1,199 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/alignment.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_system_control.h"
#include "core/hle/kernel/k_trace.h"

namespace Kernel {

namespace {

constexpr size_t CarveoutAlignment = 0x20000;
constexpr size_t CarveoutSizeMax = (512ULL * 1024 * 1024) - CarveoutAlignment;

bool SetupPowerManagementControllerMemoryRegion(KMemoryLayout& memory_layout) {
    // Above firmware 2.0.0, the PMC is not mappable.
    return memory_layout.GetPhysicalMemoryRegionTree().Insert(
               0x7000E000, 0x400, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap) &&
           memory_layout.GetPhysicalMemoryRegionTree().Insert(
               0x7000E400, 0xC00,
               KMemoryRegionType_PowerManagementController | KMemoryRegionAttr_NoUserMap);
}

void InsertPoolPartitionRegionIntoBothTrees(KMemoryLayout& memory_layout, size_t start, size_t size,
                                            KMemoryRegionType phys_type,
                                            KMemoryRegionType virt_type, u32& cur_attr) {
    const u32 attr = cur_attr++;
    ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(start, size,
                                                              static_cast<u32>(phys_type), attr));
    const KMemoryRegion* phys = memory_layout.GetPhysicalMemoryRegionTree().FindByTypeAndAttribute(
        static_cast<u32>(phys_type), attr);
    ASSERT(phys != nullptr);
    ASSERT(phys->GetEndAddress() != 0);
    ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(phys->GetPairAddress(), size,
                                                             static_cast<u32>(virt_type), attr));
}
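
Note: each partition insertion consumes a fresh attr value, and that attribute is the only handle back to the freshly inserted physical node; the FindByTypeAndAttribute round trip above recovers it so its pair address can seed the virtual tree. A minimal sketch of the same lookup from calling code (the region type and attr value are illustrative):

    // Hypothetical: recover the physical node tagged attr == 2, then its virtual pair base.
    const KMemoryRegion* phys = memory_layout.GetPhysicalMemoryRegionTree().FindByTypeAndAttribute(
        static_cast<u32>(KMemoryRegionType_DramAppletPool), 2);
    const VAddr virt_base = phys->GetPairAddress();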

} // namespace

namespace Init {

void SetupDevicePhysicalMemoryRegions(KMemoryLayout& memory_layout) {
    ASSERT(SetupPowerManagementControllerMemoryRegion(memory_layout));
    ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
        0x70019000, 0x1000, KMemoryRegionType_MemoryController | KMemoryRegionAttr_NoUserMap));
    ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
        0x7001C000, 0x1000, KMemoryRegionType_MemoryController0 | KMemoryRegionAttr_NoUserMap));
    ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
        0x7001D000, 0x1000, KMemoryRegionType_MemoryController1 | KMemoryRegionAttr_NoUserMap));
    ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
        0x50040000, 0x1000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
    ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
        0x50041000, 0x1000,
        KMemoryRegionType_InterruptDistributor | KMemoryRegionAttr_ShouldKernelMap));
    ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
        0x50042000, 0x1000,
        KMemoryRegionType_InterruptCpuInterface | KMemoryRegionAttr_ShouldKernelMap));
    ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
        0x50043000, 0x1D000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));

    // Map IRAM unconditionally, to support debug-logging-to-iram build config.
    ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
        0x40000000, 0x40000, KMemoryRegionType_LegacyLpsIram | KMemoryRegionAttr_ShouldKernelMap));

    // Above firmware 2.0.0, prevent mapping the bpmp exception vectors or the ipatch region.
    ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
        0x6000F000, 0x1000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
    ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
        0x6001DC00, 0x400, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
}
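
Note: the attribute flags here matter downstream. Regions tagged KMemoryRegionAttr_ShouldKernelMap (the GIC distributor and CPU interface, IRAM) are meant to be picked up and mapped by the kernel, while KMemoryRegionAttr_NoUserMap only records that userland must never see the range. A hedged sketch of the kind of consumer loop this enables; MapDeviceRegion is a placeholder, not an API from this change:

    // Walk the physical tree and map every region the kernel should see.
    for (const auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
        if (region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) {
            MapDeviceRegion(region.GetAddress(), region.GetLastAddress()); // hypothetical
        }
    }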

void SetupDramPhysicalMemoryRegions(KMemoryLayout& memory_layout) {
    const size_t intended_memory_size = KSystemControl::Init::GetIntendedMemorySize();
    const PAddr physical_memory_base_address =
        KSystemControl::Init::GetKernelPhysicalBaseAddress(DramPhysicalAddress);

    // Insert blocks into the tree.
    ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
        physical_memory_base_address, intended_memory_size, KMemoryRegionType_Dram));
    ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
        physical_memory_base_address, ReservedEarlyDramSize, KMemoryRegionType_DramReservedEarly));

    // Insert the KTrace block at the end of Dram, if KTrace is enabled.
    static_assert(!IsKTraceEnabled || KTraceBufferSize > 0);
    if constexpr (IsKTraceEnabled) {
        const PAddr ktrace_buffer_phys_addr =
            physical_memory_base_address + intended_memory_size - KTraceBufferSize;
        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
            ktrace_buffer_phys_addr, KTraceBufferSize, KMemoryRegionType_KernelTraceBuffer));
    }
}
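
Note: the Insert calls overlap by design. DramReservedEarly and KernelTraceBuffer are carved out of the Dram region inserted just before them, relying on the split logic of KMemoryRegionTree::Insert in k_memory_layout.cpp below. A worked example of the KTrace placement, with purely illustrative values:

    // Hypothetical 4 GiB board with a 64 KiB trace buffer:
    //   physical_memory_base_address = 0x80000000
    //   intended_memory_size         = 0x100000000
    //   KTraceBufferSize             = 0x10000
    //   ktrace_buffer_phys_addr      = 0x80000000 + 0x100000000 - 0x10000 = 0x17FFF0000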

void SetupPoolPartitionMemoryRegions(KMemoryLayout& memory_layout) {
    // Start by identifying the extents of the DRAM memory region.
    const auto dram_extents = memory_layout.GetMainMemoryPhysicalExtents();
    ASSERT(dram_extents.GetEndAddress() != 0);

    // Determine the end of the pool region.
    const u64 pool_end = dram_extents.GetEndAddress() - KTraceBufferSize;

    // Find the start of the kernel DRAM region.
    const KMemoryRegion* kernel_dram_region =
        memory_layout.GetPhysicalMemoryRegionTree().FindFirstDerived(
            KMemoryRegionType_DramKernelBase);
    ASSERT(kernel_dram_region != nullptr);

    const u64 kernel_dram_start = kernel_dram_region->GetAddress();
    ASSERT(Common::IsAligned(kernel_dram_start, CarveoutAlignment));

    // Find the start of the pool partitions region.
    const KMemoryRegion* pool_partitions_region =
        memory_layout.GetPhysicalMemoryRegionTree().FindByTypeAndAttribute(
            KMemoryRegionType_DramPoolPartition, 0);
    ASSERT(pool_partitions_region != nullptr);
    const u64 pool_partitions_start = pool_partitions_region->GetAddress();

    // Set up the pool partition layouts.
    // On 5.0.0+, set up the modern 4-pool-partition layout.

    // Get the Application and Applet pool sizes.
    const size_t application_pool_size = KSystemControl::Init::GetApplicationPoolSize();
    const size_t applet_pool_size = KSystemControl::Init::GetAppletPoolSize();
    const size_t unsafe_system_pool_min_size =
        KSystemControl::Init::GetMinimumNonSecureSystemPoolSize();

    // Decide on starting addresses for our pools.
    const u64 application_pool_start = pool_end - application_pool_size;
    const u64 applet_pool_start = application_pool_start - applet_pool_size;
    const u64 unsafe_system_pool_start = std::min(
        kernel_dram_start + CarveoutSizeMax,
        Common::AlignDown(applet_pool_start - unsafe_system_pool_min_size, CarveoutAlignment));
    const size_t unsafe_system_pool_size = applet_pool_start - unsafe_system_pool_start;

    // We want to arrange the application pool depending on where the middle of DRAM is.
    const u64 dram_midpoint = (dram_extents.GetAddress() + dram_extents.GetEndAddress()) / 2;
    u32 cur_pool_attr = 0;
    size_t total_overhead_size = 0;
    if (dram_extents.GetEndAddress() <= dram_midpoint || dram_midpoint <= application_pool_start) {
        InsertPoolPartitionRegionIntoBothTrees(
            memory_layout, application_pool_start, application_pool_size,
            KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool,
            cur_pool_attr);
        total_overhead_size +=
            KMemoryManager::CalculateManagementOverheadSize(application_pool_size);
    } else {
        const size_t first_application_pool_size = dram_midpoint - application_pool_start;
        const size_t second_application_pool_size =
            application_pool_start + application_pool_size - dram_midpoint;
        InsertPoolPartitionRegionIntoBothTrees(
            memory_layout, application_pool_start, first_application_pool_size,
            KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool,
            cur_pool_attr);
        InsertPoolPartitionRegionIntoBothTrees(
            memory_layout, dram_midpoint, second_application_pool_size,
            KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool,
            cur_pool_attr);
        total_overhead_size +=
            KMemoryManager::CalculateManagementOverheadSize(first_application_pool_size);
        total_overhead_size +=
            KMemoryManager::CalculateManagementOverheadSize(second_application_pool_size);
    }

    // Insert the applet pool.
    InsertPoolPartitionRegionIntoBothTrees(memory_layout, applet_pool_start, applet_pool_size,
                                           KMemoryRegionType_DramAppletPool,
                                           KMemoryRegionType_VirtualDramAppletPool, cur_pool_attr);
    total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(applet_pool_size);

    // Insert the non-secure system pool.
    InsertPoolPartitionRegionIntoBothTrees(
        memory_layout, unsafe_system_pool_start, unsafe_system_pool_size,
        KMemoryRegionType_DramSystemNonSecurePool, KMemoryRegionType_VirtualDramSystemNonSecurePool,
        cur_pool_attr);
    total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(unsafe_system_pool_size);

    // Insert the pool management region.
    total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(
        (unsafe_system_pool_start - pool_partitions_start) - total_overhead_size);
    const u64 pool_management_start = unsafe_system_pool_start - total_overhead_size;
    const size_t pool_management_size = total_overhead_size;
    u32 pool_management_attr = 0;
    InsertPoolPartitionRegionIntoBothTrees(
        memory_layout, pool_management_start, pool_management_size,
        KMemoryRegionType_DramPoolManagement, KMemoryRegionType_VirtualDramPoolManagement,
        pool_management_attr);

    // Insert the system pool.
    const u64 system_pool_size = pool_management_start - pool_partitions_start;
    InsertPoolPartitionRegionIntoBothTrees(memory_layout, pool_partitions_start, system_pool_size,
                                           KMemoryRegionType_DramSystemPool,
                                           KMemoryRegionType_VirtualDramSystemPool, cur_pool_attr);
}
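
Note: the pools are carved from the top of DRAM downward (application pool first), and the system pool absorbs whatever remains above pool_partitions_start once the management overhead has been subtracted. The resulting physical ordering, sketched with illustrative proportions (the application pool may additionally be split at the DRAM midpoint):

    // pool_partitions_start                                                pool_end
    // | system pool | pool management | non-secure system | applet | application |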

} // namespace Init

} // namespace Kernel
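
For context, a rough sketch of how these Init helpers are sequenced during bootstrap. The order below is inferred from the kernel.cpp changes in this PR (not reproduced here), and the intermediate kernel-region setup is elided:

    // Illustrative bootstrap order, not verbatim from kernel.cpp:
    KMemoryLayout memory_layout;
    Init::SetupDevicePhysicalMemoryRegions(memory_layout);
    Init::SetupDramPhysicalMemoryRegions(memory_layout);
    // ... DramKernelBase / DramPoolPartition regions are inserted here ...
    Init::SetupPoolPartitionMemoryRegions(memory_layout);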

src/core/hle/kernel/k_memory_layout.cpp
@@ -0,0 +1,183 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/alignment.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_system_control.h"

namespace Kernel {

namespace {

// Fixed-capacity arena for KMemoryRegion nodes: regions are placement-constructed into a
// static buffer, mirroring the real kernel, which builds these trees before a heap exists.
class KMemoryRegionAllocator final : NonCopyable {
public:
    static constexpr size_t MaxMemoryRegions = 200;

private:
    KMemoryRegion region_heap[MaxMemoryRegions]{};
    size_t num_regions{};

public:
    constexpr KMemoryRegionAllocator() = default;

public:
    template <typename... Args>
    KMemoryRegion* Allocate(Args&&... args) {
        // Ensure we stay within the bounds of our heap.
        ASSERT(this->num_regions < MaxMemoryRegions);

        // Create the new region.
        KMemoryRegion* region = std::addressof(this->region_heap[this->num_regions++]);
        new (region) KMemoryRegion(std::forward<Args>(args)...);

        return region;
    }
};

KMemoryRegionAllocator g_memory_region_allocator;

template <typename... Args>
KMemoryRegion* AllocateRegion(Args&&... args) {
    return g_memory_region_allocator.Allocate(std::forward<Args>(args)...);
}

} // namespace

void KMemoryRegionTree::InsertDirectly(u64 address, u64 last_address, u32 attr, u32 type_id) {
    this->insert(*AllocateRegion(address, last_address, attr, type_id));
}

bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_attr, u32 old_attr) {
    // Locate the memory region that contains the address.
    KMemoryRegion* found = this->FindModifiable(address);

    // We require that the old attr is correct.
    if (found->GetAttributes() != old_attr) {
        return false;
    }

    // We further require that the region can be split from the old region.
    const u64 inserted_region_end = address + size;
    const u64 inserted_region_last = inserted_region_end - 1;
    if (found->GetLastAddress() < inserted_region_last) {
        return false;
    }

    // Further, we require that the type id is a valid transformation.
    if (!found->CanDerive(type_id)) {
        return false;
    }

    // Cache information from the region before we remove it.
    const u64 old_address = found->GetAddress();
    const u64 old_last = found->GetLastAddress();
    const u64 old_pair = found->GetPairAddress();
    const u32 old_type = found->GetType();

    // Erase the existing region from the tree.
    this->erase(this->iterator_to(*found));

    // Insert the new region into the tree.
    if (old_address == address) {
        // Reuse the old object for the new region, if we can.
        found->Reset(address, inserted_region_last, old_pair, new_attr, type_id);
        this->insert(*found);
    } else {
        // If we can't re-use, adjust the old region.
        found->Reset(old_address, address - 1, old_pair, old_attr, old_type);
        this->insert(*found);

        // Insert a new region for the split.
        const u64 new_pair = (old_pair != std::numeric_limits<u64>::max())
                                 ? old_pair + (address - old_address)
                                 : old_pair;
        this->insert(*AllocateRegion(address, inserted_region_last, new_pair, new_attr, type_id));
    }

    // If we need to insert a region after the inserted region, do so.
    if (old_last != inserted_region_last) {
        const u64 after_pair = (old_pair != std::numeric_limits<u64>::max())
                                   ? old_pair + (inserted_region_end - old_address)
                                   : old_pair;
        this->insert(
            *AllocateRegion(inserted_region_end, old_last, after_pair, old_attr, old_type));
    }

    return true;
}
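
Note: Insert splits the containing node into at most three pieces: an optional prefix that keeps the old type, the newly typed middle, and an optional suffix. Pair addresses advance by the same offsets, so physical/virtual pairing survives the split (a pair of u64-max means "no pair" and is left untouched). A worked example with hypothetical addresses and types:

    // Before: one node [0x0000, 0x7FFF], type Dram.
    // Insert(address = 0x1000, size = 0x2000, type_id = DramKernelCode) yields:
    //   [0x0000, 0x0FFF]  type Dram            (old node, shrunk and re-inserted)
    //   [0x1000, 0x2FFF]  type DramKernelCode  (newly allocated node)
    //   [0x3000, 0x7FFF]  type Dram            (newly allocated suffix node)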

VAddr KMemoryRegionTree::GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id) {
    // We want to find the total extents of the type id.
    const auto extents = this->GetDerivedRegionExtents(static_cast<KMemoryRegionType>(type_id));

    // Ensure that our alignment is correct.
    ASSERT(Common::IsAligned(extents.GetAddress(), alignment));

    const u64 first_address = extents.GetAddress();
    const u64 last_address = extents.GetLastAddress();

    const u64 first_index = first_address / alignment;
    const u64 last_index = last_address / alignment;

    while (true) {
        const u64 candidate =
            KSystemControl::GenerateRandomRange(first_index, last_index) * alignment;

        // Ensure that the candidate doesn't overflow with the size.
        if (!(candidate < candidate + size)) {
            continue;
        }

        const u64 candidate_last = candidate + size - 1;

        // Ensure that the candidate fits within the region.
        if (candidate_last > last_address) {
            continue;
        }

        // Locate the candidate region, and ensure it fits and has the correct type id.
        if (const auto& candidate_region = *this->Find(candidate);
            !(candidate_last <= candidate_region.GetLastAddress() &&
              candidate_region.GetType() == type_id)) {
            continue;
        }

        return candidate;
    }
}
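
Note: this loop is rejection sampling. It draws an aligned candidate anywhere in the derived extents, then retries until [candidate, candidate + size) neither overflows nor runs past the extents, and lands wholly inside a single region of the requested type. A hedged usage sketch; the region type, alignment, and names are illustrative:

    // E.g. choose a randomized, 2 MiB-aligned base for a kernel mapping:
    const VAddr base = tree.GetRandomAlignedRegion(map_size, 0x200000, KMemoryRegionType_KernelMisc);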

void KMemoryLayout::InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start,
                                                      VAddr linear_virtual_start) {
    // Set static differences.
    linear_phys_to_virt_diff = linear_virtual_start - aligned_linear_phys_start;
    linear_virt_to_phys_diff = aligned_linear_phys_start - linear_virtual_start;

    // Initialize linear trees.
    for (auto& region : GetPhysicalMemoryRegionTree()) {
        if (region.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) {
            GetPhysicalLinearMemoryRegionTree().InsertDirectly(
                region.GetAddress(), region.GetLastAddress(), region.GetAttributes(),
                region.GetType());
        }
    }

    for (auto& region : GetVirtualMemoryRegionTree()) {
        if (region.IsDerivedFrom(KMemoryRegionType_Dram)) {
            GetVirtualLinearMemoryRegionTree().InsertDirectly(
                region.GetAddress(), region.GetLastAddress(), region.GetAttributes(),
                region.GetType());
        }
    }
}
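
Note: once the two differences are cached, linear phys/virt translation is a single addition in either direction. A sketch of the accessors this enables; the names are assumed from k_memory_layout.h in this PR, which is not reproduced above:

    // Presumed accessors in k_memory_layout.h:
    VAddr GetLinearVirtualAddress(PAddr address) const {
        return address + linear_phys_to_virt_diff;
    }
    PAddr GetLinearPhysicalAddress(VAddr address) const {
        return address + linear_virt_to_phys_diff;
    }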

size_t KMemoryLayout::GetResourceRegionSizeForInit() {
    // Calculate resource region size based on whether we allow extra threads.
    const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit();
    size_t resource_region_size =
        KernelResourceSize + (use_extra_resources ? KernelSlabHeapAdditionalSize : 0);

    return resource_region_size;
}

} // namespace Kernel