Merge pull request #9071 from bunnei/mp-mm
Kernel Multiprocess (Part 1) - Persist memory & core timing
committed by GitHub
41 changed files with 2441 additions and 1239 deletions
3      src/core/CMakeLists.txt
8      src/core/arm/arm_interface.cpp
92     src/core/core.cpp
10     src/core/core.h
34     src/core/core_timing.cpp
14     src/core/core_timing.h
10     src/core/device_memory.h
6      src/core/hle/kernel/init/init_slab_setup.cpp
2      src/core/hle/kernel/k_code_memory.cpp
136    src/core/hle/kernel/k_dynamic_page_manager.h
58     src/core/hle/kernel/k_dynamic_resource_manager.h
122    src/core/hle/kernel/k_dynamic_slab_heap.h
29     src/core/hle/kernel/k_interrupt_manager.cpp
4      src/core/hle/kernel/k_interrupt_manager.h
506    src/core/hle/kernel/k_memory_block.h
409    src/core/hle/kernel/k_memory_block_manager.cpp
145    src/core/hle/kernel/k_memory_block_manager.h
2      src/core/hle/kernel/k_memory_manager.cpp
2      src/core/hle/kernel/k_page_buffer.cpp
1302   src/core/hle/kernel/k_page_table.cpp
319    src/core/hle/kernel/k_page_table.h
112    src/core/hle/kernel/k_process.cpp
83     src/core/hle/kernel/k_process.h
2      src/core/hle/kernel/k_shared_memory.cpp
4      src/core/hle/kernel/k_shared_memory.h
115    src/core/hle/kernel/k_thread.cpp
4      src/core/hle/kernel/k_thread.h
68     src/core/hle/kernel/kernel.cpp
10     src/core/hle/kernel/kernel.h
6      src/core/hle/kernel/svc.cpp
7      src/core/hle/kernel/svc_common.h
13     src/core/hle/kernel/svc_types.h
11     src/core/hle/result.h
4      src/core/hle/service/ldr/ldr.cpp
3      src/core/hle/service/nvdrv/devices/nvmap.cpp
6      src/core/memory.cpp
3      src/tests/core/core_timing.cpp
7      src/video_core/renderer_vulkan/vk_query_cache.cpp
4      src/yuzu/bootmanager.cpp
1      src/yuzu/main.cpp
4      src/yuzu_cmd/yuzu.cpp
src/core/hle/kernel/k_dynamic_page_manager.h
@@ -0,0 +1,136 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include "common/alignment.h"
#include "common/common_types.h"
#include "core/hle/kernel/k_page_bitmap.h"
#include "core/hle/kernel/k_spin_lock.h"
#include "core/hle/kernel/memory_types.h"
#include "core/hle/kernel/svc_results.h"

namespace Kernel {

class KDynamicPageManager {
public:
    class PageBuffer {
    private:
        u8 m_buffer[PageSize];
    };
    static_assert(sizeof(PageBuffer) == PageSize);

public:
    KDynamicPageManager() = default;

    template <typename T>
    T* GetPointer(VAddr addr) {
        return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address));
    }

    template <typename T>
    const T* GetPointer(VAddr addr) const {
        return reinterpret_cast<const T*>(m_backing_memory.data() + (addr - m_address));
    }

    Result Initialize(VAddr addr, size_t sz) {
        // We need to have positive size.
        R_UNLESS(sz > 0, ResultOutOfMemory);
        m_backing_memory.resize(sz);

        // Calculate management overhead.
        const size_t management_size =
            KPageBitmap::CalculateManagementOverheadSize(sz / sizeof(PageBuffer));
        const size_t allocatable_size = sz - management_size;

        // Set tracking fields.
        m_address = addr;
        m_size = Common::AlignDown(allocatable_size, sizeof(PageBuffer));
        m_count = allocatable_size / sizeof(PageBuffer);
        R_UNLESS(m_count > 0, ResultOutOfMemory);

        // Clear the management region.
        u64* management_ptr = GetPointer<u64>(m_address + allocatable_size);
        std::memset(management_ptr, 0, management_size);

        // Initialize the bitmap.
        m_page_bitmap.Initialize(management_ptr, m_count);

        // Free the pages to the bitmap.
        for (size_t i = 0; i < m_count; i++) {
            // Ensure the freed page is all-zero.
            std::memset(GetPointer<PageBuffer>(m_address) + i, 0, PageSize);

            // Set the bit for the free page.
            m_page_bitmap.SetBit(i);
        }

        R_SUCCEED();
    }

    VAddr GetAddress() const {
        return m_address;
    }
    size_t GetSize() const {
        return m_size;
    }
    size_t GetUsed() const {
        return m_used;
    }
    size_t GetPeak() const {
        return m_peak;
    }
    size_t GetCount() const {
        return m_count;
    }

    PageBuffer* Allocate() {
        // Take the lock.
        // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
        KScopedSpinLock lk(m_lock);

        // Find a random free block.
        s64 soffset = m_page_bitmap.FindFreeBlock(true);
        if (soffset < 0) [[unlikely]] {
            return nullptr;
        }

        const size_t offset = static_cast<size_t>(soffset);

        // Update our tracking.
        m_page_bitmap.ClearBit(offset);
        m_peak = std::max(m_peak, (++m_used));

        return GetPointer<PageBuffer>(m_address) + offset;
    }

    void Free(PageBuffer* pb) {
        // Ensure all pages in the heap are zero.
        std::memset(pb, 0, PageSize);

        // Take the lock.
        // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
        KScopedSpinLock lk(m_lock);

        // Set the bit for the free page.
        size_t offset = (reinterpret_cast<uintptr_t>(pb) - m_address) / sizeof(PageBuffer);
        m_page_bitmap.SetBit(offset);

        // Decrement our used count.
        --m_used;
    }

private:
    KSpinLock m_lock;
    KPageBitmap m_page_bitmap;
    size_t m_used{};
    size_t m_peak{};
    size_t m_count{};
    VAddr m_address{};
    size_t m_size{};

    // TODO(bunnei): Back by host memory until we emulate kernel virtual address space.
    std::vector<u8> m_backing_memory;
};

} // namespace Kernel
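For orientation, a minimal usage sketch of this page manager; the base address, region size, and setup context below are placeholder assumptions, not values taken from this PR:

// Hypothetical usage sketch; the real callers are in the kernel initialization code.
Kernel::KDynamicPageManager page_manager;

// Back the manager with a small region (placeholder address and size).
const VAddr base = 0x10000000;
const size_t region_size = 16 * Kernel::PageSize;
const Result result = page_manager.Initialize(base, region_size);
ASSERT(result == ResultSuccess);

// Allocate() hands out a zeroed, page-sized PageBuffer; Free() zeroes it and returns it.
auto* page = page_manager.Allocate();
ASSERT(page != nullptr);
page_manager.Free(page);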
src/core/hle/kernel/k_dynamic_resource_manager.h
@@ -0,0 +1,58 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include "common/common_funcs.h"
#include "core/hle/kernel/k_dynamic_slab_heap.h"
#include "core/hle/kernel/k_memory_block.h"

namespace Kernel {

template <typename T, bool ClearNode = false>
class KDynamicResourceManager {
    YUZU_NON_COPYABLE(KDynamicResourceManager);
    YUZU_NON_MOVEABLE(KDynamicResourceManager);

public:
    using DynamicSlabType = KDynamicSlabHeap<T, ClearNode>;

public:
    constexpr KDynamicResourceManager() = default;

    constexpr size_t GetSize() const {
        return m_slab_heap->GetSize();
    }
    constexpr size_t GetUsed() const {
        return m_slab_heap->GetUsed();
    }
    constexpr size_t GetPeak() const {
        return m_slab_heap->GetPeak();
    }
    constexpr size_t GetCount() const {
        return m_slab_heap->GetCount();
    }

    void Initialize(KDynamicPageManager* page_allocator, DynamicSlabType* slab_heap) {
        m_page_allocator = page_allocator;
        m_slab_heap = slab_heap;
    }

    T* Allocate() const {
        return m_slab_heap->Allocate(m_page_allocator);
    }

    void Free(T* t) const {
        m_slab_heap->Free(t);
    }

private:
    KDynamicPageManager* m_page_allocator{};
    DynamicSlabType* m_slab_heap{};
};

class KMemoryBlockSlabManager : public KDynamicResourceManager<KMemoryBlock> {};

using KMemoryBlockSlabHeap = typename KMemoryBlockSlabManager::DynamicSlabType;

} // namespace Kernel
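A rough sketch of how a KMemoryBlockSlabManager might be wired to a page manager and its slab heap; the object count and setup order here are assumptions for illustration, not taken from this PR:

// Illustrative wiring only; the actual setup in this PR lives elsewhere (e.g. kernel.cpp).
Kernel::KDynamicPageManager page_manager;      // assumed already Initialize()'d
Kernel::KMemoryBlockSlabHeap block_heap;
Kernel::KMemoryBlockSlabManager block_manager;

// Pre-populate the slab heap from the page manager, then hand both pieces to the manager.
block_heap.Initialize(&page_manager, 0x100);   // 0x100 objects is a placeholder count
block_manager.Initialize(&page_manager, &block_heap);

// Memory blocks are then allocated and freed through the manager.
Kernel::KMemoryBlock* block = block_manager.Allocate();
block_manager.Free(block);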
src/core/hle/kernel/k_dynamic_slab_heap.h
@@ -0,0 +1,122 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <atomic>

#include "common/common_funcs.h"
#include "core/hle/kernel/k_dynamic_page_manager.h"
#include "core/hle/kernel/k_slab_heap.h"

namespace Kernel {

template <typename T, bool ClearNode = false>
class KDynamicSlabHeap : protected impl::KSlabHeapImpl {
    YUZU_NON_COPYABLE(KDynamicSlabHeap);
    YUZU_NON_MOVEABLE(KDynamicSlabHeap);

public:
    constexpr KDynamicSlabHeap() = default;

    constexpr VAddr GetAddress() const {
        return m_address;
    }
    constexpr size_t GetSize() const {
        return m_size;
    }
    constexpr size_t GetUsed() const {
        return m_used.load();
    }
    constexpr size_t GetPeak() const {
        return m_peak.load();
    }
    constexpr size_t GetCount() const {
        return m_count.load();
    }

    constexpr bool IsInRange(VAddr addr) const {
        return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1;
    }

    void Initialize(KDynamicPageManager* page_allocator, size_t num_objects) {
        ASSERT(page_allocator != nullptr);

        // Initialize members.
        m_address = page_allocator->GetAddress();
        m_size = page_allocator->GetSize();

        // Initialize the base allocator.
        KSlabHeapImpl::Initialize();

        // Allocate until we have the correct number of objects.
        while (m_count.load() < num_objects) {
            auto* allocated = reinterpret_cast<T*>(page_allocator->Allocate());
            ASSERT(allocated != nullptr);

            for (size_t i = 0; i < sizeof(PageBuffer) / sizeof(T); i++) {
                KSlabHeapImpl::Free(allocated + i);
            }

            m_count += sizeof(PageBuffer) / sizeof(T);
        }
    }

    T* Allocate(KDynamicPageManager* page_allocator) {
        T* allocated = static_cast<T*>(KSlabHeapImpl::Allocate());

        // If we successfully allocated and we should clear the node, do so.
        if constexpr (ClearNode) {
            if (allocated != nullptr) [[likely]] {
                reinterpret_cast<KSlabHeapImpl::Node*>(allocated)->next = nullptr;
            }
        }

        // If we fail to allocate, try to get a new page from our next allocator.
        if (allocated == nullptr) [[unlikely]] {
            if (page_allocator != nullptr) {
                allocated = reinterpret_cast<T*>(page_allocator->Allocate());
                if (allocated != nullptr) {
                    // If we succeeded in getting a page, free the rest to our slab.
                    for (size_t i = 1; i < sizeof(PageBuffer) / sizeof(T); i++) {
                        KSlabHeapImpl::Free(allocated + i);
                    }
                    m_count += sizeof(PageBuffer) / sizeof(T);
                }
            }
        }

        if (allocated != nullptr) [[likely]] {
            // Construct the object.
            std::construct_at(allocated);

            // Update our tracking.
            const size_t used = ++m_used;
            size_t peak = m_peak.load();
            while (peak < used) {
                if (m_peak.compare_exchange_weak(peak, used, std::memory_order_relaxed)) {
                    break;
                }
            }
        }

        return allocated;
    }

    void Free(T* t) {
        KSlabHeapImpl::Free(t);
        --m_used;
    }

private:
    using PageBuffer = KDynamicPageManager::PageBuffer;

private:
    std::atomic<size_t> m_used{};
    std::atomic<size_t> m_peak{};
    std::atomic<size_t> m_count{};
    VAddr m_address{};
    size_t m_size{};
};

} // namespace Kernel
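The peak-update loop in Allocate() above is the usual lock-free "monotonic maximum" idiom. A standalone restatement of the same pattern (a hand-written example, not code from this PR):

#include <atomic>
#include <cstddef>

// Raise `peak` to at least `used` without ever lowering it. On failure,
// compare_exchange_weak reloads the current value into `observed`, so the loop
// exits as soon as another thread has already published a value >= used.
void UpdatePeak(std::atomic<std::size_t>& peak, std::size_t used) {
    std::size_t observed = peak.load(std::memory_order_relaxed);
    while (observed < used) {
        if (peak.compare_exchange_weak(observed, used, std::memory_order_relaxed)) {
            break;
        }
    }
}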
1302   src/core/hle/kernel/k_page_table.cpp (file diff suppressed because it is too large)