Browse Source
Merge pull request #762 from yuriks/memmap
Merge pull request #762 from yuriks/memmap
Memory: Use a table based lookup scheme to read from memory regions
38 changed files with 572 additions and 589 deletions
-
2src/citra_qt/debugger/callstack.cpp
-
2src/citra_qt/debugger/disassembler.cpp
-
2src/citra_qt/debugger/graphics_framebuffer.cpp
-
2src/citra_qt/debugger/ramview.cpp
-
4src/core/CMakeLists.txt
-
2src/core/arm/dyncom/arm_dyncom_interpreter.cpp
-
1src/core/arm/interpreter/arminit.cpp
-
2src/core/arm/skyeye_common/armmmu.h
-
1src/core/core.cpp
-
2src/core/file_sys/archive_backend.cpp
-
49src/core/hle/config_mem.cpp
-
38src/core/hle/config_mem.h
-
6src/core/hle/function_wrappers.h
-
2src/core/hle/kernel/address_arbiter.cpp
-
2src/core/hle/kernel/process.cpp
-
2src/core/hle/kernel/session.h
-
2src/core/hle/kernel/shared_memory.cpp
-
2src/core/hle/kernel/thread.cpp
-
1src/core/hle/kernel/thread.h
-
1src/core/hle/service/gsp_gpu.cpp
-
57src/core/hle/shared_page.cpp
-
38src/core/hle/shared_page.h
-
2src/core/hle/svc.cpp
-
2src/core/hw/gpu.cpp
-
2src/core/loader/3dsx.cpp
-
2src/core/loader/elf.cpp
-
1src/core/loader/loader.cpp
-
2src/core/loader/ncch.cpp
-
122src/core/mem_map.cpp
-
157src/core/mem_map.h
-
283src/core/mem_map_funcs.cpp
-
202src/core/memory.cpp
-
129src/core/memory.h
-
29src/core/memory_setup.h
-
2src/video_core/pica.h
-
2src/video_core/rasterizer.cpp
-
2src/video_core/renderer_opengl/renderer_opengl.cpp
-
2src/video_core/vertex_shader.cpp
@ -1,283 +0,0 @@ |
|||
// Copyright 2014 Citra Emulator Project
|
|||
// Licensed under GPLv2 or any later version
|
|||
// Refer to the license.txt file included.
|
|||
|
|||
#include <map>
|
|||
|
|||
#include "common/common_types.h"
|
|||
#include "common/logging/log.h"
|
|||
#include "common/swap.h"
|
|||
|
|||
#include "core/mem_map.h"
|
|||
#include "core/hw/hw.h"
|
|||
#include "hle/config_mem.h"
|
|||
#include "hle/shared_page.h"
|
|||
|
|||
namespace Memory { |
|||
|
|||
// Bookkeeping for blocks allocated out of the application heap and the linear heap,
// keyed by the block's virtual address. Populated by MapBlock_Heap/MapBlock_HeapLinear
// below and cleared in MemBlock_Shutdown().
static std::map<u32, MemoryBlock> heap_map;
static std::map<u32, MemoryBlock> heap_linear_map;
|||
|
|||
/// Translates an emulated virtual address to the corresponding physical address.
/// Only the statically-mapped 1:1 regions (VRAM, linear heap, DSP RAM, IO) are handled;
/// any other address is reported and returned with the top bit set so it is obviously invalid.
PAddr VirtualToPhysicalAddress(const VAddr addr) {
    if (addr == 0)
        return 0;
    if (addr >= VRAM_VADDR && addr < VRAM_VADDR_END)
        return addr - VRAM_VADDR + VRAM_PADDR;
    if (addr >= LINEAR_HEAP_VADDR && addr < LINEAR_HEAP_VADDR_END)
        return addr - LINEAR_HEAP_VADDR + FCRAM_PADDR;
    if (addr >= DSP_RAM_VADDR && addr < DSP_RAM_VADDR_END)
        return addr - DSP_RAM_VADDR + DSP_RAM_PADDR;
    if (addr >= IO_AREA_VADDR && addr < IO_AREA_VADDR_END)
        return addr - IO_AREA_VADDR + IO_AREA_PADDR;

    LOG_ERROR(HW_Memory, "Unknown virtual address @ 0x%08x", addr);
    // To help with debugging, set bit on address so that it's obviously invalid.
    return addr | 0x80000000;
}
|||
|
|||
/// Inverse of VirtualToPhysicalAddress: maps a physical address back to the virtual
/// address of its 1:1 mapping. Unknown addresses are reported and returned with the
/// top bit set so they are obviously invalid.
VAddr PhysicalToVirtualAddress(const PAddr addr) {
    if (addr == 0)
        return 0;
    if (addr >= VRAM_PADDR && addr < VRAM_PADDR_END)
        return addr - VRAM_PADDR + VRAM_VADDR;
    if (addr >= FCRAM_PADDR && addr < FCRAM_PADDR_END)
        return addr - FCRAM_PADDR + LINEAR_HEAP_VADDR;
    if (addr >= DSP_RAM_PADDR && addr < DSP_RAM_PADDR_END)
        return addr - DSP_RAM_PADDR + DSP_RAM_VADDR;
    if (addr >= IO_AREA_PADDR && addr < IO_AREA_PADDR_END)
        return addr - IO_AREA_PADDR + IO_AREA_VADDR;

    LOG_ERROR(HW_Memory, "Unknown physical address @ 0x%08x", addr);
    // To help with debugging, set bit on address so that it's obviously invalid.
    return addr | 0x80000000;
}
|||
|
|||
template <typename T> |
|||
inline void Read(T &var, const VAddr vaddr) { |
|||
// TODO: Figure out the fastest order of tests for both read and write (they are probably different).
|
|||
// TODO: Make sure this represents the mirrors in a correct way.
|
|||
// Could just do a base-relative read, too.... TODO
|
|||
|
|||
// Kernel memory command buffer
|
|||
if (vaddr >= TLS_AREA_VADDR && vaddr < TLS_AREA_VADDR_END) { |
|||
var = *((const T*)&g_tls_mem[vaddr - TLS_AREA_VADDR]); |
|||
|
|||
// ExeFS:/.code is loaded here
|
|||
} else if ((vaddr >= PROCESS_IMAGE_VADDR) && (vaddr < PROCESS_IMAGE_VADDR_END)) { |
|||
var = *((const T*)&g_exefs_code[vaddr - PROCESS_IMAGE_VADDR]); |
|||
|
|||
// FCRAM - linear heap
|
|||
} else if ((vaddr >= LINEAR_HEAP_VADDR) && (vaddr < LINEAR_HEAP_VADDR_END)) { |
|||
var = *((const T*)&g_heap_linear[vaddr - LINEAR_HEAP_VADDR]); |
|||
|
|||
// FCRAM - application heap
|
|||
} else if ((vaddr >= HEAP_VADDR) && (vaddr < HEAP_VADDR_END)) { |
|||
var = *((const T*)&g_heap[vaddr - HEAP_VADDR]); |
|||
|
|||
// Shared memory
|
|||
} else if ((vaddr >= SHARED_MEMORY_VADDR) && (vaddr < SHARED_MEMORY_VADDR_END)) { |
|||
var = *((const T*)&g_shared_mem[vaddr - SHARED_MEMORY_VADDR]); |
|||
|
|||
// Config memory
|
|||
} else if ((vaddr >= CONFIG_MEMORY_VADDR) && (vaddr < CONFIG_MEMORY_VADDR_END)) { |
|||
ConfigMem::Read<T>(var, vaddr); |
|||
|
|||
// Shared page
|
|||
} else if ((vaddr >= SHARED_PAGE_VADDR) && (vaddr < SHARED_PAGE_VADDR_END)) { |
|||
SharedPage::Read<T>(var, vaddr); |
|||
|
|||
// DSP memory
|
|||
} else if ((vaddr >= DSP_RAM_VADDR) && (vaddr < DSP_RAM_VADDR_END)) { |
|||
var = *((const T*)&g_dsp_mem[vaddr - DSP_RAM_VADDR]); |
|||
|
|||
// VRAM
|
|||
} else if ((vaddr >= VRAM_VADDR) && (vaddr < VRAM_VADDR_END)) { |
|||
var = *((const T*)&g_vram[vaddr - VRAM_VADDR]); |
|||
|
|||
} else { |
|||
LOG_ERROR(HW_Memory, "unknown Read%lu @ 0x%08X", sizeof(var) * 8, vaddr); |
|||
} |
|||
} |
|||
|
|||
template <typename T> |
|||
inline void Write(const VAddr vaddr, const T data) { |
|||
|
|||
// Kernel memory command buffer
|
|||
if (vaddr >= TLS_AREA_VADDR && vaddr < TLS_AREA_VADDR_END) { |
|||
*(T*)&g_tls_mem[vaddr - TLS_AREA_VADDR] = data; |
|||
|
|||
// ExeFS:/.code is loaded here
|
|||
} else if ((vaddr >= PROCESS_IMAGE_VADDR) && (vaddr < PROCESS_IMAGE_VADDR_END)) { |
|||
*(T*)&g_exefs_code[vaddr - PROCESS_IMAGE_VADDR] = data; |
|||
|
|||
// FCRAM - linear heap
|
|||
} else if ((vaddr >= LINEAR_HEAP_VADDR) && (vaddr < LINEAR_HEAP_VADDR_END)) { |
|||
*(T*)&g_heap_linear[vaddr - LINEAR_HEAP_VADDR] = data; |
|||
|
|||
// FCRAM - application heap
|
|||
} else if ((vaddr >= HEAP_VADDR) && (vaddr < HEAP_VADDR_END)) { |
|||
*(T*)&g_heap[vaddr - HEAP_VADDR] = data; |
|||
|
|||
// Shared memory
|
|||
} else if ((vaddr >= SHARED_MEMORY_VADDR) && (vaddr < SHARED_MEMORY_VADDR_END)) { |
|||
*(T*)&g_shared_mem[vaddr - SHARED_MEMORY_VADDR] = data; |
|||
|
|||
// VRAM
|
|||
} else if ((vaddr >= VRAM_VADDR) && (vaddr < VRAM_VADDR_END)) { |
|||
*(T*)&g_vram[vaddr - VRAM_VADDR] = data; |
|||
|
|||
// DSP memory
|
|||
} else if ((vaddr >= DSP_RAM_VADDR) && (vaddr < DSP_RAM_VADDR_END)) { |
|||
*(T*)&g_dsp_mem[vaddr - DSP_RAM_VADDR] = data; |
|||
|
|||
//} else if ((vaddr & 0xFFFF0000) == 0x1FF80000) {
|
|||
// ASSERT_MSG(MEMMAP, false, "umimplemented write to Configuration Memory");
|
|||
//} else if ((vaddr & 0xFFFFF000) == 0x1FF81000) {
|
|||
// ASSERT_MSG(MEMMAP, false, "umimplemented write to shared page");
|
|||
|
|||
// Error out...
|
|||
} else { |
|||
LOG_ERROR(HW_Memory, "unknown Write%lu 0x%08X @ 0x%08X", sizeof(data) * 8, (u32)data, vaddr); |
|||
} |
|||
} |
|||
|
|||
/**
 * Gets a host pointer to the backing memory for an emulated virtual address.
 * @param vaddr Emulated virtual address.
 * @return Host pointer into the corresponding backing buffer, or nullptr if the address
 *         does not fall inside any directly-backed region (config memory / shared page
 *         have no raw backing here and also yield nullptr).
 */
u8 *GetPointer(const VAddr vaddr) {
    // Kernel memory command buffer
    if (vaddr >= TLS_AREA_VADDR && vaddr < TLS_AREA_VADDR_END) {
        return g_tls_mem + (vaddr - TLS_AREA_VADDR);

    // ExeFS:/.code is loaded here
    } else if ((vaddr >= PROCESS_IMAGE_VADDR) && (vaddr < PROCESS_IMAGE_VADDR_END)) {
        return g_exefs_code + (vaddr - PROCESS_IMAGE_VADDR);

    // FCRAM - linear heap
    } else if ((vaddr >= LINEAR_HEAP_VADDR) && (vaddr < LINEAR_HEAP_VADDR_END)) {
        return g_heap_linear + (vaddr - LINEAR_HEAP_VADDR);

    // FCRAM - application heap
    } else if ((vaddr >= HEAP_VADDR) && (vaddr < HEAP_VADDR_END)) {
        return g_heap + (vaddr - HEAP_VADDR);

    // Shared memory
    } else if ((vaddr >= SHARED_MEMORY_VADDR) && (vaddr < SHARED_MEMORY_VADDR_END)) {
        return g_shared_mem + (vaddr - SHARED_MEMORY_VADDR);

    // VRAM
    } else if ((vaddr >= VRAM_VADDR) && (vaddr < VRAM_VADDR_END)) {
        return g_vram + (vaddr - VRAM_VADDR);

    } else {
        LOG_ERROR(HW_Memory, "unknown GetPointer @ 0x%08x", vaddr);
        // nullptr instead of 0: this is a pointer, make the intent explicit
        return nullptr;
    }
}
|||
|
|||
/// Records a new allocation of `size` bytes in the application heap region and returns
/// its virtual address.
/// NOTE(review): placement is derived from `last_block.address + last_block.size`, but
/// only `base_address` is ever assigned here — presumably MemoryBlock default-initializes
/// `address` and GetVirtualAddress() combines the two; confirm against MemoryBlock's
/// definition in mem_map.h.
u32 MapBlock_Heap(u32 size, u32 operation, u32 permissions) {
    MemoryBlock block;

    block.base_address = HEAP_VADDR;
    block.size = size;
    block.operation = operation;
    block.permissions = permissions;

    // Append after the highest existing block, if any (std::map iterates in key order,
    // so rbegin() is the block with the largest virtual address).
    if (heap_map.size() > 0) {
        const MemoryBlock last_block = heap_map.rbegin()->second;
        block.address = last_block.address + last_block.size;
    }
    heap_map[block.GetVirtualAddress()] = block;

    return block.GetVirtualAddress();
}
|||
|
|||
/// Records a new allocation of `size` bytes in the linear heap region and returns its
/// virtual address. Mirrors MapBlock_Heap but tracks blocks in heap_linear_map.
/// NOTE(review): same `address` vs `base_address` question as MapBlock_Heap — confirm
/// MemoryBlock's field semantics in mem_map.h.
u32 MapBlock_HeapLinear(u32 size, u32 operation, u32 permissions) {
    MemoryBlock block;

    block.base_address = LINEAR_HEAP_VADDR;
    block.size = size;
    block.operation = operation;
    block.permissions = permissions;

    // Append after the highest existing block, if any.
    if (heap_linear_map.size() > 0) {
        const MemoryBlock last_block = heap_linear_map.rbegin()->second;
        block.address = last_block.address + last_block.size;
    }
    heap_linear_map[block.GetVirtualAddress()] = block;

    return block.GetVirtualAddress();
}
|||
|
|||
/// Initializes memory-block bookkeeping. Currently a no-op; kept for symmetry with
/// MemBlock_Shutdown().
void MemBlock_Init() {
}
|||
|
|||
/// Releases all memory-block bookkeeping for both heap regions.
void MemBlock_Shutdown() {
    heap_map.clear();
    heap_linear_map.clear();
}
|||
|
|||
u8 Read8(const VAddr addr) { |
|||
u8 data = 0; |
|||
Read<u8>(data, addr); |
|||
return data; |
|||
} |
|||
|
|||
u16 Read16(const VAddr addr) { |
|||
u16_le data = 0; |
|||
Read<u16_le>(data, addr); |
|||
return (u16)data; |
|||
} |
|||
|
|||
u32 Read32(const VAddr addr) { |
|||
u32_le data = 0; |
|||
Read<u32_le>(data, addr); |
|||
return (u32)data; |
|||
} |
|||
|
|||
u64 Read64(const VAddr addr) { |
|||
u64_le data = 0; |
|||
Read<u64_le>(data, addr); |
|||
return (u64)data; |
|||
} |
|||
|
|||
u32 Read8_ZX(const VAddr addr) { |
|||
return (u32)Read8(addr); |
|||
} |
|||
|
|||
u32 Read16_ZX(const VAddr addr) { |
|||
return (u32)Read16(addr); |
|||
} |
|||
|
|||
/// Writes an 8-bit value to emulated memory.
void Write8(const VAddr addr, const u8 data) {
    Write<u8>(addr, data);
}

/// Writes a 16-bit value to emulated memory (stored little-endian).
void Write16(const VAddr addr, const u16 data) {
    Write<u16_le>(addr, data);
}

/// Writes a 32-bit value to emulated memory (stored little-endian).
void Write32(const VAddr addr, const u32 data) {
    Write<u32_le>(addr, data);
}

/// Writes a 64-bit value to emulated memory (stored little-endian).
void Write64(const VAddr addr, const u64 data) {
    Write<u64_le>(addr, data);
}
|||
|
|||
/// Copies `size` bytes from the host buffer `data` into emulated memory starting at
/// `addr`, using 32-bit writes for the bulk and 16/8-bit writes for the tail.
/// NOTE(review): `*(u32*)&data[offset]` assumes `data` is suitably aligned and violates
/// strict aliasing — a memcpy into a local would be safer; confirm callers always pass
/// aligned buffers.
void WriteBlock(const VAddr addr, const u8* data, const size_t size) {
    u32 offset = 0;
    // Bulk copy: whole 32-bit words (size rounded down to a multiple of 4).
    while (offset < (size & ~3)) {
        Write32(addr + offset, *(u32*)&data[offset]);
        offset += 4;
    }

    // Remaining halfword, if any.
    if (size & 2) {
        Write16(addr + offset, *(u16*)&data[offset]);
        offset += 2;
    }

    // Remaining byte, if any.
    if (size & 1)
        Write8(addr + offset, data[offset]);
}
|||
|
|||
} // namespace
|
|||
@ -0,0 +1,202 @@ |
|||
// Copyright 2015 Citra Emulator Project
|
|||
// Licensed under GPLv2 or any later version
|
|||
// Refer to the license.txt file included.
|
|||
|
|||
#include <array>
#include <cstring>

#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/swap.h"

#include "core/hle/config_mem.h"
#include "core/hle/shared_page.h"
#include "core/hw/hw.h"
#include "core/mem_map.h"
#include "core/memory.h"
|||
|
|||
namespace Memory { |
|||
|
|||
/// Mask of the offset-within-page bits of an address (PAGE_SIZE is declared in memory.h).
const u32 PAGE_MASK = PAGE_SIZE - 1;
/// Number of address bits covered by one page (4 KiB => 12 bits).
const int PAGE_BITS = 12;
|||
|
|||
/// Classification of a single page-table entry.
enum class PageType {
    /// Page is unmapped and should cause an access error.
    Unmapped,
    /// Page is mapped to regular memory. This is the only type you can get pointers to.
    Memory,
    /// Page is mapped to an I/O region. Writing and reading to this page is handled by functions.
    Special,
};
|||
|
|||
/**
 * A (reasonably) fast way of allowing switchable and remappable process address spaces. It loosely
 * mimics the way a real CPU page table works, but instead is optimized for minimal decoding and
 * fetching requirements when accessing. In the usual case of an access to regular memory, it only
 * requires an indexed fetch and a check for NULL.
 */
struct PageTable {
    /// One entry per page of the 32-bit address space (2^20 entries at 4 KiB pages).
    static const size_t NUM_ENTRIES = 1 << (32 - PAGE_BITS);

    /**
     * Array of memory pointers backing each page. An entry can only be non-null if the
     * corresponding entry in the `attributes` array is of type `Memory`.
     */
    std::array<u8*, NUM_ENTRIES> pointers;

    /**
     * Array of fine grained page attributes. If it is set to any value other than `Memory`, then
     * the corresponding entry in `pointers` MUST be set to null.
     */
    std::array<PageType, NUM_ENTRIES> attributes;
};
|||
|
|||
/// Singular page table used for the singleton process
static PageTable main_page_table;
/// Currently active page table (always main_page_table until multi-process support exists)
static PageTable* current_page_table = &main_page_table;
|||
|
|||
/// Fills `size` consecutive page-table entries starting at page index `base` with the
/// given attribute, pointing each at successive PAGE_SIZE slices of `memory` (which may
/// be null for Special pages). Overlapping mappings are logged but still overwritten.
static void MapPages(u32 base, u32 size, u8* memory, PageType type) {
    LOG_DEBUG(HW_Memory, "Mapping %p onto %08X-%08X", memory, base * PAGE_SIZE, (base + size) * PAGE_SIZE);

    const u32 end = base + size;
    for (u32 page = base; page != end; ++page, memory += PAGE_SIZE) {
        ASSERT_MSG(page < PageTable::NUM_ENTRIES, "out of range mapping at %08X", page);

        if (current_page_table->attributes[page] != PageType::Unmapped) {
            LOG_ERROR(HW_Memory, "overlapping memory ranges at %08X", page * PAGE_SIZE);
        }
        current_page_table->attributes[page] = type;
        current_page_table->pointers[page] = memory;
    }
}
|||
|
|||
/// Resets the main page table so every page is unmapped. Must be called before any
/// MapMemoryRegion/MapIoRegion calls.
void InitMemoryMap() {
    main_page_table.pointers.fill(nullptr);
    main_page_table.attributes.fill(PageType::Unmapped);
}
|||
|
|||
/// Maps `size` bytes of the buffer `target` into the emulated address space at `base`.
/// Both `base` and `size` must be page-aligned (asserted).
void MapMemoryRegion(VAddr base, u32 size, u8* target) {
    ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
    ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
    MapPages(base / PAGE_SIZE, size / PAGE_SIZE, target, PageType::Memory);
}
|||
|
|||
/// Marks `size` bytes at `base` as an I/O region (no backing pointer; accesses are
/// routed to handler functions). Both arguments must be page-aligned (asserted).
void MapIoRegion(VAddr base, u32 size) {
    ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
    ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
    MapPages(base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special);
}
|||
|
|||
template <typename T> |
|||
T Read(const VAddr vaddr) { |
|||
const u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS]; |
|||
if (page_pointer) { |
|||
return *reinterpret_cast<const T*>(page_pointer + (vaddr & PAGE_MASK)); |
|||
} |
|||
|
|||
PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; |
|||
switch (type) { |
|||
case PageType::Unmapped: |
|||
LOG_ERROR(HW_Memory, "unmapped Read%lu @ 0x%08X", sizeof(T) * 8, vaddr); |
|||
return 0; |
|||
case PageType::Memory: |
|||
ASSERT_MSG(false, "Mapped memory page without a pointer @ %08X", vaddr); |
|||
case PageType::Special: |
|||
LOG_ERROR(HW_Memory, "I/O reads aren't implemented yet @ %08X", vaddr); |
|||
return 0; |
|||
default: |
|||
UNREACHABLE(); |
|||
} |
|||
} |
|||
|
|||
template <typename T> |
|||
void Write(const VAddr vaddr, const T data) { |
|||
u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS]; |
|||
if (page_pointer) { |
|||
*reinterpret_cast<T*>(page_pointer + (vaddr & PAGE_MASK)) = data; |
|||
return; |
|||
} |
|||
|
|||
PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; |
|||
switch (type) { |
|||
case PageType::Unmapped: |
|||
LOG_ERROR(HW_Memory, "unmapped Write%lu 0x%08X @ 0x%08X", sizeof(data) * 8, (u32) data, vaddr); |
|||
return; |
|||
case PageType::Memory: |
|||
ASSERT_MSG(false, "Mapped memory page without a pointer @ %08X", vaddr); |
|||
case PageType::Special: |
|||
LOG_ERROR(HW_Memory, "I/O writes aren't implemented yet @ %08X", vaddr); |
|||
return; |
|||
default: |
|||
UNREACHABLE(); |
|||
} |
|||
} |
|||
|
|||
/// Returns a host pointer to the backing memory for `vaddr`, or nullptr (with an error
/// logged) when the page has no backing pointer (unmapped or I/O).
u8* GetPointer(const VAddr vaddr) {
    u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
    if (page_pointer == nullptr) {
        LOG_ERROR(HW_Memory, "unknown GetPointer @ 0x%08x", vaddr);
        return nullptr;
    }
    return page_pointer + (vaddr & PAGE_MASK);
}
|||
|
|||
/// Returns a host pointer for a physical address by first translating it to a virtual
/// address; inherits GetPointer's nullptr-on-failure behavior.
u8* GetPhysicalPointer(PAddr address) {
    return GetPointer(PhysicalToVirtualAddress(address));
}
|||
|
|||
/// Reads an 8-bit value from emulated memory.
u8 Read8(const VAddr addr) {
    return Read<u8>(addr);
}

/// Reads a little-endian 16-bit value from emulated memory.
u16 Read16(const VAddr addr) {
    return Read<u16_le>(addr);
}

/// Reads a little-endian 32-bit value from emulated memory.
u32 Read32(const VAddr addr) {
    return Read<u32_le>(addr);
}

/// Reads a little-endian 64-bit value from emulated memory.
u64 Read64(const VAddr addr) {
    return Read<u64_le>(addr);
}
|||
|
|||
/// Writes an 8-bit value to emulated memory.
void Write8(const VAddr addr, const u8 data) {
    Write<u8>(addr, data);
}

/// Writes a 16-bit value to emulated memory (stored little-endian).
void Write16(const VAddr addr, const u16 data) {
    Write<u16_le>(addr, data);
}

/// Writes a 32-bit value to emulated memory (stored little-endian).
void Write32(const VAddr addr, const u32 data) {
    Write<u32_le>(addr, data);
}

/// Writes a 64-bit value to emulated memory (stored little-endian).
void Write64(const VAddr addr, const u64 data) {
    Write<u64_le>(addr, data);
}
|||
|
|||
void WriteBlock(const VAddr addr, const u8* data, const size_t size) { |
|||
u32 offset = 0; |
|||
while (offset < (size & ~3)) { |
|||
Write32(addr + offset, *(u32*)&data[offset]); |
|||
offset += 4; |
|||
} |
|||
|
|||
if (size & 2) { |
|||
Write16(addr + offset, *(u16*)&data[offset]); |
|||
offset += 2; |
|||
} |
|||
|
|||
if (size & 1) |
|||
Write8(addr + offset, data[offset]); |
|||
} |
|||
|
|||
} // namespace
|
|||
@ -0,0 +1,129 @@ |
|||
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/common_types.h"

namespace Memory {

/**
 * Page size used by the ARM architecture. This is the smallest granularity with which memory can
 * be mapped.
 */
const u32 PAGE_SIZE = 0x1000;

/// Physical memory regions as seen from the ARM11
enum : PAddr {
    /// IO register area
    IO_AREA_PADDR = 0x10100000,
    IO_AREA_SIZE = 0x01000000, ///< IO area size (16MB)
    IO_AREA_PADDR_END = IO_AREA_PADDR + IO_AREA_SIZE,

    /// MPCore internal memory region
    MPCORE_RAM_PADDR = 0x17E00000,
    MPCORE_RAM_SIZE = 0x00002000, ///< MPCore internal memory size (8KB)
    MPCORE_RAM_PADDR_END = MPCORE_RAM_PADDR + MPCORE_RAM_SIZE,

    /// Video memory
    VRAM_PADDR = 0x18000000,
    VRAM_SIZE = 0x00600000, ///< VRAM size (6MB)
    VRAM_PADDR_END = VRAM_PADDR + VRAM_SIZE,

    /// DSP memory
    DSP_RAM_PADDR = 0x1FF00000,
    DSP_RAM_SIZE = 0x00080000, ///< DSP memory size (512KB)
    DSP_RAM_PADDR_END = DSP_RAM_PADDR + DSP_RAM_SIZE,

    /// AXI WRAM
    AXI_WRAM_PADDR = 0x1FF80000,
    AXI_WRAM_SIZE = 0x00080000, ///< AXI WRAM size (512KB)
    AXI_WRAM_PADDR_END = AXI_WRAM_PADDR + AXI_WRAM_SIZE,

    /// Main FCRAM
    FCRAM_PADDR = 0x20000000,
    FCRAM_SIZE = 0x08000000, ///< FCRAM size (128MB)
    FCRAM_PADDR_END = FCRAM_PADDR + FCRAM_SIZE,
};

/// Virtual user-space memory regions
enum : VAddr {
    /// Where the application text, data and bss reside.
    PROCESS_IMAGE_VADDR = 0x00100000,
    PROCESS_IMAGE_MAX_SIZE = 0x03F00000,
    PROCESS_IMAGE_VADDR_END = PROCESS_IMAGE_VADDR + PROCESS_IMAGE_MAX_SIZE,

    /// Area where IPC buffers are mapped onto.
    IPC_MAPPING_VADDR = 0x04000000,
    IPC_MAPPING_SIZE = 0x04000000,
    IPC_MAPPING_VADDR_END = IPC_MAPPING_VADDR + IPC_MAPPING_SIZE,

    /// Application heap (includes stack).
    HEAP_VADDR = 0x08000000,
    HEAP_SIZE = 0x08000000,
    HEAP_VADDR_END = HEAP_VADDR + HEAP_SIZE,

    /// Area where shared memory buffers are mapped onto.
    SHARED_MEMORY_VADDR = 0x10000000,
    SHARED_MEMORY_SIZE = 0x04000000,
    SHARED_MEMORY_VADDR_END = SHARED_MEMORY_VADDR + SHARED_MEMORY_SIZE,

    /// Maps 1:1 to an offset in FCRAM. Used for HW allocations that need to be linear in physical memory.
    LINEAR_HEAP_VADDR = 0x14000000,
    LINEAR_HEAP_SIZE = 0x08000000,
    LINEAR_HEAP_VADDR_END = LINEAR_HEAP_VADDR + LINEAR_HEAP_SIZE,

    /// Maps 1:1 to the IO register area.
    IO_AREA_VADDR = 0x1EC00000,
    IO_AREA_VADDR_END = IO_AREA_VADDR + IO_AREA_SIZE,

    /// Maps 1:1 to VRAM.
    VRAM_VADDR = 0x1F000000,
    VRAM_VADDR_END = VRAM_VADDR + VRAM_SIZE,

    /// Maps 1:1 to DSP memory.
    DSP_RAM_VADDR = 0x1FF00000,
    DSP_RAM_VADDR_END = DSP_RAM_VADDR + DSP_RAM_SIZE,

    /// Read-only page containing kernel and system configuration values.
    CONFIG_MEMORY_VADDR = 0x1FF80000,
    CONFIG_MEMORY_SIZE = 0x00001000,
    CONFIG_MEMORY_VADDR_END = CONFIG_MEMORY_VADDR + CONFIG_MEMORY_SIZE,

    /// Usually read-only page containing mostly values read from hardware.
    SHARED_PAGE_VADDR = 0x1FF81000,
    SHARED_PAGE_SIZE = 0x00001000,
    SHARED_PAGE_VADDR_END = SHARED_PAGE_VADDR + SHARED_PAGE_SIZE,

    // TODO(yuriks): The size of this area is dynamic, the kernel grows
    // it as more and more threads are created. For now we'll just use a
    // hardcoded value.
    /// Area where TLS (Thread-Local Storage) buffers are allocated.
    TLS_AREA_VADDR = 0x1FF82000,
    TLS_AREA_SIZE = 0x00030000, // Each TLS buffer is 0x200 bytes, allows for 300 threads
    TLS_AREA_VADDR_END = TLS_AREA_VADDR + TLS_AREA_SIZE,
};

// Typed accessors into the emulated address space. 16/32/64-bit variants are
// little-endian, matching the guest.
u8 Read8(VAddr addr);
u16 Read16(VAddr addr);
u32 Read32(VAddr addr);
u64 Read64(VAddr addr);

void Write8(VAddr addr, u8 data);
void Write16(VAddr addr, u16 data);
void Write32(VAddr addr, u32 data);
void Write64(VAddr addr, u64 data);

/// Writes a block of `size` bytes from `data` into emulated memory at `addr`.
void WriteBlock(VAddr addr, const u8* data, size_t size);

/// Returns a host pointer to the memory backing `virtual_address`, or null if unmapped.
u8* GetPointer(VAddr virtual_address);

/**
 * Gets a pointer to the memory region beginning at the specified physical address.
 *
 * @note This is currently implemented using PhysicalToVirtualAddress().
 */
u8* GetPhysicalPointer(PAddr address);

} // namespace Memory
|||
@ -0,0 +1,29 @@ |
|||
// Copyright 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/common_types.h"

namespace Memory {

/// Initializes the page table; must be called before any mapping function below.
void InitMemoryMap();

/**
 * Maps an allocated buffer onto a region of the emulated process address space.
 *
 * @param base The address to start mapping at. Must be page-aligned.
 * @param size The amount of bytes to map. Must be page-aligned.
 * @param target Buffer with the memory backing the mapping. Must be of length at least `size`.
 */
void MapMemoryRegion(VAddr base, u32 size, u8* target);

/**
 * Maps a region of the emulated process address space as a IO region.
 * @note Currently this can only be used to mark a region as being IO, since actual memory-mapped
 * IO isn't yet supported.
 */
void MapIoRegion(VAddr base, u32 size);

} // namespace Memory
|||
Write
Preview
Loading…
Cancel
Save
Reference in new issue