|
|
@ -48,35 +48,6 @@ constexpr u64 SplitPageAccessWindow = 64; |
|
|
constexpr size_t MaxPreciseAccessPages = 256; |
|
|
constexpr size_t MaxPreciseAccessPages = 256; |
|
|
constexpr u8 MaxPreciseAccessPageWeight = 4; |
|
|
constexpr u8 MaxPreciseAccessPageWeight = 4; |
|
|
|
|
|
|
|
|
// Rounds an address down to the base of the page that contains it.
[[nodiscard]] constexpr u64 AlignDownPage(u64 addr) {
    const u64 offset_mask = u64{Memory::YUZU_PAGEMASK};
    return addr & ~offset_mask;
}
|
|
|
|
|
|
|
|
|
|
|
// True when the address lies within SplitPageAccessWindow bytes of either edge
// of its page, i.e. an access there may cross into the neighboring page.
[[nodiscard]] bool IsNearPageBoundary(u64 addr) {
    const u64 offset_in_page = addr & Memory::YUZU_PAGEMASK;
    const bool near_page_start = offset_in_page < SplitPageAccessWindow;
    const bool near_page_end = offset_in_page + SplitPageAccessWindow > Memory::YUZU_PAGESIZE;
    return near_page_start || near_page_end;
}
|
|
|
|
|
|
|
|
|
|
|
// True when the faulting address falls on one of the pages spanned by the TLS
// region at tls_base, or on a page directly adjacent to that span. A zero
// tls_base means "no TLS region" and never matches.
[[nodiscard]] bool IsNearTlsWindow(u64 tls_base, u64 fault_addr) {
    if (tls_base == 0) {
        return false;
    }

    // Page span covered by the TLS region itself.
    const u64 first_tls_page = AlignDownPage(tls_base);
    const u64 last_tls_page = AlignDownPage(tls_base + Kernel::Svc::ThreadLocalRegionSize - 1);
    const u64 fault_page = AlignDownPage(fault_addr);

    // Widen the span by one page on each side before testing membership.
    const bool not_below_window = fault_page + Memory::YUZU_PAGESIZE >= first_tls_page;
    const bool not_above_window = fault_page <= last_tls_page + Memory::YUZU_PAGESIZE;
    return not_below_window && not_above_window;
}
|
|
|
|
|
|
|
|
|
|
|
[[nodiscard]] bool ShouldUsePreciseAccessChannel(const GuestContext* guest_ctx, u64 fault_addr) { |
|
|
|
|
|
return IsNearPageBoundary(fault_addr) || IsNearTlsWindow(guest_ctx->tpidrro_el0, fault_addr) || |
|
|
|
|
|
IsNearTlsWindow(guest_ctx->tpidr_el0, fault_addr); |
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
} // namespace
|
|
|
} // namespace
|
|
|
|
|
|
|
|
|
void* ArmNce::RestoreGuestContext(void* raw_context) { |
|
|
void* ArmNce::RestoreGuestContext(void* raw_context) { |
|
|
@ -199,20 +170,7 @@ bool ArmNce::HandleGuestAccessFault(GuestContext* guest_ctx, void* raw_info, voi |
|
|
const u64 fault_addr = reinterpret_cast<u64>(info->si_addr); |
|
|
const u64 fault_addr = reinterpret_cast<u64>(info->si_addr); |
|
|
const Common::ProcessAddress addr = fault_addr & ~Memory::YUZU_PAGEMASK; |
|
|
const Common::ProcessAddress addr = fault_addr & ~Memory::YUZU_PAGEMASK; |
|
|
const u64 page_offset = fault_addr & Memory::YUZU_PAGEMASK; |
|
|
const u64 page_offset = fault_addr & Memory::YUZU_PAGEMASK; |
|
|
auto& memory = parent->m_running_thread->GetOwnerProcess()->GetMemory(); |
|
|
|
|
|
const bool rasterizer_cached = memory.IsRasterizerCached(addr); |
|
|
|
|
|
const bool prefer_precise_channel = ShouldUsePreciseAccessChannel(guest_ctx, fault_addr) || |
|
|
|
|
|
parent->IsPreciseAccessPage(fault_addr) || |
|
|
|
|
|
rasterizer_cached; |
|
|
|
|
|
|
|
|
|
|
|
if (prefer_precise_channel) { |
|
|
|
|
|
if (auto next_pc = MatchAndExecuteOneInstruction(memory, &host_ctx, fpctx); next_pc) { |
|
|
|
|
|
parent->MarkPreciseAccessFaultWindow(fault_addr); |
|
|
|
|
|
host_ctx.pc = *next_pc; |
|
|
|
|
|
return true; |
|
|
|
|
|
} |
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
auto& memory = guest_ctx->parent->m_running_thread->GetOwnerProcess()->GetMemory(); |
|
|
bool handled = memory.InvalidateNCE(addr, Memory::YUZU_PAGESIZE); |
|
|
bool handled = memory.InvalidateNCE(addr, Memory::YUZU_PAGESIZE); |
|
|
|
|
|
|
|
|
if (page_offset < SplitPageAccessWindow && addr >= Memory::YUZU_PAGESIZE) { |
|
|
if (page_offset < SplitPageAccessWindow && addr >= Memory::YUZU_PAGESIZE) { |
|
|
|