shader: Move pipeline cache logic to separate files

Move code to separate files to be able to reuse it from OpenGL. This greatly simplifies the pipeline cache logic on Vulkan. Transform feedback state is not yet abstracted; it is still intrusively stored inside vk_pipeline_cache and will be moved once OpenGL needs it.
Committed by ameerj
12 changed files with 1095 additions and 824 deletions
- 3    src/video_core/CMakeLists.txt
- 2    src/video_core/renderer_opengl/gl_rasterizer.h
- 21   src/video_core/renderer_opengl/gl_shader_cache.cpp
- 58   src/video_core/renderer_opengl/gl_shader_cache.h
- 2    src/video_core/renderer_vulkan/vk_graphics_pipeline.h
- 719  src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
- 30   src/video_core/renderer_vulkan/vk_pipeline_cache.h
- 2    src/video_core/renderer_vulkan/vk_rasterizer.cpp
- 233  src/video_core/shader_cache.cpp
- 198  src/video_core/shader_cache.h
- 453  src/video_core/shader_environment.cpp
- 198  src/video_core/shader_environment.h
src/video_core/shader_cache.cpp  @@ -0,0 +1,233 @@

// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <array>
#include <memory>
#include <mutex>
#include <optional>
#include <vector>

#include "common/assert.h"
#include "common/logging/log.h"
#include "shader_recompiler/frontend/maxwell/control_flow.h"
#include "shader_recompiler/object_pool.h"
#include "video_core/dirty_flags.h"
#include "video_core/engines/kepler_compute.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/memory_manager.h"
#include "video_core/shader_cache.h"
#include "video_core/shader_environment.h"

namespace VideoCommon {

void ShaderCache::InvalidateRegion(VAddr addr, size_t size) {
    std::scoped_lock lock{invalidation_mutex};
    InvalidatePagesInRegion(addr, size);
    RemovePendingShaders();
}

void ShaderCache::OnCPUWrite(VAddr addr, size_t size) {
    std::scoped_lock lock{invalidation_mutex};
    InvalidatePagesInRegion(addr, size);
}

void ShaderCache::SyncGuestHost() {
    std::scoped_lock lock{invalidation_mutex};
    RemovePendingShaders();
}
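A note on the locking discipline used throughout this file (my reading of the code, not part of the diff): invalidation paths take invalidation_mutex, lookup paths take lookup_mutex, and Register below takes both at once through std::scoped_lock, which locks the pair atomically. A minimal sketch of that pattern, assuming nothing beyond the standard library:

#include <mutex>

std::mutex invalidation_mutex;
std::mutex lookup_mutex;

void RegisterSketch() {
    // std::scoped_lock acquires both mutexes as a single operation (std::lock
    // underneath), so two threads taking them in different orders cannot
    // deadlock the way two nested lock_guards could.
    std::scoped_lock lock{invalidation_mutex, lookup_mutex};
    // ... mutate structures guarded by both locks ...
}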

ShaderCache::ShaderCache(VideoCore::RasterizerInterface& rasterizer_,
                         Tegra::MemoryManager& gpu_memory_, Tegra::Engines::Maxwell3D& maxwell3d_,
                         Tegra::Engines::KeplerCompute& kepler_compute_)
    : gpu_memory{gpu_memory_}, maxwell3d{maxwell3d_}, kepler_compute{kepler_compute_},
      rasterizer{rasterizer_} {}

bool ShaderCache::RefreshStages(std::array<u64, 6>& unique_hashes) {
    auto& dirty{maxwell3d.dirty.flags};
    if (!dirty[VideoCommon::Dirty::Shaders]) {
        return last_shaders_valid;
    }
    dirty[VideoCommon::Dirty::Shaders] = false;

    const GPUVAddr base_addr{maxwell3d.regs.code_address.CodeAddress()};
    for (size_t index = 0; index < Tegra::Engines::Maxwell3D::Regs::MaxShaderProgram; ++index) {
        if (!maxwell3d.regs.IsShaderConfigEnabled(index)) {
            unique_hashes[index] = 0;
            continue;
        }
        const auto& shader_config{maxwell3d.regs.shader_config[index]};
        const auto program{static_cast<Tegra::Engines::Maxwell3D::Regs::ShaderProgram>(index)};
        const GPUVAddr shader_addr{base_addr + shader_config.offset};
        const std::optional<VAddr> cpu_shader_addr{gpu_memory.GpuToCpuAddress(shader_addr)};
        if (!cpu_shader_addr) {
            LOG_ERROR(HW_GPU, "Invalid GPU address for shader 0x{:016x}", shader_addr);
            last_shaders_valid = false;
            return false;
        }
        const ShaderInfo* shader_info{TryGet(*cpu_shader_addr)};
        if (!shader_info) {
            const u32 start_address{shader_config.offset};
            GraphicsEnvironment env{maxwell3d, gpu_memory, program, base_addr, start_address};
            shader_info = MakeShaderInfo(env, *cpu_shader_addr);
        }
        shader_infos[index] = shader_info;
        unique_hashes[index] = shader_info->unique_hash;
    }
    last_shaders_valid = true;
    return true;
}

const ShaderInfo* ShaderCache::ComputeShader() {
    const GPUVAddr program_base{kepler_compute.regs.code_loc.Address()};
    const auto& qmd{kepler_compute.launch_description};
    const GPUVAddr shader_addr{program_base + qmd.program_start};
    const std::optional<VAddr> cpu_shader_addr{gpu_memory.GpuToCpuAddress(shader_addr)};
    if (!cpu_shader_addr) {
        LOG_ERROR(HW_GPU, "Invalid GPU address for shader 0x{:016x}", shader_addr);
        return nullptr;
    }
    if (const ShaderInfo* const shader = TryGet(*cpu_shader_addr)) {
        return shader;
    }
    ComputeEnvironment env{kepler_compute, gpu_memory, program_base, qmd.program_start};
    return MakeShaderInfo(env, *cpu_shader_addr);
}

ShaderInfo* ShaderCache::TryGet(VAddr addr) const {
    std::scoped_lock lock{lookup_mutex};

    const auto it = lookup_cache.find(addr);
    if (it == lookup_cache.end()) {
        return nullptr;
    }
    return it->second->data;
}

void ShaderCache::Register(std::unique_ptr<ShaderInfo> data, VAddr addr, size_t size) {
    std::scoped_lock lock{invalidation_mutex, lookup_mutex};

    const VAddr addr_end = addr + size;
    Entry* const entry = NewEntry(addr, addr_end, data.get());

    const u64 page_end = (addr_end + PAGE_SIZE - 1) >> PAGE_BITS;
    for (u64 page = addr >> PAGE_BITS; page < page_end; ++page) {
        invalidation_cache[page].push_back(entry);
    }

    storage.push_back(std::move(data));

    rasterizer.UpdatePagesCachedCount(addr, size, 1);
}
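The page bookkeeping in Register (and in InvalidatePagesInRegion below) rounds the end address up before shifting by PAGE_BITS, so an entry that straddles a page boundary is indexed under every page it touches. The same arithmetic in standalone form; PAGE_BITS = 12 (4 KiB pages) is an assumption for illustration, the real constants live in shader_cache.h:

#include <cstdint>
#include <cstdio>

constexpr std::uint64_t PAGE_BITS = 12;                // assumed page shift
constexpr std::uint64_t PAGE_SIZE = 1ULL << PAGE_BITS; // 4 KiB

int main() {
    const std::uint64_t addr = 0x1fff;
    const std::uint64_t size = 2;
    const std::uint64_t addr_end = addr + size;
    // Round the end up so a shader crossing a page boundary lands in both pages.
    const std::uint64_t page_end = (addr_end + PAGE_SIZE - 1) >> PAGE_BITS;
    for (std::uint64_t page = addr >> PAGE_BITS; page < page_end; ++page) {
        std::printf("touches page 0x%llx\n", static_cast<unsigned long long>(page));
    }
    // Prints pages 0x1 and 0x2: the two-byte range crosses the 8 KiB boundary.
}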

void ShaderCache::InvalidatePagesInRegion(VAddr addr, size_t size) {
    const VAddr addr_end = addr + size;
    const u64 page_end = (addr_end + PAGE_SIZE - 1) >> PAGE_BITS;
    for (u64 page = addr >> PAGE_BITS; page < page_end; ++page) {
        auto it = invalidation_cache.find(page);
        if (it == invalidation_cache.end()) {
            continue;
        }
        InvalidatePageEntries(it->second, addr, addr_end);
    }
}

void ShaderCache::RemovePendingShaders() {
    if (marked_for_removal.empty()) {
        return;
    }
    // Remove duplicates
    std::ranges::sort(marked_for_removal);
    marked_for_removal.erase(std::unique(marked_for_removal.begin(), marked_for_removal.end()),
                             marked_for_removal.end());

    std::vector<ShaderInfo*> removed_shaders;
    removed_shaders.reserve(marked_for_removal.size());

    std::scoped_lock lock{lookup_mutex};

    for (Entry* const entry : marked_for_removal) {
        removed_shaders.push_back(entry->data);

        const auto it = lookup_cache.find(entry->addr_start);
        ASSERT(it != lookup_cache.end());
        lookup_cache.erase(it);
    }
    marked_for_removal.clear();

    if (!removed_shaders.empty()) {
        RemoveShadersFromStorage(std::move(removed_shaders));
    }
}

void ShaderCache::InvalidatePageEntries(std::vector<Entry*>& entries, VAddr addr, VAddr addr_end) {
    size_t index = 0;
    while (index < entries.size()) {
        Entry* const entry = entries[index];
        if (!entry->Overlaps(addr, addr_end)) {
            ++index;
            continue;
        }

        UnmarkMemory(entry);
        // This erases entries[index] from the vector we are iterating, so the
        // index is deliberately not advanced on this path.
        RemoveEntryFromInvalidationCache(entry);
        marked_for_removal.push_back(entry);
    }
}

void ShaderCache::RemoveEntryFromInvalidationCache(const Entry* entry) {
    const u64 page_end = (entry->addr_end + PAGE_SIZE - 1) >> PAGE_BITS;
    for (u64 page = entry->addr_start >> PAGE_BITS; page < page_end; ++page) {
        const auto entries_it = invalidation_cache.find(page);
        ASSERT(entries_it != invalidation_cache.end());
        std::vector<Entry*>& entries = entries_it->second;

        const auto entry_it = std::ranges::find(entries, entry);
        ASSERT(entry_it != entries.end());
        entries.erase(entry_it);
    }
}

void ShaderCache::UnmarkMemory(Entry* entry) {
    if (!entry->is_memory_marked) {
        return;
    }
    entry->is_memory_marked = false;

    const VAddr addr = entry->addr_start;
    const size_t size = entry->addr_end - addr;
    rasterizer.UpdatePagesCachedCount(addr, size, -1);
}

void ShaderCache::RemoveShadersFromStorage(std::vector<ShaderInfo*> removed_shaders) {
    // Remove them from the cache
    std::erase_if(storage, [&removed_shaders](const std::unique_ptr<ShaderInfo>& shader) {
        return std::ranges::find(removed_shaders, shader.get()) != removed_shaders.end();
    });
}

ShaderCache::Entry* ShaderCache::NewEntry(VAddr addr, VAddr addr_end, ShaderInfo* data) {
    auto entry = std::make_unique<Entry>(Entry{addr, addr_end, data});
    Entry* const entry_pointer = entry.get();

    lookup_cache.emplace(addr, std::move(entry));
    return entry_pointer;
}

const ShaderInfo* ShaderCache::MakeShaderInfo(GenericEnvironment& env, VAddr cpu_addr) {
    auto info = std::make_unique<ShaderInfo>();
    if (const std::optional<u64> cached_hash{env.Analyze()}) {
        info->unique_hash = *cached_hash;
        info->size_bytes = env.CachedSize();
    } else {
        // Slow path, not really hit on commercial games
        // Build a control flow graph to get the real shader size
        Shader::ObjectPool<Shader::Maxwell::Flow::Block> flow_block;
        Shader::Maxwell::Flow::CFG cfg{env, flow_block, env.StartAddress()};
        info->unique_hash = env.CalculateHash();
        info->size_bytes = env.ReadSize();
    }
    const size_t size_bytes{info->size_bytes};
    const ShaderInfo* const result{info.get()};
    Register(std::move(info), cpu_addr, size_bytes);
    return result;
}

} // namespace VideoCommon
src/video_core/shader_environment.cpp  @@ -0,0 +1,453 @@

// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <array>
#include <filesystem>
#include <fstream>
#include <memory>
#include <optional>
#include <span>
#include <utility>
#include <vector>

#include "common/assert.h"
#include "common/cityhash.h"
#include "common/common_types.h"
#include "common/div_ceil.h"
#include "common/fs/fs.h"
#include "common/logging/log.h"
#include "shader_recompiler/environment.h"
#include "video_core/memory_manager.h"
#include "video_core/shader_environment.h"
#include "video_core/textures/texture.h"

namespace VideoCommon {

constexpr std::array<char, 8> MAGIC_NUMBER{'y', 'u', 'z', 'u', 'c', 'a', 'c', 'h'};
constexpr u32 CACHE_VERSION = 3;

constexpr size_t INST_SIZE = sizeof(u64);

using Maxwell = Tegra::Engines::Maxwell3D::Regs;

static u64 MakeCbufKey(u32 index, u32 offset) {
    return (static_cast<u64>(index) << 32) | offset;
}
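MakeCbufKey packs a constant-buffer slot and a byte offset into one 64-bit map key: the index in the high word, the offset in the low word. A quick round-trip check, assuming nothing beyond the packing shown above:

#include <cassert>
#include <cstdint>

static std::uint64_t MakeCbufKey(std::uint32_t index, std::uint32_t offset) {
    return (static_cast<std::uint64_t>(index) << 32) | offset;
}

int main() {
    const std::uint64_t key = MakeCbufKey(3, 0x40);
    // The high word recovers the buffer index, the low word the offset.
    assert(static_cast<std::uint32_t>(key >> 32) == 3);
    assert(static_cast<std::uint32_t>(key) == 0x40);
}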

static Shader::TextureType ConvertType(const Tegra::Texture::TICEntry& entry) {
    switch (entry.texture_type) {
    case Tegra::Texture::TextureType::Texture1D:
        return Shader::TextureType::Color1D;
    case Tegra::Texture::TextureType::Texture2D:
    case Tegra::Texture::TextureType::Texture2DNoMipmap:
        return Shader::TextureType::Color2D;
    case Tegra::Texture::TextureType::Texture3D:
        return Shader::TextureType::Color3D;
    case Tegra::Texture::TextureType::TextureCubemap:
        return Shader::TextureType::ColorCube;
    case Tegra::Texture::TextureType::Texture1DArray:
        return Shader::TextureType::ColorArray1D;
    case Tegra::Texture::TextureType::Texture2DArray:
        return Shader::TextureType::ColorArray2D;
    case Tegra::Texture::TextureType::Texture1DBuffer:
        return Shader::TextureType::Buffer;
    case Tegra::Texture::TextureType::TextureCubeArray:
        return Shader::TextureType::ColorArrayCube;
    default:
        throw Shader::NotImplementedException("Unknown texture type");
    }
}

GenericEnvironment::GenericEnvironment(Tegra::MemoryManager& gpu_memory_, GPUVAddr program_base_,
                                       u32 start_address_)
    : gpu_memory{&gpu_memory_}, program_base{program_base_} {
    start_address = start_address_;
}

GenericEnvironment::~GenericEnvironment() = default;

u32 GenericEnvironment::TextureBoundBuffer() const {
    return texture_bound;
}

u32 GenericEnvironment::LocalMemorySize() const {
    return local_memory_size;
}

u32 GenericEnvironment::SharedMemorySize() const {
    return shared_memory_size;
}

std::array<u32, 3> GenericEnvironment::WorkgroupSize() const {
    return workgroup_size;
}

u64 GenericEnvironment::ReadInstruction(u32 address) {
    read_lowest = std::min(read_lowest, address);
    read_highest = std::max(read_highest, address);

    if (address >= cached_lowest && address < cached_highest) {
        return code[(address - cached_lowest) / INST_SIZE];
    }
    has_unbound_instructions = true;
    return gpu_memory->Read<u64>(program_base + address);
}

std::optional<u64> GenericEnvironment::Analyze() {
    const std::optional<u64> size{TryFindSize()};
    if (!size) {
        return std::nullopt;
    }
    cached_lowest = start_address;
    cached_highest = start_address + static_cast<u32>(*size);
    return Common::CityHash64(reinterpret_cast<const char*>(code.data()), *size);
}

void GenericEnvironment::SetCachedSize(size_t size_bytes) {
    cached_lowest = start_address;
    cached_highest = start_address + static_cast<u32>(size_bytes);
    code.resize(CachedSize());
    gpu_memory->ReadBlock(program_base + cached_lowest, code.data(), code.size() * sizeof(u64));
}

size_t GenericEnvironment::CachedSize() const noexcept {
    return cached_highest - cached_lowest + INST_SIZE;
}

size_t GenericEnvironment::ReadSize() const noexcept {
    return read_highest - read_lowest + INST_SIZE;
}

bool GenericEnvironment::CanBeSerialized() const noexcept {
    return !has_unbound_instructions;
}

u64 GenericEnvironment::CalculateHash() const {
    const size_t size{ReadSize()};
    const auto data{std::make_unique<char[]>(size)};
    gpu_memory->ReadBlock(program_base + read_lowest, data.get(), size);
    return Common::CityHash64(data.get(), size);
}

void GenericEnvironment::Serialize(std::ofstream& file) const {
    const u64 code_size{static_cast<u64>(CachedSize())};
    const u64 num_texture_types{static_cast<u64>(texture_types.size())};
    const u64 num_cbuf_values{static_cast<u64>(cbuf_values.size())};

    file.write(reinterpret_cast<const char*>(&code_size), sizeof(code_size))
        .write(reinterpret_cast<const char*>(&num_texture_types), sizeof(num_texture_types))
        .write(reinterpret_cast<const char*>(&num_cbuf_values), sizeof(num_cbuf_values))
        .write(reinterpret_cast<const char*>(&local_memory_size), sizeof(local_memory_size))
        .write(reinterpret_cast<const char*>(&texture_bound), sizeof(texture_bound))
        .write(reinterpret_cast<const char*>(&start_address), sizeof(start_address))
        .write(reinterpret_cast<const char*>(&cached_lowest), sizeof(cached_lowest))
        .write(reinterpret_cast<const char*>(&cached_highest), sizeof(cached_highest))
        .write(reinterpret_cast<const char*>(&stage), sizeof(stage))
        .write(reinterpret_cast<const char*>(code.data()), code_size);
    for (const auto [key, type] : texture_types) {
        file.write(reinterpret_cast<const char*>(&key), sizeof(key))
            .write(reinterpret_cast<const char*>(&type), sizeof(type));
    }
    for (const auto [key, value] : cbuf_values) {
        file.write(reinterpret_cast<const char*>(&key), sizeof(key))
            .write(reinterpret_cast<const char*>(&value), sizeof(value));
    }
    if (stage == Shader::Stage::Compute) {
        file.write(reinterpret_cast<const char*>(&workgroup_size), sizeof(workgroup_size))
            .write(reinterpret_cast<const char*>(&shared_memory_size), sizeof(shared_memory_size));
    } else {
        file.write(reinterpret_cast<const char*>(&sph), sizeof(sph));
    }
}
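For reference, the per-shader record that Serialize emits (and FileEnvironment::Deserialize below consumes) is a flat raw-bytes stream in host endianness. A commented sketch of the layout, derived from the write sequence above; this is not an actual struct in the codebase, and a packed C struct would not round-trip it byte for byte:

// Pseudocode layout of one serialized shader environment:
//
//   u64 code_size             // bytes of shader code following the fixed fields
//   u64 num_texture_types     // entries in the texture-type map
//   u64 num_cbuf_values       // entries in the constant-buffer-value map
//   u32 local_memory_size
//   u32 texture_bound
//   u32 start_address
//   u32 cached_lowest
//   u32 cached_highest
//   Shader::Stage stage
//   u64 code[code_size / 8]
//   { u32 key; Shader::TextureType type; }  x num_texture_types
//   { u64 key; u32 value; }                 x num_cbuf_values
//   if compute:  u32 workgroup_size[3]; u32 shared_memory_size
//   otherwise:   program header (sph)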

std::optional<u64> GenericEnvironment::TryFindSize() {
    static constexpr size_t BLOCK_SIZE = 0x1000;
    static constexpr size_t MAXIMUM_SIZE = 0x100000;

    static constexpr u64 SELF_BRANCH_A = 0xE2400FFFFF87000FULL;
    static constexpr u64 SELF_BRANCH_B = 0xE2400FFFFF07000FULL;

    GPUVAddr guest_addr{program_base + start_address};
    size_t offset{0};
    size_t size{BLOCK_SIZE};
    while (size <= MAXIMUM_SIZE) {
        code.resize(size / INST_SIZE);
        u64* const data = code.data() + offset / INST_SIZE;
        gpu_memory->ReadBlock(guest_addr, data, BLOCK_SIZE);
        for (size_t index = 0; index < BLOCK_SIZE; index += INST_SIZE) {
            const u64 inst = data[index / INST_SIZE];
            if (inst == SELF_BRANCH_A || inst == SELF_BRANCH_B) {
                return offset + index;
            }
        }
        guest_addr += BLOCK_SIZE;
        size += BLOCK_SIZE;
        offset += BLOCK_SIZE;
    }
    return std::nullopt;
}
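TryFindSize works because Maxwell programs conventionally end in an instruction that branches to itself, and the two 64-bit encodings above are that sentinel; the function reads guest memory in 4 KiB blocks, up to 1 MiB, until it finds one. The same idea in self-contained form; the sentinel constants are copied from the function, everything else is illustrative:

#include <cstdint>
#include <optional>
#include <span>

// Returns the byte offset of the first self-branch instruction, if any.
std::optional<std::size_t> FindSelfBranch(std::span<const std::uint64_t> insts) {
    constexpr std::uint64_t SELF_BRANCH_A = 0xE2400FFFFF87000FULL;
    constexpr std::uint64_t SELF_BRANCH_B = 0xE2400FFFFF07000FULL;
    for (std::size_t i = 0; i < insts.size(); ++i) {
        if (insts[i] == SELF_BRANCH_A || insts[i] == SELF_BRANCH_B) {
            return i * sizeof(std::uint64_t);
        }
    }
    return std::nullopt;
}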

Shader::TextureType GenericEnvironment::ReadTextureTypeImpl(GPUVAddr tic_addr, u32 tic_limit,
                                                            bool via_header_index, u32 raw) {
    const TextureHandle handle{raw, via_header_index};
    const GPUVAddr descriptor_addr{tic_addr + handle.image * sizeof(Tegra::Texture::TICEntry)};
    Tegra::Texture::TICEntry entry;
    gpu_memory->ReadBlock(descriptor_addr, &entry, sizeof(entry));
    const Shader::TextureType result{ConvertType(entry)};
    texture_types.emplace(raw, result);
    return result;
}

GraphicsEnvironment::GraphicsEnvironment(Tegra::Engines::Maxwell3D& maxwell3d_,
                                         Tegra::MemoryManager& gpu_memory_,
                                         Maxwell::ShaderProgram program, GPUVAddr program_base_,
                                         u32 start_address_)
    : GenericEnvironment{gpu_memory_, program_base_, start_address_}, maxwell3d{&maxwell3d_} {
    gpu_memory->ReadBlock(program_base + start_address, &sph, sizeof(sph));
    switch (program) {
    case Maxwell::ShaderProgram::VertexA:
        stage = Shader::Stage::VertexA;
        stage_index = 0;
        break;
    case Maxwell::ShaderProgram::VertexB:
        stage = Shader::Stage::VertexB;
        stage_index = 0;
        break;
    case Maxwell::ShaderProgram::TesselationControl:
        stage = Shader::Stage::TessellationControl;
        stage_index = 1;
        break;
    case Maxwell::ShaderProgram::TesselationEval:
        stage = Shader::Stage::TessellationEval;
        stage_index = 2;
        break;
    case Maxwell::ShaderProgram::Geometry:
        stage = Shader::Stage::Geometry;
        stage_index = 3;
        break;
    case Maxwell::ShaderProgram::Fragment:
        stage = Shader::Stage::Fragment;
        stage_index = 4;
        break;
    default:
        UNREACHABLE_MSG("Invalid program={}", program);
        break;
    }
    const u64 local_size{sph.LocalMemorySize()};
    ASSERT(local_size <= std::numeric_limits<u32>::max());
    local_memory_size = static_cast<u32>(local_size);
    texture_bound = maxwell3d->regs.tex_cb_index;
}

u32 GraphicsEnvironment::ReadCbufValue(u32 cbuf_index, u32 cbuf_offset) {
    const auto& cbuf{maxwell3d->state.shader_stages[stage_index].const_buffers[cbuf_index]};
    ASSERT(cbuf.enabled);
    u32 value{};
    if (cbuf_offset < cbuf.size) {
        value = gpu_memory->Read<u32>(cbuf.address + cbuf_offset);
    }
    cbuf_values.emplace(MakeCbufKey(cbuf_index, cbuf_offset), value);
    return value;
}

Shader::TextureType GraphicsEnvironment::ReadTextureType(u32 handle) {
    const auto& regs{maxwell3d->regs};
    const bool via_header_index{regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex};
    return ReadTextureTypeImpl(regs.tic.Address(), regs.tic.limit, via_header_index, handle);
}

ComputeEnvironment::ComputeEnvironment(Tegra::Engines::KeplerCompute& kepler_compute_,
                                       Tegra::MemoryManager& gpu_memory_, GPUVAddr program_base_,
                                       u32 start_address_)
    : GenericEnvironment{gpu_memory_, program_base_, start_address_}, kepler_compute{
                                                                          &kepler_compute_} {
    const auto& qmd{kepler_compute->launch_description};
    stage = Shader::Stage::Compute;
    local_memory_size = qmd.local_pos_alloc;
    texture_bound = kepler_compute->regs.tex_cb_index;
    shared_memory_size = qmd.shared_alloc;
    workgroup_size = {qmd.block_dim_x, qmd.block_dim_y, qmd.block_dim_z};
}

u32 ComputeEnvironment::ReadCbufValue(u32 cbuf_index, u32 cbuf_offset) {
    const auto& qmd{kepler_compute->launch_description};
    ASSERT(((qmd.const_buffer_enable_mask.Value() >> cbuf_index) & 1) != 0);
    const auto& cbuf{qmd.const_buffer_config[cbuf_index]};
    u32 value{};
    if (cbuf_offset < cbuf.size) {
        value = gpu_memory->Read<u32>(cbuf.Address() + cbuf_offset);
    }
    cbuf_values.emplace(MakeCbufKey(cbuf_index, cbuf_offset), value);
    return value;
}

Shader::TextureType ComputeEnvironment::ReadTextureType(u32 handle) {
    const auto& regs{kepler_compute->regs};
    const auto& qmd{kepler_compute->launch_description};
    return ReadTextureTypeImpl(regs.tic.Address(), regs.tic.limit, qmd.linked_tsc != 0, handle);
}

void FileEnvironment::Deserialize(std::ifstream& file) {
    u64 code_size{};
    u64 num_texture_types{};
    u64 num_cbuf_values{};
    file.read(reinterpret_cast<char*>(&code_size), sizeof(code_size))
        .read(reinterpret_cast<char*>(&num_texture_types), sizeof(num_texture_types))
        .read(reinterpret_cast<char*>(&num_cbuf_values), sizeof(num_cbuf_values))
        .read(reinterpret_cast<char*>(&local_memory_size), sizeof(local_memory_size))
        .read(reinterpret_cast<char*>(&texture_bound), sizeof(texture_bound))
        .read(reinterpret_cast<char*>(&start_address), sizeof(start_address))
        .read(reinterpret_cast<char*>(&read_lowest), sizeof(read_lowest))
        .read(reinterpret_cast<char*>(&read_highest), sizeof(read_highest))
        .read(reinterpret_cast<char*>(&stage), sizeof(stage));
    code = std::make_unique<u64[]>(Common::DivCeil(code_size, sizeof(u64)));
    file.read(reinterpret_cast<char*>(code.get()), code_size);
    for (size_t i = 0; i < num_texture_types; ++i) {
        u32 key;
        Shader::TextureType type;
        file.read(reinterpret_cast<char*>(&key), sizeof(key))
            .read(reinterpret_cast<char*>(&type), sizeof(type));
        texture_types.emplace(key, type);
    }
    for (size_t i = 0; i < num_cbuf_values; ++i) {
        u64 key;
        u32 value;
        file.read(reinterpret_cast<char*>(&key), sizeof(key))
            .read(reinterpret_cast<char*>(&value), sizeof(value));
        cbuf_values.emplace(key, value);
    }
    if (stage == Shader::Stage::Compute) {
        file.read(reinterpret_cast<char*>(&workgroup_size), sizeof(workgroup_size))
            .read(reinterpret_cast<char*>(&shared_memory_size), sizeof(shared_memory_size));
    } else {
        file.read(reinterpret_cast<char*>(&sph), sizeof(sph));
    }
}

u64 FileEnvironment::ReadInstruction(u32 address) {
    if (address < read_lowest || address > read_highest) {
        throw Shader::LogicError("Out of bounds address {}", address);
    }
    return code[(address - read_lowest) / sizeof(u64)];
}

u32 FileEnvironment::ReadCbufValue(u32 cbuf_index, u32 cbuf_offset) {
    const auto it{cbuf_values.find(MakeCbufKey(cbuf_index, cbuf_offset))};
    if (it == cbuf_values.end()) {
        throw Shader::LogicError("Uncached constant buffer read");
    }
    return it->second;
}

Shader::TextureType FileEnvironment::ReadTextureType(u32 handle) {
    const auto it{texture_types.find(handle)};
    if (it == texture_types.end()) {
        throw Shader::LogicError("Uncached texture type read");
    }
    return it->second;
}

u32 FileEnvironment::LocalMemorySize() const {
    return local_memory_size;
}

u32 FileEnvironment::SharedMemorySize() const {
    return shared_memory_size;
}

u32 FileEnvironment::TextureBoundBuffer() const {
    return texture_bound;
}

std::array<u32, 3> FileEnvironment::WorkgroupSize() const {
    return workgroup_size;
}

void SerializePipeline(std::span<const char> key, std::span<const GenericEnvironment* const> envs,
                       const std::filesystem::path& filename) try {
    std::ofstream file(filename, std::ios::binary | std::ios::ate | std::ios::app);
    file.exceptions(std::ifstream::failbit);
    if (!file.is_open()) {
        LOG_ERROR(Common_Filesystem, "Failed to open pipeline cache file {}",
                  Common::FS::PathToUTF8String(filename));
        return;
    }
    if (file.tellp() == 0) {
        // Write header
        file.write(MAGIC_NUMBER.data(), MAGIC_NUMBER.size())
            .write(reinterpret_cast<const char*>(&CACHE_VERSION), sizeof(CACHE_VERSION));
    }
    if (!std::ranges::all_of(envs, &GenericEnvironment::CanBeSerialized)) {
        return;
    }
    const u32 num_envs{static_cast<u32>(envs.size())};
    file.write(reinterpret_cast<const char*>(&num_envs), sizeof(num_envs));
    for (const GenericEnvironment* const env : envs) {
        env->Serialize(file);
    }
    file.write(key.data(), key.size_bytes());

} catch (const std::ios_base::failure& e) {
    LOG_ERROR(Common_Filesystem, "{}", e.what());
    if (!Common::FS::RemoveFile(filename)) {
        LOG_ERROR(Common_Filesystem, "Failed to delete pipeline cache file {}",
                  Common::FS::PathToUTF8String(filename));
    }
}

void LoadPipelines(
    std::stop_token stop_loading, const std::filesystem::path& filename,
    Common::UniqueFunction<void, std::ifstream&, FileEnvironment> load_compute,
    Common::UniqueFunction<void, std::ifstream&, std::vector<FileEnvironment>> load_graphics) try {
    std::ifstream file(filename, std::ios::binary | std::ios::ate);
    if (!file.is_open()) {
        return;
    }
    file.exceptions(std::ifstream::failbit);
    const auto end{file.tellg()};
    file.seekg(0, std::ios::beg);

    std::array<char, 8> magic_number;
    u32 cache_version;
    file.read(magic_number.data(), magic_number.size())
        .read(reinterpret_cast<char*>(&cache_version), sizeof(cache_version));
    if (magic_number != MAGIC_NUMBER || cache_version != CACHE_VERSION) {
        file.close();
        if (Common::FS::RemoveFile(filename)) {
            if (magic_number != MAGIC_NUMBER) {
                LOG_ERROR(Common_Filesystem, "Invalid pipeline cache file");
            }
            if (cache_version != CACHE_VERSION) {
                LOG_INFO(Common_Filesystem, "Deleting old pipeline cache");
            }
        } else {
            LOG_ERROR(Common_Filesystem,
                      "Invalid pipeline cache file and failed to delete it in \"{}\"",
                      Common::FS::PathToUTF8String(filename));
        }
        return;
    }
    while (file.tellg() != end) {
        if (stop_loading.stop_requested()) {
            return;
        }
        u32 num_envs{};
        file.read(reinterpret_cast<char*>(&num_envs), sizeof(num_envs));
        std::vector<FileEnvironment> envs(num_envs);
        for (FileEnvironment& env : envs) {
            env.Deserialize(file);
        }
        if (envs.front().ShaderStage() == Shader::Stage::Compute) {
            load_compute(file, std::move(envs.front()));
        } else {
            load_graphics(file, std::move(envs));
        }
    }

} catch (const std::ios_base::failure& e) {
    LOG_ERROR(Common_Filesystem, "{}", e.what());
    if (!Common::FS::RemoveFile(filename)) {
        LOG_ERROR(Common_Filesystem, "Failed to delete pipeline cache file {}",
                  Common::FS::PathToUTF8String(filename));
    }
}

} // namespace VideoCommon
src/video_core/shader_environment.h  @@ -0,0 +1,198 @@

// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <atomic>
#include <filesystem>
#include <iosfwd>
#include <limits>
#include <memory>
#include <optional>
#include <span>
#include <stop_token>
#include <type_traits>
#include <unordered_map>
#include <vector>

#include "common/common_types.h"
#include "common/unique_function.h"
#include "shader_recompiler/environment.h"
#include "video_core/engines/kepler_compute.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/textures/texture.h"

namespace Tegra {
class MemoryManager;
}

namespace VideoCommon {

struct TextureHandle {
    explicit TextureHandle(u32 data, bool via_header_index) {
        if (via_header_index) {
            // The handle is already a raw index into the header pools; it
            // addresses both the image and the sampler.
            image = data;
            sampler = data;
        } else {
            const Tegra::Texture::TextureHandle handle{data};
            image = handle.tic_id;
            sampler = handle.tsc_id.Value();
        }
    }

    u32 image;
    u32 sampler;
};
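In the non-header-index path, a single 32-bit handle carries both descriptor indices. A standalone sketch of the unpacking; the bit widths below are the conventional Maxwell split and are an assumption here, the authoritative layout is Tegra::Texture::TextureHandle in video_core/textures/texture.h:

#include <cassert>
#include <cstdint>

int main() {
    // Assumed packing for illustration: low 20 bits index the image
    // descriptor (TIC), high 12 bits index the sampler descriptor (TSC).
    const std::uint32_t raw = (7u << 20) | 42u;
    const std::uint32_t tic_id = raw & 0xFFFFFu; // image descriptor index
    const std::uint32_t tsc_id = raw >> 20;      // sampler descriptor index
    assert(tic_id == 42 && tsc_id == 7);
}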

class GenericEnvironment : public Shader::Environment {
public:
    explicit GenericEnvironment() = default;
    explicit GenericEnvironment(Tegra::MemoryManager& gpu_memory_, GPUVAddr program_base_,
                                u32 start_address_);

    ~GenericEnvironment() override;

    [[nodiscard]] u32 TextureBoundBuffer() const final;

    [[nodiscard]] u32 LocalMemorySize() const final;

    [[nodiscard]] u32 SharedMemorySize() const final;

    [[nodiscard]] std::array<u32, 3> WorkgroupSize() const final;

    [[nodiscard]] u64 ReadInstruction(u32 address) final;

    [[nodiscard]] std::optional<u64> Analyze();

    void SetCachedSize(size_t size_bytes);

    [[nodiscard]] size_t CachedSize() const noexcept;

    [[nodiscard]] size_t ReadSize() const noexcept;

    [[nodiscard]] bool CanBeSerialized() const noexcept;

    [[nodiscard]] u64 CalculateHash() const;

    void Serialize(std::ofstream& file) const;

protected:
    std::optional<u64> TryFindSize();

    Shader::TextureType ReadTextureTypeImpl(GPUVAddr tic_addr, u32 tic_limit,
                                            bool via_header_index, u32 raw);

    Tegra::MemoryManager* gpu_memory{};
    GPUVAddr program_base{};

    std::vector<u64> code;
    std::unordered_map<u32, Shader::TextureType> texture_types;
    std::unordered_map<u64, u32> cbuf_values;

    u32 local_memory_size{};
    u32 texture_bound{};
    u32 shared_memory_size{};
    std::array<u32, 3> workgroup_size{};

    u32 read_lowest = std::numeric_limits<u32>::max();
    u32 read_highest = 0;

    u32 cached_lowest = std::numeric_limits<u32>::max();
    u32 cached_highest = 0;

    bool has_unbound_instructions = false;
};

class GraphicsEnvironment final : public GenericEnvironment {
public:
    explicit GraphicsEnvironment() = default;
    explicit GraphicsEnvironment(Tegra::Engines::Maxwell3D& maxwell3d_,
                                 Tegra::MemoryManager& gpu_memory_,
                                 Tegra::Engines::Maxwell3D::Regs::ShaderProgram program,
                                 GPUVAddr program_base_, u32 start_address_);

    ~GraphicsEnvironment() override = default;

    u32 ReadCbufValue(u32 cbuf_index, u32 cbuf_offset) override;

    Shader::TextureType ReadTextureType(u32 handle) override;

private:
    Tegra::Engines::Maxwell3D* maxwell3d{};
    size_t stage_index{};
};

class ComputeEnvironment final : public GenericEnvironment {
public:
    explicit ComputeEnvironment() = default;
    explicit ComputeEnvironment(Tegra::Engines::KeplerCompute& kepler_compute_,
                                Tegra::MemoryManager& gpu_memory_, GPUVAddr program_base_,
                                u32 start_address_);

    ~ComputeEnvironment() override = default;

    u32 ReadCbufValue(u32 cbuf_index, u32 cbuf_offset) override;

    Shader::TextureType ReadTextureType(u32 handle) override;

private:
    Tegra::Engines::KeplerCompute* kepler_compute{};
};

class FileEnvironment final : public Shader::Environment {
public:
    FileEnvironment() = default;
    ~FileEnvironment() override = default;

    FileEnvironment& operator=(FileEnvironment&&) noexcept = default;
    FileEnvironment(FileEnvironment&&) noexcept = default;

    FileEnvironment& operator=(const FileEnvironment&) = delete;
    FileEnvironment(const FileEnvironment&) = delete;

    void Deserialize(std::ifstream& file);

    [[nodiscard]] u64 ReadInstruction(u32 address) override;

    [[nodiscard]] u32 ReadCbufValue(u32 cbuf_index, u32 cbuf_offset) override;

    [[nodiscard]] Shader::TextureType ReadTextureType(u32 handle) override;

    [[nodiscard]] u32 LocalMemorySize() const override;

    [[nodiscard]] u32 SharedMemorySize() const override;

    [[nodiscard]] u32 TextureBoundBuffer() const override;

    [[nodiscard]] std::array<u32, 3> WorkgroupSize() const override;

private:
    std::unique_ptr<u64[]> code;
    std::unordered_map<u32, Shader::TextureType> texture_types;
    std::unordered_map<u64, u32> cbuf_values;
    std::array<u32, 3> workgroup_size{};
    u32 local_memory_size{};
    u32 shared_memory_size{};
    u32 texture_bound{};
    u32 read_lowest{};
    u32 read_highest{};
};

void SerializePipeline(std::span<const char> key, std::span<const GenericEnvironment* const> envs,
                       const std::filesystem::path& filename);

template <typename Key, typename Envs>
void SerializePipeline(const Key& key, const Envs& envs, const std::filesystem::path& filename) {
    static_assert(std::is_trivially_copyable_v<Key>);
    static_assert(std::has_unique_object_representations_v<Key>);
    SerializePipeline(std::span(reinterpret_cast<const char*>(&key), sizeof(key)),
                      std::span(envs.data(), envs.size()), filename);
}

void LoadPipelines(
    std::stop_token stop_loading, const std::filesystem::path& filename,
    Common::UniqueFunction<void, std::ifstream&, FileEnvironment> load_compute,
    Common::UniqueFunction<void, std::ifstream&, std::vector<FileEnvironment>> load_graphics);

} // namespace VideoCommon
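A hedged sketch of how a backend might drive this API; the key struct, function name, and lambda bodies here are hypothetical and for illustration only, vk_pipeline_cache.cpp is the real consumer:

#include <filesystem>
#include <fstream>
#include <span>
#include <stop_token>
#include <vector>

#include "video_core/shader_environment.h"

// Hypothetical fixed-size pipeline key; any trivially copyable type with
// unique object representations satisfies the template's static_asserts.
struct ExamplePipelineKey {
    u64 unique_hashes[6];
};

void Example(std::stop_token stop, const std::filesystem::path& cache_path,
             const ExamplePipelineKey& key,
             std::span<const VideoCommon::GenericEnvironment* const> envs) {
    // Append one pipeline record; the magic/version header is written on first use.
    VideoCommon::SerializePipeline(key, envs, cache_path);

    // Replay the whole cache. Each callback receives the stream positioned
    // right after the environments, where the serialized key begins.
    VideoCommon::LoadPipelines(
        stop, cache_path,
        [](std::ifstream& file, VideoCommon::FileEnvironment env) {
            // read the compute key from `file`, then build the pipeline...
        },
        [](std::ifstream& file, std::vector<VideoCommon::FileEnvironment> envs) {
            // read the graphics key from `file`, then build the pipeline...
        });
}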