committed by
ameerj
33 changed files with 1489 additions and 342 deletions
-
3src/shader_recompiler/CMakeLists.txt
-
69src/shader_recompiler/backend/spirv/emit_context.cpp
-
7src/shader_recompiler/backend/spirv/emit_context.h
-
12src/shader_recompiler/backend/spirv/emit_spirv.cpp
-
32src/shader_recompiler/backend/spirv/emit_spirv.h
-
48src/shader_recompiler/backend/spirv/emit_spirv_convert.cpp
-
146src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
-
18src/shader_recompiler/backend/spirv/emit_spirv_memory.cpp
-
2src/shader_recompiler/environment.h
-
4src/shader_recompiler/file_environment.cpp
-
4src/shader_recompiler/file_environment.h
-
133src/shader_recompiler/frontend/ir/ir_emitter.cpp
-
21src/shader_recompiler/frontend/ir/ir_emitter.h
-
73src/shader_recompiler/frontend/ir/microinstruction.cpp
-
22src/shader_recompiler/frontend/ir/microinstruction.h
-
10src/shader_recompiler/frontend/ir/modifiers.h
-
2src/shader_recompiler/frontend/ir/opcodes.cpp
-
569src/shader_recompiler/frontend/ir/opcodes.inc
-
11src/shader_recompiler/frontend/ir/reg.h
-
1src/shader_recompiler/frontend/ir/value.h
-
4src/shader_recompiler/frontend/maxwell/maxwell.inc
-
1src/shader_recompiler/frontend/maxwell/program.cpp
-
8src/shader_recompiler/frontend/maxwell/translate/impl/not_implemented.cpp
-
232src/shader_recompiler/frontend/maxwell/translate/impl/texture_sample.cpp
-
19src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
-
15src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
-
2src/shader_recompiler/ir_opt/passes.h
-
199src/shader_recompiler/ir_opt/texture_pass.cpp
-
52src/shader_recompiler/shader_info.h
-
101src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
-
4src/video_core/renderer_vulkan/vk_compute_pipeline.h
-
4src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
-
3src/video_core/renderer_vulkan/vk_rasterizer.cpp
@ -0,0 +1,146 @@ |
|||||
|
// Copyright 2021 yuzu Emulator Project
|
||||
|
// Licensed under GPLv2 or any later version
|
||||
|
// Refer to the license.txt file included.
|
||||
|
|
||||
|
#include <boost/container/static_vector.hpp>
|
||||
|
|
||||
|
#include "shader_recompiler/backend/spirv/emit_spirv.h"
|
||||
|
#include "shader_recompiler/frontend/ir/modifiers.h"
|
||||
|
|
||||
|
namespace Shader::Backend::SPIRV { |
||||
|
namespace { |
||||
|
// Accumulates the optional SPIR-V image-operands mask and the matching operand list for a
// sample instruction. SPIR-V requires the extra operands to appear in ascending order of
// their mask bits (Bias < Lod < Offset < MinLod), which is exactly the order the
// constructor adds them in — do not reorder the Add calls.
class ImageOperands {
public:
    // has_bias/has_lod/has_lod_clamp mirror the IR::TextureInstInfo flags. 'lod' carries
    // either the bias or the explicit LOD; when a LOD clamp is also present it is a
    // two-element composite of (bias-or-lod, clamp). 'offset' is only added when valid.
    explicit ImageOperands(EmitContext& ctx, bool has_bias, bool has_lod, bool has_lod_clamp,
                           Id lod, Id offset) {
        if (has_bias) {
            // With a LOD clamp the bias is packed in element 0 of the composite
            const Id bias{has_lod_clamp ? ctx.OpCompositeExtract(ctx.F32[1], lod, 0) : lod};
            Add(spv::ImageOperandsMask::Bias, bias);
        }
        if (has_lod) {
            // Same packing rule applies to an explicit LOD
            const Id lod_value{has_lod_clamp ? ctx.OpCompositeExtract(ctx.F32[1], lod, 0) : lod};
            Add(spv::ImageOperandsMask::Lod, lod_value);
        }
        if (Sirit::ValidId(offset)) {
            Add(spv::ImageOperandsMask::Offset, offset);
        }
        if (has_lod_clamp) {
            // The clamp shares the composite with the bias; element 1 holds the clamp value
            const Id lod_clamp{has_bias ? ctx.OpCompositeExtract(ctx.F32[1], lod, 1) : lod};
            Add(spv::ImageOperandsMask::MinLod, lod_clamp);
        }
    }

    // Merges 'new_mask' into the accumulated mask and appends its operand. Callers must
    // invoke this in ascending mask-bit order to keep mask and operand list consistent.
    void Add(spv::ImageOperandsMask new_mask, Id value) {
        mask = static_cast<spv::ImageOperandsMask>(static_cast<unsigned>(mask) |
                                                   static_cast<unsigned>(new_mask));
        operands.push_back(value);
    }

    // View over the collected operands, in the order they were added
    std::span<const Id> Span() const noexcept {
        return std::span{operands.data(), operands.size()};
    }

    // Combined mask describing which optional image operands are present
    spv::ImageOperandsMask Mask() const noexcept {
        return mask;
    }

private:
    boost::container::static_vector<Id, 3> operands; // at most bias-or-lod, offset, min-lod
    spv::ImageOperandsMask mask{};
};
||||
|
|
||||
|
// Loads the sampled-image value for the texture bound at a compile-time descriptor index.
// Indirect (non-immediate) indices are not supported yet.
Id Texture(EmitContext& ctx, const IR::Value& index) {
    if (!index.IsImmediate()) {
        throw NotImplementedException("Indirect texture sample");
    }
    const TextureDefinition& def{ctx.textures.at(index.U32())};
    return ctx.OpLoad(def.type, def.id);
}
||||
|
|
||||
|
// Emits either the sparse or the non-sparse variant of a sample instruction, depending on
// whether the IR instruction has a GetSparseFromOp pseudo-instruction attached.
// MethodPtrType is a pointer to an EmitContext member function; result_type is the SPIR-V
// type of the texel result.
template <typename MethodPtrType, typename... Args>
Id Emit(MethodPtrType sparse_ptr, MethodPtrType non_sparse_ptr, EmitContext& ctx, IR::Inst* inst,
        Id result_type, Args&&... args) {
    IR::Inst* const sparse{inst->GetAssociatedPseudoOperation(IR::Opcode::GetSparseFromOp)};
    if (!sparse) {
        // No residency query requested: emit the plain sample instruction
        return (ctx.*non_sparse_ptr)(result_type, std::forward<Args>(args)...);
    }
    // Sparse variants return a struct of {residency code, texel}
    const Id struct_type{ctx.TypeStruct(ctx.U32[1], result_type)};
    const Id sample{(ctx.*sparse_ptr)(struct_type, std::forward<Args>(args)...)};
    const Id resident_code{ctx.OpCompositeExtract(ctx.U32[1], sample, 0U)};
    // Resolve the pseudo-instruction to the boolean residency result and retire it
    sparse->SetDefinition(ctx.OpImageSparseTexelsResident(ctx.U1, resident_code));
    sparse->Invalidate();
    return ctx.OpCompositeExtract(result_type, sample, 1U);
}
||||
|
} // Anonymous namespace
|
||||
|
|
||||
|
// The bindless and bound image-sample opcodes are lowered to their indexed equivalents by
// the texture pass before SPIR-V emission, so reaching any of these emitters indicates a
// pipeline bug rather than unimplemented hardware behavior.

Id EmitBindlessImageSampleImplicitLod(EmitContext&) {
    throw LogicError("Unreachable instruction");
}

Id EmitBindlessImageSampleExplicitLod(EmitContext&) {
    throw LogicError("Unreachable instruction");
}

Id EmitBindlessImageSampleDrefImplicitLod(EmitContext&) {
    throw LogicError("Unreachable instruction");
}

Id EmitBindlessImageSampleDrefExplicitLod(EmitContext&) {
    throw LogicError("Unreachable instruction");
}

Id EmitBoundImageSampleImplicitLod(EmitContext&) {
    throw LogicError("Unreachable instruction");
}

Id EmitBoundImageSampleExplicitLod(EmitContext&) {
    throw LogicError("Unreachable instruction");
}

Id EmitBoundImageSampleDrefImplicitLod(EmitContext&) {
    throw LogicError("Unreachable instruction");
}

Id EmitBoundImageSampleDrefExplicitLod(EmitContext&) {
    throw LogicError("Unreachable instruction");
}
||||
|
|
||||
|
// Samples a color texture with an implicit LOD, forwarding optional bias/clamp/offset.
Id EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
                              Id bias_lc, Id offset) {
    const auto info{inst->Flags<IR::TextureInstInfo>()};
    const bool has_bias{info.has_bias != 0};
    const bool has_lod_clamp{info.has_lod_clamp != 0};
    const ImageOperands operands(ctx, has_bias, false, has_lod_clamp, bias_lc, offset);
    const Id texture{Texture(ctx, index)};
    return Emit(&EmitContext::OpImageSparseSampleImplicitLod,
                &EmitContext::OpImageSampleImplicitLod, ctx, inst, ctx.F32[4], texture, coords,
                operands.Mask(), operands.Span());
}
||||
|
|
||||
|
// Samples a color texture with an explicit LOD, forwarding optional clamp/offset.
Id EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
                              Id lod_lc, Id offset) {
    const auto info{inst->Flags<IR::TextureInstInfo>()};
    const bool has_lod_clamp{info.has_lod_clamp != 0};
    const ImageOperands operands(ctx, false, true, has_lod_clamp, lod_lc, offset);
    const Id texture{Texture(ctx, index)};
    return Emit(&EmitContext::OpImageSparseSampleExplicitLod,
                &EmitContext::OpImageSampleExplicitLod, ctx, inst, ctx.F32[4], texture, coords,
                operands.Mask(), operands.Span());
}
||||
|
|
||||
|
// Depth-compare sample with implicit LOD; the result is a single F32 comparison value.
Id EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index,
                                  Id coords, Id dref, Id bias_lc, Id offset) {
    const auto info{inst->Flags<IR::TextureInstInfo>()};
    const bool has_bias{info.has_bias != 0};
    const bool has_lod_clamp{info.has_lod_clamp != 0};
    const ImageOperands operands(ctx, has_bias, false, has_lod_clamp, bias_lc, offset);
    const Id texture{Texture(ctx, index)};
    return Emit(&EmitContext::OpImageSparseSampleDrefImplicitLod,
                &EmitContext::OpImageSampleDrefImplicitLod, ctx, inst, ctx.F32[1], texture,
                coords, dref, operands.Mask(), operands.Span());
}
||||
|
|
||||
|
// Depth-compare sample with explicit LOD; the result is a single F32 comparison value.
Id EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index,
                                  Id coords, Id dref, Id lod_lc, Id offset) {
    const auto info{inst->Flags<IR::TextureInstInfo>()};
    const bool has_lod_clamp{info.has_lod_clamp != 0};
    const ImageOperands operands(ctx, false, true, has_lod_clamp, lod_lc, offset);
    const Id texture{Texture(ctx, index)};
    return Emit(&EmitContext::OpImageSparseSampleDrefExplicitLod,
                &EmitContext::OpImageSampleDrefExplicitLod, ctx, inst, ctx.F32[1], texture,
                coords, dref, operands.Mask(), operands.Span());
}
||||
|
|
||||
|
} // namespace Shader::Backend::SPIRV
|
||||
@ -0,0 +1,232 @@ |
|||||
|
// Copyright 2021 yuzu Emulator Project
|
||||
|
// Licensed under GPLv2 or any later version
|
||||
|
// Refer to the license.txt file included.
|
||||
|
|
||||
|
#include <optional>
|
||||
|
|
||||
|
#include "common/bit_field.h"
|
||||
|
#include "common/common_types.h"
|
||||
|
#include "shader_recompiler/frontend/ir/modifiers.h"
|
||||
|
#include "shader_recompiler/frontend/maxwell/translate/impl/impl.h"
|
||||
|
|
||||
|
namespace Shader::Maxwell { |
||||
|
namespace { |
||||
|
// Bias/LOD mode field of TEX-style instructions. From this file's usage: LZ forces LOD to
// zero and counts as explicit; LB/LBA set the bias flag; LB/LL/LBA/LLA consume one extra
// register for the bias or LOD value (see MakeLod/HasExplicitLod/Impl).
enum class Blod : u64 {
    None,         // implicit LOD, no extra operand
    LZ,           // explicit LOD forced to 0.0
    LB,           // implicit LOD with bias operand
    LL,           // explicit LOD operand
    INVALIDBLOD4, // reserved encoding — rejected in MakeLod
    INVALIDBLOD5, // reserved encoding — rejected in MakeLod
    LBA,          // bias variant — presumably the "aggregate" form; TODO confirm semantics
    LLA,          // explicit-LOD variant — presumably the "aggregate" form; TODO confirm
};
||||
|
|
||||
|
// Hardware texture dimensionality encoded in the instruction (3-bit field). Leading
// underscores avoid identifiers starting with a digit. Mapped to the IR texture type by
// GetType; ARRAY_3D has no mapping and is rejected.
enum class TextureType : u64 {
    _1D,
    ARRAY_1D,
    _2D,
    ARRAY_2D,
    _3D,
    ARRAY_3D,
    CUBE,
    ARRAY_CUBE,
};
||||
|
|
||||
|
// Translates the encoded Maxwell texture type to the IR texture type, selecting the
// shadow variant when depth compare (dc) is enabled. 3D array textures and out-of-range
// encodings are rejected.
Shader::TextureType GetType(TextureType type, bool dc) {
    switch (type) {
    case TextureType::_1D:
        return dc ? Shader::TextureType::Shadow1D : Shader::TextureType::Color1D;
    case TextureType::ARRAY_1D:
        return dc ? Shader::TextureType::ShadowArray1D : Shader::TextureType::ColorArray1D;
    case TextureType::_2D:
        return dc ? Shader::TextureType::Shadow2D : Shader::TextureType::Color2D;
    case TextureType::ARRAY_2D:
        return dc ? Shader::TextureType::ShadowArray2D : Shader::TextureType::ColorArray2D;
    case TextureType::_3D:
        return dc ? Shader::TextureType::Shadow3D : Shader::TextureType::Color3D;
    case TextureType::ARRAY_3D:
        throw NotImplementedException("3D array texture type");
    case TextureType::CUBE:
        return dc ? Shader::TextureType::ShadowCube : Shader::TextureType::ColorCube;
    case TextureType::ARRAY_CUBE:
        return dc ? Shader::TextureType::ShadowArrayCube : Shader::TextureType::ColorArrayCube;
    }
    // The 3-bit field can only hold the values above; this catches corrupted casts
    throw NotImplementedException("Invalid texture type {}", type);
}
||||
|
|
||||
|
// Packs the coordinate registers starting at 'reg' into an IR composite matching 'type'.
// Array layouts store the integer layer index in the first register; it is converted to
// float here so every component of the composite shares one element type.
IR::Value MakeCoords(TranslatorVisitor& v, IR::Reg reg, TextureType type) {
    // Reads the array layer as an unsigned integer and converts it to F32
    const auto read_array{[&]() -> IR::F32 { return v.ir.ConvertUToF(32, v.X(reg)); }};
    switch (type) {
    case TextureType::_1D:
        return v.F(reg);
    case TextureType::ARRAY_1D:
        return v.ir.CompositeConstruct(read_array(), v.F(reg + 1));
    case TextureType::_2D:
        return v.ir.CompositeConstruct(v.F(reg), v.F(reg + 1));
    case TextureType::ARRAY_2D:
        return v.ir.CompositeConstruct(read_array(), v.F(reg + 1), v.F(reg + 2));
    case TextureType::_3D:
        return v.ir.CompositeConstruct(v.F(reg), v.F(reg + 1), v.F(reg + 2));
    case TextureType::ARRAY_3D:
        throw NotImplementedException("3D array texture type");
    case TextureType::CUBE:
        return v.ir.CompositeConstruct(v.F(reg), v.F(reg + 1), v.F(reg + 2));
    case TextureType::ARRAY_CUBE:
        return v.ir.CompositeConstruct(read_array(), v.F(reg + 1), v.F(reg + 2), v.F(reg + 3));
    }
    throw NotImplementedException("Invalid texture type {}", type);
}
||||
|
|
||||
|
// Produces the bias or explicit LOD value for the given mode, consuming one register when
// the mode carries an operand. Modes without a LOD component yield an immediate 0.0f.
IR::F32 MakeLod(TranslatorVisitor& v, IR::Reg& reg, Blod blod) {
    switch (blod) {
    case Blod::None:
    case Blod::LZ:
        // No register is consumed; the LOD is implicitly zero
        return v.ir.Imm32(0.0f);
    case Blod::LB:
    case Blod::LL:
    case Blod::LBA:
    case Blod::LLA:
        // These modes carry the bias/LOD in the next operand register
        return v.F(reg++);
    default:
        throw NotImplementedException("Invalid blod {}", blod);
    }
}
||||
|
|
||||
|
// Unpacks the AOFFI texel offsets from a single register (one 4-bit field per coordinate
// component, starting at bit 0) and consumes that register. Cube textures cannot take an
// offset.
// NOTE(review): hardware AOFFI offsets are 4-bit two's-complement values; this extracts
// them as unsigned bitfields — confirm whether a signed extract is required here.
IR::Value MakeOffset(TranslatorVisitor& v, IR::Reg& reg, TextureType type) {
    const IR::U32 value{v.X(reg++)};
    switch (type) {
    case TextureType::_1D:
    case TextureType::ARRAY_1D:
        return v.ir.BitFieldExtract(value, v.ir.Imm32(0), v.ir.Imm32(4));
    case TextureType::_2D:
    case TextureType::ARRAY_2D:
        return v.ir.CompositeConstruct(v.ir.BitFieldExtract(value, v.ir.Imm32(0), v.ir.Imm32(4)),
                                       v.ir.BitFieldExtract(value, v.ir.Imm32(4), v.ir.Imm32(4)));
    case TextureType::_3D:
    case TextureType::ARRAY_3D:
        return v.ir.CompositeConstruct(v.ir.BitFieldExtract(value, v.ir.Imm32(0), v.ir.Imm32(4)),
                                       v.ir.BitFieldExtract(value, v.ir.Imm32(4), v.ir.Imm32(4)),
                                       v.ir.BitFieldExtract(value, v.ir.Imm32(8), v.ir.Imm32(4)));
    case TextureType::CUBE:
    case TextureType::ARRAY_CUBE:
        throw NotImplementedException("Illegal offset on CUBE sample");
    }
    throw NotImplementedException("Invalid texture type {}", type);
}
||||
|
|
||||
|
// True when the mode supplies the LOD explicitly (including LZ, whose LOD is a forced 0).
bool HasExplicitLod(Blod blod) {
    return blod == Blod::LL || blod == Blod::LLA || blod == Blod::LZ;
}
||||
|
|
||||
|
// Shared TEX / TEX.B implementation: decodes the instruction fields, gathers the sample
// arguments from the register file, emits the IR sample instruction and writes back the
// masked result components. 'cbuf_offset' is set for the bound (TEX) form; std::nullopt
// selects the bindless (TEX.B) form where the handle is read from a register instead.
void Impl(TranslatorVisitor& v, u64 insn, bool aoffi, Blod blod, bool lc,
          std::optional<u32> cbuf_offset) {
    union {
        u64 raw;
        BitField<35, 1, u64> ndv;
        BitField<49, 1, u64> nodep;
        BitField<50, 1, u64> dc;               // depth-compare sample
        BitField<51, 3, IR::Pred> sparse_pred; // predicate receiving sparse residency
        BitField<0, 8, IR::Reg> dest_reg;
        BitField<8, 8, IR::Reg> coord_reg;
        BitField<20, 8, IR::Reg> meta_reg;     // first register of the packed operands
        BitField<28, 3, TextureType> type;
        BitField<31, 4, u64> mask;             // which result components are written
    } const tex{insn};

    if (lc) {
        // LOD clamp decoding is not implemented; note this also means lod_clamp below is
        // never populated and has_lod_clamp is effectively always 0 for now
        throw NotImplementedException("LC");
    }
    const IR::Value coords{MakeCoords(v, tex.coord_reg, tex.type)};

    // Extra operands are packed consecutively from meta_reg in this order:
    // handle (bindless only), lod/bias, AOFFI offset, depth-compare reference.
    // The helpers consume registers via meta_reg, so call order is load-bearing.
    IR::Reg meta_reg{tex.meta_reg};
    IR::Value handle;
    IR::Value offset;
    IR::F32 dref;
    IR::F32 lod_clamp; // intentionally left invalid while LC throws above
    if (cbuf_offset) {
        handle = v.ir.Imm32(*cbuf_offset);
    } else {
        handle = v.X(meta_reg++);
    }
    const IR::F32 lod{MakeLod(v, meta_reg, blod)};
    if (aoffi) {
        offset = MakeOffset(v, meta_reg, tex.type);
    }
    if (tex.dc != 0) {
        dref = v.F(meta_reg++);
    }
    IR::TextureInstInfo info{};
    info.type.Assign(GetType(tex.type, tex.dc != 0));
    info.has_bias.Assign(blod == Blod::LB || blod == Blod::LBA ? 1 : 0);
    info.has_lod_clamp.Assign(lc ? 1 : 0);

    // Select the IR sample opcode from the depth-compare flag and the LOD mode
    const IR::Value sample{[&]() -> IR::Value {
        if (tex.dc == 0) {
            if (HasExplicitLod(blod)) {
                return v.ir.ImageSampleExplicitLod(handle, coords, lod, offset, lod_clamp, info);
            } else {
                return v.ir.ImageSampleImplicitLod(handle, coords, lod, offset, lod_clamp, info);
            }
        }
        if (HasExplicitLod(blod)) {
            return v.ir.ImageSampleDrefExplicitLod(handle, coords, dref, lod, offset, lod_clamp,
                                                   info);
        } else {
            return v.ir.ImageSampleDrefImplicitLod(handle, coords, dref, lod, offset, lod_clamp,
                                                   info);
        }
    }()};

    // Write back only the components selected by the mask
    for (int element = 0; element < 4; ++element) {
        if (((tex.mask >> element) & 1) == 0) {
            continue;
        }
        IR::F32 value;
        if (tex.dc != 0) {
            // Depth compare yields a scalar: replicate it into RGB and force alpha to 1.0
            value = element < 3 ? IR::F32{sample} : v.ir.Imm32(1.0f);
        } else {
            value = IR::F32{v.ir.CompositeExtract(sample, element)};
        }
        v.F(tex.dest_reg + element, value);
    }
    if (tex.sparse_pred != IR::Pred::PT) {
        // The predicate is set when the texel was NOT resident (residency is negated)
        v.ir.SetPred(tex.sparse_pred, v.ir.LogicalNot(v.ir.GetSparseFromOp(sample)));
    }
}
||||
|
} // Anonymous namespace
|
||||
|
|
||||
|
// TEX: texture sample with the handle taken from a fixed constant-buffer offset
void TranslatorVisitor::TEX(u64 insn) {
    union {
        u64 raw;
        BitField<54, 1, u64> aoffi;        // texel offsets packed in a register operand
        BitField<55, 3, Blod> blod;        // bias/LOD mode
        BitField<58, 1, u64> lc;           // LOD clamp (currently unimplemented in Impl)
        BitField<36, 13, u64> cbuf_offset; // constant-buffer offset of the handle
    } const tex{insn};

    Impl(*this, insn, tex.aoffi != 0, tex.blod, tex.lc != 0, static_cast<u32>(tex.cbuf_offset));
}
||||
|
|
||||
|
// TEX.B: bindless texture sample — no constant-buffer offset, the handle comes from a
// register (signalled to Impl by passing std::nullopt)
void TranslatorVisitor::TEX_b(u64 insn) {
    union {
        u64 raw;
        BitField<36, 1, u64> aoffi; // texel offsets packed in a register operand
        BitField<37, 3, Blod> blod; // bias/LOD mode
        BitField<40, 1, u64> lc;    // LOD clamp (currently unimplemented in Impl)
    } const tex{insn};

    Impl(*this, insn, tex.aoffi != 0, tex.blod, tex.lc != 0, std::nullopt);
}
||||
|
|
||||
|
} // namespace Shader::Maxwell
|
||||
@ -0,0 +1,199 @@ |
|||||
|
// Copyright 2021 yuzu Emulator Project
|
||||
|
// Licensed under GPLv2 or any later version
|
||||
|
// Refer to the license.txt file included.
|
||||
|
|
||||
|
#include <optional>
|
||||
|
|
||||
|
#include <boost/container/flat_set.hpp>
|
||||
|
#include <boost/container/small_vector.hpp>
|
||||
|
|
||||
|
#include "shader_recompiler/environment.h"
|
||||
|
#include "shader_recompiler/frontend/ir/basic_block.h"
|
||||
|
#include "shader_recompiler/frontend/ir/ir_emitter.h"
|
||||
|
#include "shader_recompiler/ir_opt/passes.h"
|
||||
|
#include "shader_recompiler/shader_info.h"
|
||||
|
|
||||
|
namespace Shader::Optimization { |
||||
|
namespace { |
||||
|
// Location of a texture handle inside a constant buffer
struct ConstBufferAddr {
    u32 index;  // constant buffer slot
    u32 offset; // offset of the handle within that buffer
};
||||
|
|
||||
|
// A texture instruction queued for replacement, with the constant-buffer address its
// handle was resolved to and the block it lives in
struct TextureInst {
    ConstBufferAddr cbuf;
    IR::Inst* inst;
    IR::Block* block;
};
||||
|
|
||||
|
// Texture instructions collected from a program before being rewritten
using TextureInstVector = boost::container::small_vector<TextureInst, 24>;

// Blocks already visited while tracking a value through phi nodes (see Track)
using VisitedBlocks = boost::container::flat_set<IR::Block*, std::less<IR::Block*>,
                                                 boost::container::small_vector<IR::Block*, 2>>;
||||
|
|
||||
|
// Maps a bound/bindless sample opcode to its indexed equivalent, or Void when the
// instruction is not a texture instruction handled by this pass.
IR::Opcode IndexedInstruction(const IR::Inst& inst) {
    switch (inst.Opcode()) {
    case IR::Opcode::BoundImageSampleImplicitLod:
    case IR::Opcode::BindlessImageSampleImplicitLod:
        return IR::Opcode::ImageSampleImplicitLod;
    case IR::Opcode::BoundImageSampleExplicitLod:
    case IR::Opcode::BindlessImageSampleExplicitLod:
        return IR::Opcode::ImageSampleExplicitLod;
    case IR::Opcode::BoundImageSampleDrefImplicitLod:
    case IR::Opcode::BindlessImageSampleDrefImplicitLod:
        return IR::Opcode::ImageSampleDrefImplicitLod;
    case IR::Opcode::BoundImageSampleDrefExplicitLod:
    case IR::Opcode::BindlessImageSampleDrefExplicitLod:
        return IR::Opcode::ImageSampleDrefExplicitLod;
    default:
        return IR::Opcode::Void;
    }
}
||||
|
|
||||
|
// Distinguishes bindless sample opcodes (handle tracked through the IR) from bound ones
// (handle offset stored as an immediate argument). Throws on non-texture opcodes, so only
// call after IsTextureInstruction.
bool IsBindless(const IR::Inst& inst) {
    switch (inst.Opcode()) {
    case IR::Opcode::BindlessImageSampleImplicitLod:
    case IR::Opcode::BindlessImageSampleExplicitLod:
    case IR::Opcode::BindlessImageSampleDrefImplicitLod:
    case IR::Opcode::BindlessImageSampleDrefExplicitLod:
        return true;
    case IR::Opcode::BoundImageSampleImplicitLod:
    case IR::Opcode::BoundImageSampleExplicitLod:
    case IR::Opcode::BoundImageSampleDrefImplicitLod:
    case IR::Opcode::BoundImageSampleDrefExplicitLod:
        return false;
    default:
        throw InvalidArgument("Invalid opcode {}", inst.Opcode());
    }
}
||||
|
|
||||
|
// A texture instruction is any opcode IndexedInstruction knows how to lower.
bool IsTextureInstruction(const IR::Inst& inst) {
    const IR::Opcode indexed{IndexedInstruction(inst)};
    return indexed != IR::Opcode::Void;
}
||||
|
|
||||
|
// Walks the definition chain of 'value' looking for a GetCbuf with immediate index and
// offset — i.e. the constant-buffer slot a bindless texture handle was loaded from.
// Phi nodes are followed, with 'visited' guarding against revisiting blocks (and thus
// looping forever on cyclic control flow). Returns std::nullopt when no immediate
// constant-buffer read can be proven.
std::optional<ConstBufferAddr> Track(IR::Block* block, const IR::Value& value,
                                     VisitedBlocks& visited) {
    if (value.IsImmediate()) {
        // Immediates can't be a constant buffer read
        return std::nullopt;
    }
    const IR::Inst* const inst{value.InstRecursive()};
    if (inst->Opcode() == IR::Opcode::GetCbuf) {
        const IR::Value index{inst->Arg(0)};
        const IR::Value offset{inst->Arg(1)};
        if (!index.IsImmediate()) {
            // Reading a bindless texture from variable indices is valid
            // but not supported here at the moment
            return std::nullopt;
        }
        if (!offset.IsImmediate()) {
            // TODO: Support arrays of textures
            return std::nullopt;
        }
        return ConstBufferAddr{
            .index{index.U32()},
            .offset{offset.U32()},
        };
    }
    // Reversed loops are more likely to find the right result
    for (size_t arg = inst->NumArgs(); arg--;) {
        IR::Block* inst_block{block};
        if (inst->Opcode() == IR::Opcode::Phi) {
            // If we are going through a phi node, mark the current block as visited
            visited.insert(block);
            // and skip already visited blocks to avoid looping forever
            IR::Block* const phi_block{inst->PhiBlock(arg)};
            if (visited.contains(phi_block)) {
                // Already visited, skip
                continue;
            }
            inst_block = phi_block;
        }
        const std::optional<ConstBufferAddr> cbuf_addr{Track(inst_block, inst->Arg(arg), visited)};
        if (cbuf_addr) {
            return *cbuf_addr;
        }
    }
    return std::nullopt;
}
||||
|
|
||||
|
// Builds the TextureInst record for a sample instruction. Bindless instructions have the
// handle tracked back through the IR to the constant-buffer read that produced it; bound
// instructions use the environment's bound buffer slot with the offset stored in Arg(0).
TextureInst MakeInst(Environment& env, IR::Block* block, IR::Inst& inst) {
    ConstBufferAddr addr;
    if (IsBindless(inst)) {
        VisitedBlocks visited;
        const std::optional<ConstBufferAddr> track_addr{Track(block, IR::Value{&inst}, visited)};
        if (!track_addr) {
            // Tracking through variable indices/offsets is not supported yet
            throw NotImplementedException("Failed to track bindless texture constant buffer");
        }
        addr = *track_addr;
    } else {
        addr = ConstBufferAddr{
            .index{env.TextureBoundBuffer()},
            .offset{inst.Arg(0).U32()},
        };
    }
    return TextureInst{
        .cbuf{addr},
        .inst{&inst},
        .block{block},
    };
}
||||
|
|
||||
|
class Descriptors { |
||||
|
public: |
||||
|
explicit Descriptors(TextureDescriptors& descriptors_) : descriptors{descriptors_} {} |
||||
|
|
||||
|
u32 Add(const TextureDescriptor& descriptor) { |
||||
|
// TODO: Handle arrays
|
||||
|
auto it{std::ranges::find_if(descriptors, [&descriptor](const TextureDescriptor& existing) { |
||||
|
return descriptor.cbuf_index == existing.cbuf_index && |
||||
|
descriptor.cbuf_offset == existing.cbuf_offset && |
||||
|
descriptor.type == existing.type; |
||||
|
})}; |
||||
|
if (it != descriptors.end()) { |
||||
|
return static_cast<u32>(std::distance(descriptors.begin(), it)); |
||||
|
} |
||||
|
descriptors.push_back(descriptor); |
||||
|
return static_cast<u32>(descriptors.size()) - 1; |
||||
|
} |
||||
|
|
||||
|
private: |
||||
|
TextureDescriptors& descriptors; |
||||
|
}; |
||||
|
} // Anonymous namespace
|
||||
|
|
||||
|
// Rewrites bound/bindless sample instructions into indexed ones: resolves each handle to
// a constant-buffer address, assigns every unique (type, cbuf index, offset) a descriptor
// slot in program.info, and replaces the opcode with the descriptor index as Arg(0).
void TexturePass(Environment& env, IR::Program& program) {
    TextureInstVector to_replace;
    for (IR::Function& function : program.functions) {
        for (IR::Block* const block : function.post_order_blocks) {
            for (IR::Inst& inst : block->Instructions()) {
                if (!IsTextureInstruction(inst)) {
                    continue;
                }
                to_replace.push_back(MakeInst(env, block, inst));
            }
        }
    }
    // Sort instructions to visit textures by constant buffer index, then by offset.
    // The unstable offset sort runs first; the stable index sort preserves the offset
    // ordering inside each index group, yielding (index, offset) order overall.
    std::ranges::sort(to_replace, [](const auto& lhs, const auto& rhs) {
        return lhs.cbuf.offset < rhs.cbuf.offset;
    });
    std::stable_sort(to_replace.begin(), to_replace.end(), [](const auto& lhs, const auto& rhs) {
        return lhs.cbuf.index < rhs.cbuf.index;
    });
    Descriptors descriptors{program.info.texture_descriptors};
    for (TextureInst& texture_inst : to_replace) {
        // TODO: Handle arrays
        IR::Inst* const inst{texture_inst.inst};
        const u32 index{descriptors.Add(TextureDescriptor{
            .type{inst->Flags<IR::TextureInstInfo>().type},
            .cbuf_index{texture_inst.cbuf.index},
            .cbuf_offset{texture_inst.cbuf.offset},
            .count{1},
        })};
        // Rewrite in place: indexed opcode with the descriptor slot as the first argument
        inst->ReplaceOpcode(IndexedInstruction(*inst));
        inst->SetArg(0, IR::Value{index});
    }
}
||||
|
|
||||
|
} // namespace Shader::Optimization
|
||||
Write
Preview
Loading…
Cancel
Save
Reference in new issue