@@ -80,6 +80,319 @@ static Math::Vec4<u8> DecodePixel(Regs::PixelFormat input_format, const u8* src_

MICROPROFILE_DEFINE(GPU_DisplayTransfer, "GPU", "DisplayTransfer", MP_RGB(100, 100, 255));
MICROPROFILE_DEFINE(GPU_CmdlistProcessing, "GPU", "Cmdlist Processing", MP_RGB(100, 255, 100));

static void MemoryFill(const Regs::MemoryFillConfig& config) {
    const PAddr start_addr = config.GetStartAddress();
    const PAddr end_addr = config.GetEndAddress();

    // TODO: do hwtest with these cases
    if (!Memory::IsValidPhysicalAddress(start_addr)) {
        LOG_CRITICAL(HW_GPU, "invalid start address 0x%08X", start_addr);
        return;
    }

    if (!Memory::IsValidPhysicalAddress(end_addr)) {
        LOG_CRITICAL(HW_GPU, "invalid end address 0x%08X", end_addr);
        return;
    }

    if (end_addr <= start_addr) {
        LOG_CRITICAL(HW_GPU, "invalid memory range from 0x%08X to 0x%08X", start_addr, end_addr);
        return;
    }

    u8* start = Memory::GetPhysicalPointer(start_addr);
    u8* end = Memory::GetPhysicalPointer(end_addr);

    // TODO: Consider always accelerating and returning vector of
    //       regions that the accelerated fill did not cover to
    //       reduce/eliminate the fill that the cpu has to do.
    //       This would also mean that the flush below is not needed.
    //       Fill should first flush all surfaces that touch but are
    //       not completely within the fill range.
    //       Then fill all completely covered surfaces, and return the
    //       regions that were between surfaces or within the touching
    //       ones for cpu to manually fill here.
    if (VideoCore::g_renderer->Rasterizer()->AccelerateFill(config))
        return;

    Memory::RasterizerFlushAndInvalidateRegion(config.GetStartAddress(),
                                               config.GetEndAddress() - config.GetStartAddress());

    if (config.fill_24bit) {
        // fill with 24-bit values
        for (u8* ptr = start; ptr < end; ptr += 3) {
            ptr[0] = config.value_24bit_r;
            ptr[1] = config.value_24bit_g;
            ptr[2] = config.value_24bit_b;
        }
    } else if (config.fill_32bit) {
        // fill with 32-bit values
        if (end > start) {
            u32 value = config.value_32bit;
            size_t len = (end - start) / sizeof(u32);
            for (size_t i = 0; i < len; ++i)
                memcpy(&start[i * sizeof(u32)], &value, sizeof(u32));
        }
    } else {
        // fill with 16-bit values
        u16 value_16bit = config.value_16bit.Value();
        for (u8* ptr = start; ptr < end; ptr += sizeof(u16))
            memcpy(ptr, &value_16bit, sizeof(u16));
    }
}
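
// Illustrative note (not part of the original change): the three branches above correspond to
// the fill widths selectable in MemoryFillConfig (16-, 24- and 32-bit). As a worked example,
// clearing a hypothetical 400x240 RGBA8 buffer to opaque red with the 32-bit path would use
// end_addr - start_addr = 400 * 240 * 4 = 384000 bytes and, assuming the ABGR byte order used
// by Color::EncodeRGBA8, value_32bit = 0xFF0000FF; the loop above then performs 96000
// 4-byte memcpy stores.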

static void DisplayTransfer(const Regs::DisplayTransferConfig& config) {
    const PAddr src_addr = config.GetPhysicalInputAddress();
    const PAddr dst_addr = config.GetPhysicalOutputAddress();

    // TODO: do hwtest with these cases
    if (!Memory::IsValidPhysicalAddress(src_addr)) {
        LOG_CRITICAL(HW_GPU, "invalid input address 0x%08X", src_addr);
        return;
    }

    if (!Memory::IsValidPhysicalAddress(dst_addr)) {
        LOG_CRITICAL(HW_GPU, "invalid output address 0x%08X", dst_addr);
        return;
    }

    if (config.input_width == 0) {
        LOG_CRITICAL(HW_GPU, "zero input width");
        return;
    }

    if (config.input_height == 0) {
        LOG_CRITICAL(HW_GPU, "zero input height");
        return;
    }

    if (config.output_width == 0) {
        LOG_CRITICAL(HW_GPU, "zero output width");
        return;
    }

    if (config.output_height == 0) {
        LOG_CRITICAL(HW_GPU, "zero output height");
        return;
    }

    if (VideoCore::g_renderer->Rasterizer()->AccelerateDisplayTransfer(config))
        return;

    u8* src_pointer = Memory::GetPhysicalPointer(src_addr);
    u8* dst_pointer = Memory::GetPhysicalPointer(dst_addr);

    if (config.scaling > config.ScaleXY) {
        LOG_CRITICAL(HW_GPU, "Unimplemented display transfer scaling mode %u",
                     config.scaling.Value());
        UNIMPLEMENTED();
        return;
    }

    if (config.input_linear && config.scaling != config.NoScale) {
        LOG_CRITICAL(HW_GPU, "Scaling is only implemented on tiled input");
        UNIMPLEMENTED();
        return;
    }

    int horizontal_scale = config.scaling != config.NoScale ? 1 : 0;
    int vertical_scale = config.scaling == config.ScaleXY ? 1 : 0;

    u32 output_width = config.output_width >> horizontal_scale;
    u32 output_height = config.output_height >> vertical_scale;
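
    // Note (added for clarity, not in the original): horizontal_scale and vertical_scale are
    // used as bit shifts, so ScaleX halves the number of output columns and ScaleXY halves both
    // output dimensions; each output pixel (x, y) then samples the input starting at
    // (x << horizontal_scale, y << vertical_scale).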

    u32 input_size =
        config.input_width * config.input_height * GPU::Regs::BytesPerPixel(config.input_format);
    u32 output_size = output_width * output_height * GPU::Regs::BytesPerPixel(config.output_format);

    Memory::RasterizerFlushRegion(config.GetPhysicalInputAddress(), input_size);
    Memory::RasterizerFlushAndInvalidateRegion(config.GetPhysicalOutputAddress(), output_size);

    for (u32 y = 0; y < output_height; ++y) {
        for (u32 x = 0; x < output_width; ++x) {
            Math::Vec4<u8> src_color;

            // Calculate the [x,y] position of the input image
            // based on the current output position and the scale
            u32 input_x = x << horizontal_scale;
            u32 input_y = y << vertical_scale;

            u32 output_y;
            if (config.flip_vertically) {
                // Flip the y value of the output data,
                // we do this after calculating the [x,y] position of the input image
                // to account for the scaling options.
                output_y = output_height - y - 1;
            } else {
                output_y = y;
            }

            u32 dst_bytes_per_pixel = GPU::Regs::BytesPerPixel(config.output_format);
            u32 src_bytes_per_pixel = GPU::Regs::BytesPerPixel(config.input_format);
            u32 src_offset;
            u32 dst_offset;
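
            // Note (added for clarity, not in the original): 3DS framebuffers and textures in
            // "tiled" layout are stored as 8x8-pixel tiles whose texels follow a Z-order
            // (Morton) curve. The presumed scheme behind VideoCore::GetMortonOffset is to
            // interleave the low three bits of x and y for the position inside the tile, plus
            // the whole tiles to its left within the same tile row; the callers below then add
            // coarse_y * stride, with coarse_y = y & ~7, to skip entire 8-pixel-tall tile rows.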

            if (config.input_linear) {
                if (!config.dont_swizzle) {
                    // Interpret the input as linear and the output as tiled
                    u32 coarse_y = output_y & ~7;
                    u32 stride = output_width * dst_bytes_per_pixel;

                    src_offset = (input_x + input_y * config.input_width) * src_bytes_per_pixel;
                    dst_offset = VideoCore::GetMortonOffset(x, output_y, dst_bytes_per_pixel) +
                                 coarse_y * stride;
                } else {
                    // Both input and output are linear
                    src_offset = (input_x + input_y * config.input_width) * src_bytes_per_pixel;
                    dst_offset = (x + output_y * output_width) * dst_bytes_per_pixel;
                }
            } else {
                if (!config.dont_swizzle) {
                    // Interpret the input as tiled and the output as linear
                    u32 coarse_y = input_y & ~7;
                    u32 stride = config.input_width * src_bytes_per_pixel;

                    src_offset = VideoCore::GetMortonOffset(input_x, input_y, src_bytes_per_pixel) +
                                 coarse_y * stride;
                    dst_offset = (x + output_y * output_width) * dst_bytes_per_pixel;
                } else {
                    // Both input and output are tiled
                    u32 out_coarse_y = output_y & ~7;
                    u32 out_stride = output_width * dst_bytes_per_pixel;

                    u32 in_coarse_y = input_y & ~7;
                    u32 in_stride = config.input_width * src_bytes_per_pixel;

                    src_offset = VideoCore::GetMortonOffset(input_x, input_y, src_bytes_per_pixel) +
                                 in_coarse_y * in_stride;
                    dst_offset = VideoCore::GetMortonOffset(x, output_y, dst_bytes_per_pixel) +
                                 out_coarse_y * out_stride;
                }
            }

            const u8* src_pixel = src_pointer + src_offset;
            src_color = DecodePixel(config.input_format, src_pixel);
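
            // Note (added for clarity, not in the original): when downscaling, the transfer
            // appears to box-filter adjacent source pixels. Scaling is only taken on tiled
            // input (checked above) and input_x/input_y are even here, so the next
            // src_bytes_per_pixel-sized entries in Morton order are presumably the neighbouring
            // texels of the same tile; in the ScaleXY case below the four consecutive entries
            // form the 2x2 block (x, y)..(x+1, y+1) even though the reads step linearly
            // through memory.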
            if (config.scaling == config.ScaleX) {
                Math::Vec4<u8> pixel =
                    DecodePixel(config.input_format, src_pixel + src_bytes_per_pixel);
                src_color = ((src_color + pixel) / 2).Cast<u8>();
            } else if (config.scaling == config.ScaleXY) {
                Math::Vec4<u8> pixel1 =
                    DecodePixel(config.input_format, src_pixel + 1 * src_bytes_per_pixel);
                Math::Vec4<u8> pixel2 =
                    DecodePixel(config.input_format, src_pixel + 2 * src_bytes_per_pixel);
                Math::Vec4<u8> pixel3 =
                    DecodePixel(config.input_format, src_pixel + 3 * src_bytes_per_pixel);
                src_color = (((src_color + pixel1) + (pixel2 + pixel3)) / 4).Cast<u8>();
            }

            u8* dst_pixel = dst_pointer + dst_offset;
            switch (config.output_format) {
            case Regs::PixelFormat::RGBA8:
                Color::EncodeRGBA8(src_color, dst_pixel);
                break;

            case Regs::PixelFormat::RGB8:
                Color::EncodeRGB8(src_color, dst_pixel);
                break;

            case Regs::PixelFormat::RGB565:
                Color::EncodeRGB565(src_color, dst_pixel);
                break;

            case Regs::PixelFormat::RGB5A1:
                Color::EncodeRGB5A1(src_color, dst_pixel);
                break;

            case Regs::PixelFormat::RGBA4:
                Color::EncodeRGBA4(src_color, dst_pixel);
                break;

            default:
                LOG_ERROR(HW_GPU, "Unknown destination framebuffer format %x",
                          config.output_format.Value());
                break;
            }
        }
    }
}

static void TextureCopy(const Regs::DisplayTransferConfig& config) {
    const PAddr src_addr = config.GetPhysicalInputAddress();
    const PAddr dst_addr = config.GetPhysicalOutputAddress();

    // TODO: do hwtest with these cases
    if (!Memory::IsValidPhysicalAddress(src_addr)) {
        LOG_CRITICAL(HW_GPU, "invalid input address 0x%08X", src_addr);
        return;
    }

    if (!Memory::IsValidPhysicalAddress(dst_addr)) {
        LOG_CRITICAL(HW_GPU, "invalid output address 0x%08X", dst_addr);
        return;
    }

    if (config.texture_copy.input_width == 0) {
        LOG_CRITICAL(HW_GPU, "zero input width");
        return;
    }

    if (config.texture_copy.output_width == 0) {
        LOG_CRITICAL(HW_GPU, "zero output width");
        return;
    }

    if (config.texture_copy.size == 0) {
        LOG_CRITICAL(HW_GPU, "zero size");
        return;
    }

    if (VideoCore::g_renderer->Rasterizer()->AccelerateTextureCopy(config))
        return;

    u8* src_pointer = Memory::GetPhysicalPointer(src_addr);
    u8* dst_pointer = Memory::GetPhysicalPointer(dst_addr);

    u32 input_width = config.texture_copy.input_width * 16;
    u32 input_gap = config.texture_copy.input_gap * 16;
    u32 output_width = config.texture_copy.output_width * 16;
    u32 output_gap = config.texture_copy.output_gap * 16;
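
    // Note (added for clarity, not in the original): the texture_copy register fields are
    // expressed in 16-byte units, so the values above are byte counts. input_width/output_width
    // are the contiguous bytes copied per line and input_gap/output_gap the bytes skipped
    // between lines. As a hypothetical example, copying a 64x64 RGBA8 sub-rectangle out of a
    // 128-pixel-wide texture would use size = 64 * 64 * 4 = 16384 bytes,
    // input_width = 64 * 4 = 256 bytes (register value 16) and
    // input_gap = (128 - 64) * 4 = 256 bytes (register value 16).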

    size_t contiguous_input_size =
        config.texture_copy.size / input_width * (input_width + input_gap);
    Memory::RasterizerFlushRegion(config.GetPhysicalInputAddress(),
                                  static_cast<u32>(contiguous_input_size));

    size_t contiguous_output_size =
        config.texture_copy.size / output_width * (output_width + output_gap);
    Memory::RasterizerFlushAndInvalidateRegion(config.GetPhysicalOutputAddress(),
                                               static_cast<u32>(contiguous_output_size));

    u32 remaining_size = config.texture_copy.size;
    u32 remaining_input = input_width;
    u32 remaining_output = output_width;
    while (remaining_size > 0) {
        u32 copy_size = std::min({remaining_input, remaining_output, remaining_size});

        std::memcpy(dst_pointer, src_pointer, copy_size);
        src_pointer += copy_size;
        dst_pointer += copy_size;

        remaining_input -= copy_size;
        remaining_output -= copy_size;
        remaining_size -= copy_size;

        if (remaining_input == 0) {
            remaining_input = input_width;
            src_pointer += input_gap;
        }
        if (remaining_output == 0) {
            remaining_output = output_width;
            dst_pointer += output_gap;
        }
    }
}

template <typename T>
inline void Write(u32 addr, const T data) {
    addr -= HW::VADDR_GPU;

@@ -102,50 +415,13 @@ inline void Write(u32 addr, const T data) {
        auto& config = g_regs.memory_fill_config[is_second_filler];

        if (config.trigger) {
            if (config.address_start) { // Some games pass invalid values here
                u8* start = Memory::GetPhysicalPointer(config.GetStartAddress());
                u8* end = Memory::GetPhysicalPointer(config.GetEndAddress());

                // TODO: Consider always accelerating and returning vector of
                //       regions that the accelerated fill did not cover to
                //       reduce/eliminate the fill that the cpu has to do.
                //       This would also mean that the flush below is not needed.
                //       Fill should first flush all surfaces that touch but are
                //       not completely within the fill range.
                //       Then fill all completely covered surfaces, and return the
                //       regions that were between surfaces or within the touching
                //       ones for cpu to manually fill here.
                if (!VideoCore::g_renderer->Rasterizer()->AccelerateFill(config)) {
                    Memory::RasterizerFlushAndInvalidateRegion(config.GetStartAddress(),
                                                               config.GetEndAddress() -
                                                                   config.GetStartAddress());

                    if (config.fill_24bit) {
                        // fill with 24-bit values
                        for (u8* ptr = start; ptr < end; ptr += 3) {
                            ptr[0] = config.value_24bit_r;
                            ptr[1] = config.value_24bit_g;
                            ptr[2] = config.value_24bit_b;
                        }
                    } else if (config.fill_32bit) {
                        // fill with 32-bit values
                        if (end > start) {
                            u32 value = config.value_32bit;
                            size_t len = (end - start) / sizeof(u32);
                            for (size_t i = 0; i < len; ++i)
                                memcpy(&start[i * sizeof(u32)], &value, sizeof(u32));
                        }
                    } else {
                        // fill with 16-bit values
                        u16 value_16bit = config.value_16bit.Value();
                        for (u8* ptr = start; ptr < end; ptr += sizeof(u16))
                            memcpy(ptr, &value_16bit, sizeof(u16));
                    }
                }

                LOG_TRACE(HW_GPU, "MemoryFill from 0x%08x to 0x%08x", config.GetStartAddress(),
                          config.GetEndAddress());

            MemoryFill(config);
            LOG_TRACE(HW_GPU, "MemoryFill from 0x%08x to 0x%08x", config.GetStartAddress(),
                      config.GetEndAddress());

            // It seems that it won't signal interrupt if "address_start" is zero.
            // TODO: hwtest this
            if (config.GetStartAddress() != 0) {
                if (!is_second_filler) {
                    GSP_GPU::SignalInterrupt(GSP_GPU::InterruptId::PSC0);
                } else {
@@ -171,207 +447,22 @@ inline void Write(u32 addr, const T data) {
                Pica::g_debug_context->OnEvent(Pica::DebugContext::Event::IncomingDisplayTransfer,
                                               nullptr);

            if (!VideoCore::g_renderer->Rasterizer()->AccelerateDisplayTransfer(config)) {
                u8* src_pointer = Memory::GetPhysicalPointer(config.GetPhysicalInputAddress());
                u8* dst_pointer = Memory::GetPhysicalPointer(config.GetPhysicalOutputAddress());

                if (config.is_texture_copy) {
                    u32 input_width = config.texture_copy.input_width * 16;
                    u32 input_gap = config.texture_copy.input_gap * 16;
                    u32 output_width = config.texture_copy.output_width * 16;
                    u32 output_gap = config.texture_copy.output_gap * 16;

                    size_t contiguous_input_size =
                        config.texture_copy.size / input_width * (input_width + input_gap);
                    Memory::RasterizerFlushRegion(config.GetPhysicalInputAddress(),
                                                  static_cast<u32>(contiguous_input_size));

                    size_t contiguous_output_size =
                        config.texture_copy.size / output_width * (output_width + output_gap);
                    Memory::RasterizerFlushAndInvalidateRegion(
                        config.GetPhysicalOutputAddress(),
                        static_cast<u32>(contiguous_output_size));

                    u32 remaining_size = config.texture_copy.size;
                    u32 remaining_input = input_width;
                    u32 remaining_output = output_width;
                    while (remaining_size > 0) {
                        u32 copy_size =
                            std::min({remaining_input, remaining_output, remaining_size});

                        std::memcpy(dst_pointer, src_pointer, copy_size);
                        src_pointer += copy_size;
                        dst_pointer += copy_size;

                        remaining_input -= copy_size;
                        remaining_output -= copy_size;
                        remaining_size -= copy_size;

                        if (remaining_input == 0) {
                            remaining_input = input_width;
                            src_pointer += input_gap;
                        }
                        if (remaining_output == 0) {
                            remaining_output = output_width;
                            dst_pointer += output_gap;
                        }
                    }

                    LOG_TRACE(
                        HW_GPU,
                        "TextureCopy: 0x%X bytes from 0x%08X(%u+%u)-> 0x%08X(%u+%u), flags 0x%08X",
                        config.texture_copy.size, config.GetPhysicalInputAddress(), input_width,
                        input_gap, config.GetPhysicalOutputAddress(), output_width, output_gap,
                        config.flags);

                    GSP_GPU::SignalInterrupt(GSP_GPU::InterruptId::PPF);
                    break;
                }

                if (config.scaling > config.ScaleXY) {
                    LOG_CRITICAL(HW_GPU, "Unimplemented display transfer scaling mode %u",
                                 config.scaling.Value());
                    UNIMPLEMENTED();
                    break;
                }

                if (config.input_linear && config.scaling != config.NoScale) {
                    LOG_CRITICAL(HW_GPU, "Scaling is only implemented on tiled input");
                    UNIMPLEMENTED();
                    break;
                }

                int horizontal_scale = config.scaling != config.NoScale ? 1 : 0;
                int vertical_scale = config.scaling == config.ScaleXY ? 1 : 0;

                u32 output_width = config.output_width >> horizontal_scale;
                u32 output_height = config.output_height >> vertical_scale;

                u32 input_size = config.input_width * config.input_height *
                                 GPU::Regs::BytesPerPixel(config.input_format);
                u32 output_size =
                    output_width * output_height * GPU::Regs::BytesPerPixel(config.output_format);

                Memory::RasterizerFlushRegion(config.GetPhysicalInputAddress(), input_size);
                Memory::RasterizerFlushAndInvalidateRegion(config.GetPhysicalOutputAddress(),
                                                           output_size);

                for (u32 y = 0; y < output_height; ++y) {
                    for (u32 x = 0; x < output_width; ++x) {
                        Math::Vec4<u8> src_color;

                        // Calculate the [x,y] position of the input image
                        // based on the current output position and the scale
                        u32 input_x = x << horizontal_scale;
                        u32 input_y = y << vertical_scale;

                        if (config.flip_vertically) {
                            // Flip the y value of the output data,
                            // we do this after calculating the [x,y] position of the input image
                            // to account for the scaling options.
                            y = output_height - y - 1;
                        }

                        u32 dst_bytes_per_pixel = GPU::Regs::BytesPerPixel(config.output_format);
                        u32 src_bytes_per_pixel = GPU::Regs::BytesPerPixel(config.input_format);
                        u32 src_offset;
                        u32 dst_offset;

                        if (config.input_linear) {
                            if (!config.dont_swizzle) {
                                // Interpret the input as linear and the output as tiled
                                u32 coarse_y = y & ~7;
                                u32 stride = output_width * dst_bytes_per_pixel;

                                src_offset =
                                    (input_x + input_y * config.input_width) * src_bytes_per_pixel;
                                dst_offset = VideoCore::GetMortonOffset(x, y, dst_bytes_per_pixel) +
                                             coarse_y * stride;
                            } else {
                                // Both input and output are linear
                                src_offset =
                                    (input_x + input_y * config.input_width) * src_bytes_per_pixel;
                                dst_offset = (x + y * output_width) * dst_bytes_per_pixel;
                            }
                        } else {
                            if (!config.dont_swizzle) {
                                // Interpret the input as tiled and the output as linear
                                u32 coarse_y = input_y & ~7;
                                u32 stride = config.input_width * src_bytes_per_pixel;

                                src_offset = VideoCore::GetMortonOffset(input_x, input_y,
                                                                        src_bytes_per_pixel) +
                                             coarse_y * stride;
                                dst_offset = (x + y * output_width) * dst_bytes_per_pixel;
                            } else {
                                // Both input and output are tiled
                                u32 out_coarse_y = y & ~7;
                                u32 out_stride = output_width * dst_bytes_per_pixel;

                                u32 in_coarse_y = input_y & ~7;
                                u32 in_stride = config.input_width * src_bytes_per_pixel;

                                src_offset = VideoCore::GetMortonOffset(input_x, input_y,
                                                                        src_bytes_per_pixel) +
                                             in_coarse_y * in_stride;
                                dst_offset = VideoCore::GetMortonOffset(x, y, dst_bytes_per_pixel) +
                                             out_coarse_y * out_stride;
                            }
                        }

                        const u8* src_pixel = src_pointer + src_offset;
                        src_color = DecodePixel(config.input_format, src_pixel);
                        if (config.scaling == config.ScaleX) {
                            Math::Vec4<u8> pixel =
                                DecodePixel(config.input_format, src_pixel + src_bytes_per_pixel);
                            src_color = ((src_color + pixel) / 2).Cast<u8>();
                        } else if (config.scaling == config.ScaleXY) {
                            Math::Vec4<u8> pixel1 = DecodePixel(
                                config.input_format, src_pixel + 1 * src_bytes_per_pixel);
                            Math::Vec4<u8> pixel2 = DecodePixel(
                                config.input_format, src_pixel + 2 * src_bytes_per_pixel);
                            Math::Vec4<u8> pixel3 = DecodePixel(
                                config.input_format, src_pixel + 3 * src_bytes_per_pixel);
                            src_color = (((src_color + pixel1) + (pixel2 + pixel3)) / 4).Cast<u8>();
                        }

                        u8* dst_pixel = dst_pointer + dst_offset;
                        switch (config.output_format) {
                        case Regs::PixelFormat::RGBA8:
                            Color::EncodeRGBA8(src_color, dst_pixel);
                            break;

                        case Regs::PixelFormat::RGB8:
                            Color::EncodeRGB8(src_color, dst_pixel);
                            break;

                        case Regs::PixelFormat::RGB565:
                            Color::EncodeRGB565(src_color, dst_pixel);
                            break;

                        case Regs::PixelFormat::RGB5A1:
                            Color::EncodeRGB5A1(src_color, dst_pixel);
                            break;

                        case Regs::PixelFormat::RGBA4:
                            Color::EncodeRGBA4(src_color, dst_pixel);
                            break;

                        default:
                            LOG_ERROR(HW_GPU, "Unknown destination framebuffer format %x",
                                      config.output_format.Value());
                            break;
                        }
                    }
                }

LOG_TRACE(HW_GPU, "DisplayTriggerTransfer: 0x%08x bytes from 0x%08x(%ux%u)-> " |
|
|
|
|
|
|
|
|
if (config.is_texture_copy) { |
|
|
|
|
|
TextureCopy(config); |
|
|
|
|
|
LOG_TRACE(HW_GPU, "TextureCopy: 0x%X bytes from 0x%08X(%u+%u)-> " |
|
|
|
|
|
"0x%08X(%u+%u), flags 0x%08X", |
|
|
|
|
|
config.texture_copy.size, config.GetPhysicalInputAddress(), |
|
|
|
|
|
config.texture_copy.input_width * 16, config.texture_copy.input_gap * 16, |
|
|
|
|
|
config.GetPhysicalOutputAddress(), config.texture_copy.output_width * 16, |
|
|
|
|
|
config.texture_copy.output_gap * 16, config.flags); |
|
|
|
|
|
} else { |
|
|
|
|
|
DisplayTransfer(config); |
|
|
|
|
|
LOG_TRACE(HW_GPU, "DisplayTransfer: 0x%08x(%ux%u)-> " |
|
|
"0x%08x(%ux%u), dst format %x, flags 0x%08X", |
|
|
"0x%08x(%ux%u), dst format %x, flags 0x%08X", |
|
|
config.output_height * output_width * |
|
|
|
|
|
GPU::Regs::BytesPerPixel(config.output_format), |
|
|
|
|
|
config.GetPhysicalInputAddress(), config.input_width.Value(), |
|
|
config.GetPhysicalInputAddress(), config.input_width.Value(), |
|
|
config.input_height.Value(), config.GetPhysicalOutputAddress(), |
|
|
config.input_height.Value(), config.GetPhysicalOutputAddress(), |
|
|
output_width, output_height, config.output_format.Value(), config.flags); |
|
|
|
|
|
|
|
|
config.output_width.Value(), config.output_height.Value(), |
|
|
|
|
|
config.output_format.Value(), config.flags); |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
g_regs.display_transfer_config.trigger = 0; |
|
|
g_regs.display_transfer_config.trigger = 0; |
|
|
|