
better unreachables

Signed-off-by: lizzie <lizzie@eden-emu.dev>
pull/2890/head
lizzie 3 months ago
parent
commit
1f18fbf6a3
No known key found for this signature in database GPG Key ID: 287378CADCAB13
  1. src/dynarmic/src/dynarmic/backend/arm64/abi.h (4 lines changed)
  2. src/dynarmic/src/dynarmic/backend/arm64/address_space.cpp (2 lines changed)
  3. src/dynarmic/src/dynarmic/backend/arm64/emit_arm64_a32.cpp (2 lines changed)
  4. src/dynarmic/src/dynarmic/backend/arm64/emit_arm64_a32_coprocessor.cpp (3 lines changed)
  5. src/dynarmic/src/dynarmic/backend/arm64/emit_arm64_a64.cpp (2 lines changed)
  6. src/dynarmic/src/dynarmic/backend/arm64/emit_arm64_floating_point.cpp (11 lines changed)
  7. src/dynarmic/src/dynarmic/backend/arm64/emit_arm64_vector.cpp (6 lines changed)
  8. src/dynarmic/src/dynarmic/backend/arm64/emit_arm64_vector_floating_point.cpp (12 lines changed)
  9. src/dynarmic/src/dynarmic/backend/arm64/reg_alloc.cpp (8 lines changed)
  10. src/dynarmic/src/dynarmic/backend/arm64/reg_alloc.h (3 lines changed)
  11. src/dynarmic/src/dynarmic/backend/riscv64/code_block.h (3 lines changed)
  12. src/dynarmic/src/dynarmic/backend/riscv64/emit_riscv64_a32.cpp (2 lines changed)
  13. src/dynarmic/src/dynarmic/backend/riscv64/reg_alloc.cpp (30 lines changed)
  14. src/dynarmic/src/dynarmic/backend/x64/a32_emit_x64.cpp (16 lines changed)
  15. src/dynarmic/src/dynarmic/backend/x64/emit_x64_vector.cpp (12 lines changed)
  16. src/dynarmic/src/dynarmic/backend/x64/oparg.h (3 lines changed)
  17. src/dynarmic/src/dynarmic/backend/x64/reg_alloc.cpp (13 lines changed)
  18. src/dynarmic/src/dynarmic/common/fp/process_exception.cpp (25 lines changed)
  19. src/dynarmic/src/dynarmic/frontend/A32/a32_ir_emitter.cpp (16 lines changed)
  20. src/dynarmic/src/dynarmic/frontend/A32/translate/impl/a32_translate_impl.cpp (2 lines changed)
  21. src/dynarmic/src/dynarmic/frontend/A32/translate/impl/asimd_load_store_structures.cpp (2 lines changed)
  22. src/dynarmic/src/dynarmic/frontend/A64/translate/impl/impl.cpp (21 lines changed)
  23. src/dynarmic/src/dynarmic/frontend/A64/translate/impl/simd_scalar_x_indexed_element.cpp (11 lines changed)
  24. src/dynarmic/src/dynarmic/ir/ir_emitter.h (2 lines changed)
  25. src/dynarmic/tests/A32/fuzz_arm.cpp (2 lines changed)
  26. src/dynarmic/tests/A32/testenv.h (16 lines changed)
  27. src/dynarmic/tests/A64/testenv.h (16 lines changed)
  28. src/dynarmic/tests/test_generator.cpp (2 lines changed)

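Every hunk below applies the same cleanup: dead branches that previously aborted via ASSERT(false && "message") now call UNREACHABLE(), several switch statements drop their default: case so the unreachable marker sits after the switch instead, and a few guarded asserts are folded into a single ASSERT(!condition && "message"). The sketch below shows the before/after shape of the switch pattern; the ASSERT and UNREACHABLE macros in it are simplified stand-ins for illustration only, not dynarmic's actual assertion macros.

#include <cstdio>
#include <cstdlib>

// Illustration-only stand-ins; dynarmic's real ASSERT/UNREACHABLE come from
// its own assertion headers and carry richer formatting and termination.
#define ASSERT(expr)                                                   \
    do {                                                               \
        if (!(expr)) {                                                 \
            std::fprintf(stderr, "assertion failed: %s\n", #expr);     \
            std::abort();                                              \
        }                                                              \
    } while (false)

#define UNREACHABLE()                                                  \
    do {                                                               \
        std::fprintf(stderr, "reached unreachable code\n");            \
        std::abort();                                                  \
    } while (false)

enum class RoundingMode { ToNearest, TowardsZero };

// Old style: the impossible path is a default: case "handled" with
// ASSERT(false && "..."), plus filler to keep the compiler quiet.
int RoundOld(RoundingMode mode) {
    switch (mode) {
    case RoundingMode::ToNearest:
        return 1;
    case RoundingMode::TowardsZero:
        return 2;
    default:
        ASSERT(false && "Invalid RoundingMode");
        return 0;  // never executed
    }
}

// New style: no default: case, and UNREACHABLE() after the switch marks the
// impossible path explicitly.
int RoundNew(RoundingMode mode) {
    switch (mode) {
    case RoundingMode::ToNearest:
        return 1;
    case RoundingMode::TowardsZero:
        return 2;
    }
    UNREACHABLE();
}

int main() {
    std::printf("%d %d\n", RoundOld(RoundingMode::ToNearest),
                           RoundNew(RoundingMode::TowardsZero));
    return 0;
}

Dropping the default: case lets the compiler's switch-coverage warnings flag any enumerator added later, while the trailing UNREACHABLE() still traps out-of-range values at runtime.
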
4
src/dynarmic/src/dynarmic/backend/arm64/abi.h

@ -62,9 +62,7 @@ constexpr RegisterList ToRegList(oaknut::Reg reg) {
return RegisterList{1} << (reg.index() + 32);
}
if (reg.index() == 31) {
ASSERT(false && "ZR not allowed in reg list");
}
ASSERT(reg.index() != 31 && "ZR not allowed in reg list");
if (reg.index() == -1) {
return RegisterList{1} << 31;

2
src/dynarmic/src/dynarmic/backend/arm64/address_space.cpp

@ -292,7 +292,7 @@ void AddressSpace::LinkBlockLinks(const CodePtr entry_point, const CodePtr targe
}
break;
default:
ASSERT(false && "Invalid BlockRelocationType");
UNREACHABLE();
}
}
}

2
src/dynarmic/src/dynarmic/backend/arm64/emit_arm64_a32.cpp

@ -34,7 +34,7 @@ oaknut::Label EmitA32Cond(oaknut::CodeGenerator& code, EmitContext&, IR::Cond co
void EmitA32Terminal(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Term::Terminal terminal, IR::LocationDescriptor initial_location, bool is_single_step);
void EmitA32Terminal(oaknut::CodeGenerator&, EmitContext&, IR::Term::Interpret, IR::LocationDescriptor, bool) {
ASSERT(false && "Interpret should never be emitted.");
UNREACHABLE();
}
void EmitA32Terminal(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Term::ReturnToDispatch, IR::LocationDescriptor, bool) {

3
src/dynarmic/src/dynarmic/backend/arm64/emit_arm64_a32_coprocessor.cpp

@ -20,7 +20,8 @@ namespace Dynarmic::Backend::Arm64 {
using namespace oaknut::util;
static void EmitCoprocessorException() {
ASSERT(false && "Should raise coproc exception here");
// TODO: Raise coproc exception
UNREACHABLE();
}
static void CallCoprocCallback(oaknut::CodeGenerator& code, EmitContext& ctx, A32::Coprocessor::Callback callback, IR::Inst* inst = nullptr, std::optional<Argument::copyable_reference> arg0 = {}, std::optional<Argument::copyable_reference> arg1 = {}) {

2
src/dynarmic/src/dynarmic/backend/arm64/emit_arm64_a64.cpp

@ -36,7 +36,7 @@ oaknut::Label EmitA64Cond(oaknut::CodeGenerator& code, EmitContext&, IR::Cond co
void EmitA64Terminal(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Term::Terminal terminal, IR::LocationDescriptor initial_location, bool is_single_step);
void EmitA64Terminal(oaknut::CodeGenerator&, EmitContext&, IR::Term::Interpret, IR::LocationDescriptor, bool) {
ASSERT(false && "Interpret should never be emitted.");
UNREACHABLE();
}
void EmitA64Terminal(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Term::ReturnToDispatch, IR::LocationDescriptor, bool) {

11
src/dynarmic/src/dynarmic/backend/arm64/emit_arm64_floating_point.cpp

@ -126,8 +126,7 @@ static void EmitToFixed(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst*
UNREACHABLE();
break;
default:
ASSERT(false && "Invalid RoundingMode");
break;
UNREACHABLE();
}
} else {
switch (rounding_mode) {
@ -150,8 +149,7 @@ static void EmitToFixed(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst*
UNREACHABLE();
break;
default:
ASSERT(false && "Invalid RoundingMode");
break;
UNREACHABLE();
}
}
}
@ -467,9 +465,8 @@ void EmitIR<IR::Opcode::FPRoundInt32>(oaknut::CodeGenerator& code, EmitContext&
case FP::RoundingMode::ToNearest_TieAwayFromZero:
code.FRINTA(Sresult, Soperand);
break;
default:
ASSERT(false && "Invalid RoundingMode");
}
UNREACHABLE();
}
}
@ -505,7 +502,7 @@ void EmitIR<IR::Opcode::FPRoundInt64>(oaknut::CodeGenerator& code, EmitContext&
code.FRINTA(Dresult, Doperand);
break;
default:
ASSERT(false && "Invalid RoundingMode");
UNREACHABLE();
}
}
}

6
src/dynarmic/src/dynarmic/backend/arm64/emit_arm64_vector.cpp

@ -1665,9 +1665,8 @@ void EmitIR<IR::Opcode::VectorTableLookup64>(oaknut::CodeGenerator& code, EmitCo
code.TBX(Dresult->B8(), oaknut::List{V0.B16(), V1.B16()}, Dindices->B8());
}
break;
default:
ASSERT(false && "Unsupported table_size");
}
UNREACHABLE();
}
template<>
@ -1729,9 +1728,8 @@ void EmitIR<IR::Opcode::VectorTableLookup128>(oaknut::CodeGenerator& code, EmitC
code.TBX(Qresult->B16(), oaknut::List{V0.B16(), V1.B16(), V2.B16(), V3.B16()}, Qindices->B16());
}
break;
default:
ASSERT(false && "Unsupported table_size");
}
UNREACHABLE();
}
template<>

12
src/dynarmic/src/dynarmic/backend/arm64/emit_arm64_vector_floating_point.cpp

@ -230,8 +230,7 @@ void EmitToFixed(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst)
UNREACHABLE();
break;
default:
ASSERT(false && "Invalid RoundingMode");
break;
UNREACHABLE();
}
} else {
switch (rounding_mode) {
@ -254,8 +253,7 @@ void EmitToFixed(oaknut::CodeGenerator& code, EmitContext& ctx, IR::Inst* inst)
UNREACHABLE();
break;
default:
ASSERT(false && "Invalid RoundingMode");
break;
UNREACHABLE();
}
}
}
@ -647,9 +645,8 @@ void EmitIR<IR::Opcode::FPVectorRoundInt32>(oaknut::CodeGenerator& code, EmitCon
case FP::RoundingMode::ToNearest_TieAwayFromZero:
code.FRINTA(Qresult->S4(), Qoperand->S4());
break;
default:
ASSERT(false && "Invalid RoundingMode");
}
UNREACHABLE();
}
});
}
@ -687,9 +684,8 @@ void EmitIR<IR::Opcode::FPVectorRoundInt64>(oaknut::CodeGenerator& code, EmitCon
case FP::RoundingMode::ToNearest_TieAwayFromZero:
code.FRINTA(Qresult->D2(), Qoperand->D2());
break;
default:
ASSERT(false && "Invalid RoundingMode");
}
UNREACHABLE();
}
});
}

8
src/dynarmic/src/dynarmic/backend/arm64/reg_alloc.cpp

@ -328,8 +328,7 @@ int RegAlloc::RealizeReadImpl(const IR::Value& value) {
switch (current_location->kind) {
case HostLoc::Kind::Gpr:
ASSERT(false && "Logic error");
break;
UNREACHABLE(); //logic error
case HostLoc::Kind::Fpr:
code.FMOV(oaknut::XReg{new_location_index}, oaknut::DReg{current_location->index});
// ASSERT size fits
@ -354,8 +353,7 @@ int RegAlloc::RealizeReadImpl(const IR::Value& value) {
code.FMOV(oaknut::DReg{new_location_index}, oaknut::XReg{current_location->index});
break;
case HostLoc::Kind::Fpr:
ASSERT(false && "Logic error");
break;
UNREACHABLE(); //logic error
case HostLoc::Kind::Spill:
code.LDR(oaknut::QReg{new_location_index}, SP, spill_offset + current_location->index * spill_slot_size);
break;
@ -368,7 +366,7 @@ int RegAlloc::RealizeReadImpl(const IR::Value& value) {
fprs[new_location_index].realized = true;
return new_location_index;
} else if constexpr (required_kind == HostLoc::Kind::Flags) {
ASSERT(false && "A simple read from flags is likely a logic error.");
UNREACHABLE(); //A simple read from flags is likely a logic error
} else {
static_assert(Common::always_false_v<mcl::mp::lift_value<required_kind>>);
}

3
src/dynarmic/src/dynarmic/backend/arm64/reg_alloc.h

@ -371,9 +371,8 @@ void RAReg<T>::Realize() {
case RWType::ReadWrite:
reg = T{reg_alloc.RealizeReadWriteImpl<kind>(read_value, write_value)};
break;
default:
ASSERT(false && "Invalid RWType");
}
UNREACHABLE();
}
} // namespace Dynarmic::Backend::Arm64

3
src/dynarmic/src/dynarmic/backend/riscv64/code_block.h

@ -16,8 +16,7 @@ class CodeBlock {
public:
explicit CodeBlock(std::size_t size) noexcept : memsize(size) {
mem = (u8*)mmap(nullptr, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANON | MAP_PRIVATE, -1, 0);
if (mem == nullptr)
ASSERT(false && "out of memory");
ASSERT(mem != nullptr);
}
~CodeBlock() noexcept {

2
src/dynarmic/src/dynarmic/backend/riscv64/emit_riscv64_a32.cpp

@ -112,7 +112,7 @@ void EmitA32Cond(biscuit::Assembler& as, EmitContext&, IR::Cond cond, biscuit::L
void EmitA32Terminal(biscuit::Assembler& as, EmitContext& ctx, IR::Term::Terminal terminal, IR::LocationDescriptor initial_location, bool is_single_step);
void EmitA32Terminal(biscuit::Assembler&, EmitContext&, IR::Term::Interpret, IR::LocationDescriptor, bool) {
ASSERT(false && "Interpret should never be emitted.");
UNREACHABLE();
}
void EmitA32Terminal(biscuit::Assembler& as, EmitContext& ctx, IR::Term::ReturnToDispatch, IR::LocationDescriptor, bool) {

30
src/dynarmic/src/dynarmic/backend/riscv64/reg_alloc.cpp

@ -193,8 +193,7 @@ u32 RegAlloc::RealizeReadImpl(const IR::Value& value) {
switch (current_location->kind) {
case HostLoc::Kind::Gpr:
ASSERT(false && "Logic error");
break;
UNREACHABLE(); //logic error
case HostLoc::Kind::Fpr:
as.FMV_X_D(biscuit::GPR(new_location_index), biscuit::FPR{current_location->index});
// ASSERT size fits
@ -216,8 +215,7 @@ u32 RegAlloc::RealizeReadImpl(const IR::Value& value) {
as.FMV_D_X(biscuit::FPR{new_location_index}, biscuit::GPR(current_location->index));
break;
case HostLoc::Kind::Fpr:
ASSERT(false && "Logic error");
break;
UNREACHABLE(); //logic error
case HostLoc::Kind::Spill:
as.FLD(biscuit::FPR{new_location_index}, spill_offset + current_location->index * spill_slot_size, biscuit::sp);
break;
@ -307,14 +305,11 @@ std::optional<HostLoc> RegAlloc::ValueLocation(const IR::Inst* value) const {
const auto contains_value = [value](const HostLocInfo& info) {
return info.Contains(value);
};
if (const auto iter = std::find_if(gprs.begin(), gprs.end(), contains_value); iter != gprs.end()) {
return HostLoc{HostLoc::Kind::Gpr, static_cast<u32>(iter - gprs.begin())};
}
if (const auto iter = std::find_if(fprs.begin(), fprs.end(), contains_value); iter != fprs.end()) {
} else if (const auto iter = std::find_if(fprs.begin(), fprs.end(), contains_value); iter != fprs.end()) {
return HostLoc{HostLoc::Kind::Fpr, static_cast<u32>(iter - fprs.begin())};
}
if (const auto iter = std::find_if(spills.begin(), spills.end(), contains_value); iter != spills.end()) {
} else if (const auto iter = std::find_if(spills.begin(), spills.end(), contains_value); iter != spills.end()) {
return HostLoc{HostLoc::Kind::Spill, static_cast<u32>(iter - spills.begin())};
}
return std::nullopt;
@ -323,30 +318,27 @@ std::optional<HostLoc> RegAlloc::ValueLocation(const IR::Inst* value) const {
HostLocInfo& RegAlloc::ValueInfo(HostLoc host_loc) {
switch (host_loc.kind) {
case HostLoc::Kind::Gpr:
return gprs[static_cast<size_t>(host_loc.index)];
return gprs[size_t(host_loc.index)];
case HostLoc::Kind::Fpr:
return fprs[static_cast<size_t>(host_loc.index)];
return fprs[size_t(host_loc.index)];
case HostLoc::Kind::Spill:
return spills[static_cast<size_t>(host_loc.index)];
return spills[size_t(host_loc.index)];
}
ASSERT(false && "RegAlloc::ValueInfo: Invalid HostLoc::Kind");
UNREACHABLE();
}
HostLocInfo& RegAlloc::ValueInfo(const IR::Inst* value) {
const auto contains_value = [value](const HostLocInfo& info) {
return info.Contains(value);
};
if (const auto iter = std::find_if(gprs.begin(), gprs.end(), contains_value); iter != gprs.end()) {
return *iter;
}
if (const auto iter = std::find_if(fprs.begin(), fprs.end(), contains_value); iter != fprs.end()) {
} else if (const auto iter = std::find_if(fprs.begin(), fprs.end(), contains_value); iter != fprs.end()) {
return *iter;
}
if (const auto iter = std::find_if(spills.begin(), spills.end(), contains_value); iter != spills.end()) {
} else if (const auto iter = std::find_if(spills.begin(), spills.end(), contains_value); iter != spills.end()) {
return *iter;
}
ASSERT(false && "RegAlloc::ValueInfo: Value not found");
UNREACHABLE();
}
} // namespace Dynarmic::Backend::RV64

16
src/dynarmic/src/dynarmic/backend/x64/a32_emit_x64.cpp

@ -48,18 +48,16 @@ static Xbyak::Address MJitStateReg(A32::Reg reg) {
static Xbyak::Address MJitStateExtReg(A32::ExtReg reg) {
if (A32::IsSingleExtReg(reg)) {
const size_t index = static_cast<size_t>(reg) - static_cast<size_t>(A32::ExtReg::S0);
const size_t index = size_t(reg) - size_t(A32::ExtReg::S0);
return dword[BlockOfCode::ABI_JIT_PTR + offsetof(A32JitState, ExtReg) + sizeof(u32) * index];
}
if (A32::IsDoubleExtReg(reg)) {
const size_t index = static_cast<size_t>(reg) - static_cast<size_t>(A32::ExtReg::D0);
} else if (A32::IsDoubleExtReg(reg)) {
const size_t index = size_t(reg) - size_t(A32::ExtReg::D0);
return qword[BlockOfCode::ABI_JIT_PTR + offsetof(A32JitState, ExtReg) + sizeof(u64) * index];
}
if (A32::IsQuadExtReg(reg)) {
const size_t index = static_cast<size_t>(reg) - static_cast<size_t>(A32::ExtReg::Q0);
} else if (A32::IsQuadExtReg(reg)) {
const size_t index = size_t(reg) - size_t(A32::ExtReg::Q0);
return xword[BlockOfCode::ABI_JIT_PTR + offsetof(A32JitState, ExtReg) + 2 * sizeof(u64) * index];
}
ASSERT(false && "Should never happen.");
UNREACHABLE();
}
A32EmitContext::A32EmitContext(const A32::UserConfig& conf, RegAlloc& reg_alloc, IR::Block& block)
@ -847,7 +845,7 @@ void A32EmitX64::EmitA32SetFpscrNZCV(A32EmitContext& ctx, IR::Inst* inst) {
}
static void EmitCoprocessorException() {
ASSERT(false && "Should raise coproc exception here");
UNREACHABLE();
}
static void CallCoprocCallback(BlockOfCode& code, RegAlloc& reg_alloc, A32::Coprocessor::Callback callback, IR::Inst* inst = nullptr, std::optional<Argument::copyable_reference> arg0 = {}, std::optional<Argument::copyable_reference> arg1 = {}) {

12
src/dynarmic/src/dynarmic/backend/x64/emit_x64_vector.cpp

@ -2425,27 +2425,27 @@ void EmitX64::EmitVectorMultiply64(EmitContext& ctx, IR::Inst* inst) {
}
void EmitX64::EmitVectorMultiplySignedWiden8(EmitContext&, IR::Inst*) {
ASSERT(false && "Unexpected VectorMultiplySignedWiden8");
UNREACHABLE();
}
void EmitX64::EmitVectorMultiplySignedWiden16(EmitContext&, IR::Inst*) {
ASSERT(false && "Unexpected VectorMultiplySignedWiden16");
UNREACHABLE();
}
void EmitX64::EmitVectorMultiplySignedWiden32(EmitContext&, IR::Inst*) {
ASSERT(false && "Unexpected VectorMultiplySignedWiden32");
UNREACHABLE();
}
void EmitX64::EmitVectorMultiplyUnsignedWiden8(EmitContext&, IR::Inst*) {
ASSERT(false && "Unexpected VectorMultiplyUnsignedWiden8");
UNREACHABLE();
}
void EmitX64::EmitVectorMultiplyUnsignedWiden16(EmitContext&, IR::Inst*) {
ASSERT(false && "Unexpected VectorMultiplyUnsignedWiden16");
UNREACHABLE();
}
void EmitX64::EmitVectorMultiplyUnsignedWiden32(EmitContext&, IR::Inst*) {
ASSERT(false && "Unexpected VectorMultiplyUnsignedWiden32");
UNREACHABLE();
}
void EmitX64::EmitVectorNarrow16(EmitContext& ctx, IR::Inst* inst) {

3
src/dynarmic/src/dynarmic/backend/x64/oparg.h

@ -55,9 +55,6 @@ struct OpArg {
case 64:
inner_reg = inner_reg.cvt64();
return;
default:
ASSERT(false && "Invalid bits");
return;
}
}
UNREACHABLE();

13
src/dynarmic/src/dynarmic/backend/x64/reg_alloc.cpp

@ -567,7 +567,7 @@ HostLoc RegAlloc::FindFreeSpill(bool is_xmm) const noexcept {
for (size_t i = size_t(HostLoc::FirstSpill); i < hostloc_info.size(); ++i)
if (const auto loc = HostLoc(i); LocInfo(loc).IsEmpty())
return loc;
ASSERT(false && "All spill locations are full");
UNREACHABLE();
};
void RegAlloc::EmitMove(const size_t bit_width, const HostLoc to, const HostLoc from) noexcept {
@ -654,18 +654,13 @@ void RegAlloc::EmitMove(const size_t bit_width, const HostLoc to, const HostLoc
code->mov(Xbyak::util::dword[spill_to_op_arg_helper(to, reserved_stack_space)], HostLocToReg64(from).cvt32());
}
} else {
ASSERT(false && "Invalid RegAlloc::EmitMove");
UNREACHABLE();
}
}
void RegAlloc::EmitExchange(const HostLoc a, const HostLoc b) noexcept {
if (HostLocIsGPR(a) && HostLocIsGPR(b)) {
code->xchg(HostLocToReg64(a), HostLocToReg64(b));
} else if (HostLocIsXMM(a) && HostLocIsXMM(b)) {
ASSERT(false && "Check your code: Exchanging XMM registers is unnecessary");
} else {
ASSERT(false && "Invalid RegAlloc::EmitExchange");
}
ASSERT(HostLocIsGPR(a) && HostLocIsGPR(b) && "Exchanging XMM registers is unneeded OR invalid emit");
code->xchg(HostLocToReg64(a), HostLocToReg64(b));
}
} // namespace Dynarmic::Backend::X64

25
src/dynarmic/src/dynarmic/common/fp/process_exception.cpp

@ -18,44 +18,31 @@ namespace Dynarmic::FP {
void FPProcessException(FPExc exception, FPCR fpcr, FPSR& fpsr) {
switch (exception) {
case FPExc::InvalidOp:
if (fpcr.IOE()) {
ASSERT(false && "Raising floating point exceptions unimplemented");
}
ASSERT(!fpcr.IOE() && "Raising floating point exceptions unimplemented");
fpsr.IOC(true);
break;
case FPExc::DivideByZero:
if (fpcr.DZE()) {
ASSERT(false && "Raising floating point exceptions unimplemented");
}
ASSERT(!fpcr.DZE() && "Raising floating point exceptions unimplemented");
fpsr.DZC(true);
break;
case FPExc::Overflow:
if (fpcr.OFE()) {
ASSERT(false && "Raising floating point exceptions unimplemented");
}
ASSERT(!fpcr.OFE() && "Raising floating point exceptions unimplemented");
fpsr.OFC(true);
break;
case FPExc::Underflow:
if (fpcr.UFE()) {
ASSERT(false && "Raising floating point exceptions unimplemented");
}
ASSERT(!fpcr.UFE() && "Raising floating point exceptions unimplemented");
fpsr.UFC(true);
break;
case FPExc::Inexact:
if (fpcr.IXE()) {
ASSERT(false && "Raising floating point exceptions unimplemented");
}
ASSERT(!fpcr.IXE() && "Raising floating point exceptions unimplemented");
fpsr.IXC(true);
break;
case FPExc::InputDenorm:
if (fpcr.IDE()) {
ASSERT(false && "Raising floating point exceptions unimplemented");
}
ASSERT(!fpcr.IDE() && "Raising floating point exceptions unimplemented");
fpsr.IDC(true);
break;
default:
UNREACHABLE();
break;
}
}

16
src/dynarmic/src/dynarmic/frontend/A32/a32_ir_emitter.cpp

@ -56,15 +56,11 @@ IR::U32 IREmitter::GetRegister(Reg reg) {
}
IR::U32U64 IREmitter::GetExtendedRegister(ExtReg reg) {
if (A32::IsSingleExtReg(reg)) {
if (A32::IsSingleExtReg(reg))
return Inst<IR::U32U64>(Opcode::A32GetExtendedRegister32, IR::Value(reg));
}
if (A32::IsDoubleExtReg(reg)) {
else if (A32::IsDoubleExtReg(reg))
return Inst<IR::U32U64>(Opcode::A32GetExtendedRegister64, IR::Value(reg));
}
ASSERT(false && "Invalid reg.");
UNREACHABLE();
}
IR::U128 IREmitter::GetVector(ExtReg reg) {
@ -83,7 +79,7 @@ void IREmitter::SetExtendedRegister(const ExtReg reg, const IR::U32U64& value) {
} else if (A32::IsDoubleExtReg(reg)) {
Inst(Opcode::A32SetExtendedRegister64, IR::Value(reg), value);
} else {
ASSERT(false && "Invalid reg.");
UNREACHABLE();
}
}
@ -240,7 +236,7 @@ IR::UAny IREmitter::ReadMemory(size_t bitsize, const IR::U32& vaddr, IR::AccType
case 64:
return ReadMemory64(vaddr, acc_type);
}
ASSERT(false && "Invalid bitsize");
UNREACHABLE();
}
IR::U8 IREmitter::ReadMemory8(const IR::U32& vaddr, IR::AccType acc_type) {
@ -298,7 +294,7 @@ void IREmitter::WriteMemory(size_t bitsize, const IR::U32& vaddr, const IR::UAny
case 64:
return WriteMemory64(vaddr, value, acc_type);
}
ASSERT(false && "Invalid bitsize");
UNREACHABLE();
}
void IREmitter::WriteMemory8(const IR::U32& vaddr, const IR::U8& value, IR::AccType acc_type) {

2
src/dynarmic/src/dynarmic/frontend/A32/translate/impl/a32_translate_impl.cpp

@ -71,7 +71,7 @@ IR::UAny TranslatorVisitor::I(size_t bitsize, u64 value) {
case 64:
return ir.Imm64(value);
default:
ASSERT(false && "Imm - get: Invalid bitsize");
UNREACHABLE();
}
}

2
src/dynarmic/src/dynarmic/frontend/A32/translate/impl/asimd_load_store_structures.cpp

@ -69,7 +69,7 @@ std::optional<std::tuple<size_t, size_t, size_t>> DecodeType(Imm<4> type, size_t
}
return std::tuple<size_t, size_t, size_t>{4, 1, 2};
}
ASSERT(false && "Decode error");
UNREACHABLE();
}
} // namespace

21
src/dynarmic/src/dynarmic/frontend/A64/translate/impl/impl.cpp

@ -75,9 +75,8 @@ IR::UAny TranslatorVisitor::I(size_t bitsize, u64 value) {
return ir.Imm32(static_cast<u32>(value));
case 64:
return ir.Imm64(value);
default:
ASSERT(false && "Imm - get: Invalid bitsize");
}
UNREACHABLE();
}
IR::UAny TranslatorVisitor::X(size_t bitsize, Reg reg) {
@ -90,9 +89,8 @@ IR::UAny TranslatorVisitor::X(size_t bitsize, Reg reg) {
return ir.GetW(reg);
case 64:
return ir.GetX(reg);
default:
ASSERT(false && "X - get: Invalid bitsize");
}
UNREACHABLE();
}
void TranslatorVisitor::X(size_t bitsize, Reg reg, IR::U32U64 value) {
@ -103,9 +101,8 @@ void TranslatorVisitor::X(size_t bitsize, Reg reg, IR::U32U64 value) {
case 64:
ir.SetX(reg, value);
return;
default:
ASSERT(false && "X - set: Invalid bitsize");
}
UNREACHABLE();
}
IR::U32U64 TranslatorVisitor::SP(size_t bitsize) {
@ -114,9 +111,8 @@ IR::U32U64 TranslatorVisitor::SP(size_t bitsize) {
return ir.LeastSignificantWord(ir.GetSP());
case 64:
return ir.GetSP();
default:
ASSERT(false && "SP - get : Invalid bitsize");
}
UNREACHABLE();
}
void TranslatorVisitor::SP(size_t bitsize, IR::U32U64 value) {
@ -127,9 +123,8 @@ void TranslatorVisitor::SP(size_t bitsize, IR::U32U64 value) {
case 64:
ir.SetSP(value);
break;
default:
ASSERT(false && "SP - set : Invalid bitsize");
}
UNREACHABLE();
}
IR::U128 TranslatorVisitor::V(size_t bitsize, Vec vec) {
@ -140,9 +135,8 @@ IR::U128 TranslatorVisitor::V(size_t bitsize, Vec vec) {
return ir.GetD(vec);
case 128:
return ir.GetQ(vec);
default:
ASSERT(false && "V - get : Invalid bitsize");
}
UNREACHABLE();
}
void TranslatorVisitor::V(size_t bitsize, Vec vec, IR::U128 value) {
@ -157,9 +151,8 @@ void TranslatorVisitor::V(size_t bitsize, Vec vec, IR::U128 value) {
case 128:
ir.SetQ(vec, value);
return;
default:
ASSERT(false && "V - Set : Invalid bitsize");
}
UNREACHABLE();
}
IR::UAnyU128 TranslatorVisitor::V_scalar(size_t bitsize, Vec vec) {

11
src/dynarmic/src/dynarmic/frontend/A64/translate/impl/simd_scalar_x_indexed_element.cpp

@ -70,15 +70,8 @@ bool MultiplyByElementHalfPrecision(TranslatorVisitor& v, Imm<1> L, Imm<1> M, Im
// TODO: Currently we don't implement half-precision paths
// for regular multiplication and extended multiplication.
if (extra_behavior == ExtraBehavior::None) {
ASSERT(false && "half-precision option unimplemented");
}
if (extra_behavior == ExtraBehavior::MultiplyExtended) {
ASSERT(false && "half-precision option unimplemented");
}
ASSERT(extra_behavior != ExtraBehavior::None
&& extra_behavior != ExtraBehavior::MultiplyExtended);
if (extra_behavior == ExtraBehavior::Subtract) {
operand1 = v.ir.FPNeg(operand1);
}

2
src/dynarmic/src/dynarmic/ir/ir_emitter.h

@ -124,7 +124,7 @@ public:
ASSERT(value.GetType() == Type::U64);
return value;
}
ASSERT(false && "Invalid bitsize");
UNREACHABLE();
}
U32 LeastSignificantWord(const U64& value) {

2
src/dynarmic/tests/A32/fuzz_arm.cpp

@ -282,7 +282,7 @@ std::vector<u16> GenRandomThumbInst(u32 pc, bool is_last_inst, A32::ITState it_s
} else if (bitstring.substr(0, 8) == "11110100") {
bitstring.replace(0, 8, "11111001");
} else {
ASSERT(false && "Unhandled ASIMD instruction: {} {}", fn, bs);
UNREACHABLE(); // "Unhandled ASIMD instruction: {} {}", fn, bs);
}
if (std::find(do_not_test.begin(), do_not_test.end(), fn) != do_not_test.end()) {
invalid.emplace_back(InstructionGenerator{bitstring.c_str()});

16
src/dynarmic/tests/A32/testenv.h

@ -99,9 +99,13 @@ public:
void InterpreterFallback(u32 pc, size_t num_instructions) override { ASSERT(false, "InterpreterFallback({:08x} && {}) code = {:08x}", pc, num_instructions, *MemoryReadCode(pc)); }
void CallSVC(std::uint32_t swi) override { ASSERT(false && "CallSVC({})", swi); }
void CallSVC(std::uint32_t swi) override {
UNREACHABLE(); //ASSERT(false && "CallSVC({})", swi);
}
void ExceptionRaised(u32 pc, Dynarmic::A32::Exception /*exception*/) override { ASSERT(false && "ExceptionRaised({:08x}) code = {:08x}", pc, *MemoryReadCode(pc)); }
void ExceptionRaised(u32 pc, Dynarmic::A32::Exception /*exception*/) override {
UNREACHABLE(); //ASSERT(false && "ExceptionRaised({:08x}) code = {:08x}", pc, *MemoryReadCode(pc));
}
void AddTicks(std::uint64_t ticks) override {
if (ticks > ticks_left) {
@ -186,9 +190,13 @@ public:
void InterpreterFallback(std::uint32_t pc, size_t num_instructions) override { ASSERT(false, "InterpreterFallback({:016x} && {})", pc, num_instructions); }
void CallSVC(std::uint32_t swi) override { ASSERT(false && "CallSVC({})", swi); }
void CallSVC(std::uint32_t swi) override {
UNREACHABLE(); //ASSERT(false && "CallSVC({})", swi);
}
void ExceptionRaised(std::uint32_t pc, Dynarmic::A32::Exception) override { ASSERT(false && "ExceptionRaised({:016x})", pc); }
void ExceptionRaised(std::uint32_t pc, Dynarmic::A32::Exception) override {
UNREACHABLE(); //ASSERT(false && "ExceptionRaised({:016x})", pc);
}
void AddTicks(std::uint64_t ticks) override {
if (ticks > ticks_left) {

16
src/dynarmic/tests/A64/testenv.h

@ -107,9 +107,13 @@ public:
void InterpreterFallback(u64 pc, size_t num_instructions) override { ASSERT(false, "InterpreterFallback({:016x} && {})", pc, num_instructions); }
void CallSVC(std::uint32_t swi) override { ASSERT(false && "CallSVC({})", swi); }
void CallSVC(std::uint32_t swi) override {
UNREACHABLE(); //ASSERT(false && "CallSVC({})", swi);
}
void ExceptionRaised(u64 pc, Dynarmic::A64::Exception /*exception*/) override { ASSERT(false && "ExceptionRaised({:016x})", pc); }
void ExceptionRaised(u64 pc, Dynarmic::A64::Exception /*exception*/) override {
UNREACHABLE(); //ASSERT(false && "ExceptionRaised({:016x})", pc);
}
void AddTicks(std::uint64_t ticks) override {
if (ticks > ticks_left) {
@ -204,9 +208,13 @@ public:
void InterpreterFallback(u64 pc, size_t num_instructions) override { ASSERT(ignore_invalid_insn, "InterpreterFallback({:016x} && {})", pc, num_instructions); }
void CallSVC(std::uint32_t swi) override { ASSERT(false && "CallSVC({})", swi); }
void CallSVC(std::uint32_t swi) override {
UNREACHABLE(); //ASSERT(false && "CallSVC({})", swi);
}
void ExceptionRaised(u64 pc, Dynarmic::A64::Exception) override { ASSERT(false && "ExceptionRaised({:016x})", pc); }
void ExceptionRaised(u64 pc, Dynarmic::A64::Exception) override {
UNREACHABLE(); //ASSERT(false && "ExceptionRaised({:016x})", pc);
}
void AddTicks(std::uint64_t ticks) override {
if (ticks > ticks_left) {

2
src/dynarmic/tests/test_generator.cpp

@ -306,7 +306,7 @@ std::vector<u16> GenRandomThumbInst(u32 pc, bool is_last_inst, A32::ITState it_s
} else if (bitstring.substr(0, 8) == "11110100") {
bitstring.replace(0, 8, "11111001");
} else {
ASSERT(false && "Unhandled ASIMD instruction: {} {}", fn, bs);
UNREACHABLE(); // "Unhandled ASIMD instruction: {} {}", fn, bs);
}
if (std::find(do_not_test.begin(), do_not_test.end(), fn) != do_not_test.end()) {
invalid.emplace_back(InstructionGenerator{bitstring.c_str()});
