Browse Source

[hle, service] fix errors related to race conditions triggering under SMG1 and SMG2 (#3927)

- service manager may add a service while someone else is finding it, so properly lock
- nvhost doesn't properly account for the fact that map iterators are invalidated by erase/insert
- use proper exit routine for mapping locked that failed (try/catch hell)

The last two were introduced by #3858; the first one has been present since an unknown earlier commit.

Either way, remember that the ankerl map invalidates iterators upon erase/insert; I accounted for that, and SMG1 (and probably SMG2) now boot fine.

Signed-off-by: lizzie <lizzie@eden-emu.dev>
Reviewed-on: https://git.eden-emu.dev/eden-emu/eden/pulls/3927
Reviewed-by: crueter <crueter@eden-emu.dev>
Reviewed-by: CamilleLaVey <camillelavey99@gmail.com>
pull/3928/head
lizzie 4 days ago
committed by crueter
parent
commit
4f4c298a39
No known key found for this signature in database GPG Key ID: 425ACD2D4830EBC6
  1. 46
      src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
  2. 2
      src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
  3. 20
      src/core/hle/service/sm/sm.h

46
src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp

@ -199,25 +199,28 @@ NvResult nvhost_as_gpu::AllocateSpace(IoctlAllocSpace& params) {
return NvResult::Success;
}
void nvhost_as_gpu::FreeMappingLocked(u64 offset) {
auto const it = mapping_map.find(offset);
auto const mapping = it->second;
if (!mapping.fixed) {
auto& allocator{mapping.big_page ? *vm.big_page_allocator : *vm.small_page_allocator};
u32 page_size_bits{mapping.big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS};
u32 page_size{mapping.big_page ? vm.big_page_size : VM::YUZU_PAGESIZE};
u64 aligned_size{Common::AlignUp(mapping.size, page_size)};
allocator.Free(u32(mapping.offset >> page_size_bits), u32(aligned_size >> page_size_bits));
}
nvmap.UnpinHandle(mapping.handle);
// Sparse mappings shouldn't be fully unmapped, just returned to their sparse state
// Only FreeSpace can unmap them fully
if (mapping.sparse_alloc) {
gmmu->MapSparse(offset, mapping.size, mapping.big_page);
} else {
gmmu->Unmap(offset, mapping.size);
/// Frees the mapping registered at `offset`, returning its pages to the
/// appropriate allocator (for non-fixed mappings), unpinning the nvmap handle,
/// and either restoring the sparse state or unmapping the range.
/// Caller must already hold the address-space lock (hence "Locked").
/// @param offset GPU VA of the mapping to free.
/// @return true if a mapping existed at `offset` and was freed; false otherwise
///         (callers such as FreeSpace translate false into NvResult::BadValue).
bool nvhost_as_gpu::FreeMappingLocked(u64 offset) noexcept {
    auto const it = mapping_map.find(offset);
    if (it == mapping_map.end()) {
        // No mapping at this offset; nothing to erase — the previous code
        // erased an out-of-scope/end() iterator here, which was the bug.
        return false;
    }
    // Copy the entry out: ankerl::unordered_dense invalidates iterators on
    // erase/insert, and the calls below must not use `it` after mutation.
    auto const mapping = it->second;
    if (!mapping.fixed) {
        auto& allocator{mapping.big_page ? *vm.big_page_allocator : *vm.small_page_allocator};
        u32 page_size_bits{mapping.big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS};
        u32 page_size{mapping.big_page ? vm.big_page_size : VM::YUZU_PAGESIZE};
        u64 aligned_size{Common::AlignUp(mapping.size, page_size)};
        allocator.Free(u32(mapping.offset >> page_size_bits), u32(aligned_size >> page_size_bits));
    }
    nvmap.UnpinHandle(mapping.handle);
    // Sparse mappings shouldn't be fully unmapped, just returned to their sparse state
    // Only FreeSpace can unmap them fully
    if (mapping.sparse_alloc) {
        gmmu->MapSparse(offset, mapping.size, mapping.big_page);
    } else {
        gmmu->Unmap(offset, mapping.size);
    }
    // Erase by key, not by iterator: `it` may have been invalidated if any of
    // the calls above touched mapping_map.
    mapping_map.erase(offset);
    return true;
}
NvResult nvhost_as_gpu::FreeSpace(IoctlFreeSpace& params) {
@ -232,7 +235,8 @@ NvResult nvhost_as_gpu::FreeSpace(IoctlFreeSpace& params) {
return NvResult::BadValue;
for (const auto mapping_offset : allocation.mappings)
FreeMappingLocked(mapping_offset);
if (!FreeMappingLocked(mapping_offset))
return NvResult::BadValue;
// Unset sparse flag if required
if (allocation.sparse)
@ -402,8 +406,8 @@ NvResult nvhost_as_gpu::UnmapBuffer(IoctlUnmapBuffer& params) {
}
nvmap.UnpinHandle(mapping.handle);
mapping_map.erase(it);
map_buffer_offsets.erase(offset_it);
mapping_map.erase(params.offset);
map_buffer_offsets.erase(params.offset);
}
return NvResult::Success;
}

2
src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h

@ -157,7 +157,7 @@ private:
NvResult GetVARegions1(IoctlGetVaRegions& params);
NvResult GetVARegions3(IoctlGetVaRegions& params, std::span<VaRegion> regions);
void FreeMappingLocked(u64 offset);
[[nodiscard]] bool FreeMappingLocked(u64 offset) noexcept;
Module& module;

20
src/core/hle/service/sm/sm.h

@ -67,22 +67,24 @@ public:
Result GetServicePort(Kernel::KClientPort** out_client_port, const std::string& name);
template <std::derived_from<SessionRequestHandler> T>
std::shared_ptr<T> GetService(const std::string& service_name, bool block = false) const {
auto service = registered_services.find(service_name);
if (service == registered_services.end() && !block) {
LOG_DEBUG(Service, "Can't find service: {}", service_name);
// Looks up a registered service by name and returns its handler, optionally
// blocking until the service has been registered. Thread-safe: the registry
// mutex is held across every lookup, since another thread may be registering
// a service concurrently (the race this change fixes).
std::shared_ptr<T> GetService(const std::string& name, bool block = false) const {
std::unique_lock l{lock};
auto it = registered_services.find(name);
if (it == registered_services.end() && !block) {
LOG_DEBUG(Service, "Can't find service: {}", name);
return nullptr;
} else if (block) {
using namespace std::literals::chrono_literals;
// Poll until the service appears, releasing the lock while sleeping so the
// registering thread can make progress.
while (it == registered_services.end()) {
l.unlock();
Kernel::Svc::SleepThread(
kernel.System(),
std::chrono::duration_cast<std::chrono::nanoseconds>(100ms).count());
l.lock();
// Re-run the lookup after reacquiring the lock: the map may have been
// mutated while unlocked, which invalidates the previous iterator.
it = registered_services.find(name);
}
}
// Invoke the stored factory and downcast to the requested handler type.
return std::static_pointer_cast<T>(it->second());
}
void InvokeControlRequest(HLERequestContext& context);
@ -96,7 +98,7 @@ private:
std::unique_ptr<Controller> controller_interface;
/// Map of registered services, retrieved using GetServicePort.
std::mutex lock;
mutable std::mutex lock;
ankerl::unordered_dense::map<std::string, SessionRequestHandlerFactory> registered_services;
ankerl::unordered_dense::map<std::string, Kernel::KClientPort*> service_ports;

Loading…
Cancel
Save