Browse Source

[core/hle/service/nvdrv] fix Nvmap storage being pointer-unstable due to ankerl maps

Signed-off-by: lizzie <lizzie@eden-emu.dev>
lizzie/fix-nvmap-handles
lizzie 3 days ago
parent
commit
ebe654b491
  1. 139
      src/core/hle/service/nvdrv/core/nvmap.cpp
  2. 60
      src/core/hle/service/nvdrv/core/nvmap.h
  3. 8
      src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
  4. 44
      src/core/hle/service/nvdrv/devices/nvmap.cpp

139
src/core/hle/service/nvdrv/core/nvmap.cpp

@@ -71,10 +71,9 @@ NvResult NvMap::Handle::Duplicate(bool internal_session) {
NvMap::NvMap(Container& core_, Tegra::Host1x::Host1x& host1x_) : host1x{host1x_}, core{core_} {}
void NvMap::AddHandle(std::shared_ptr<Handle> handle_description) {
std::scoped_lock lock(handles_lock);
handles.emplace(handle_description->id, std::move(handle_description));
void NvMap::AddHandle(Handle&& handle_description) {
std::scoped_lock l(handles_lock);
handles.emplace(handle_description.id, std::move(handle_description));
}
void NvMap::UnmapHandle(Handle& handle_description) {
@@ -113,65 +112,56 @@ void NvMap::UnmapHandle(Handle& handle_description) {
bool NvMap::TryRemoveHandle(const Handle& handle_description) {
// No dupes left, we can remove from handle map
if (handle_description.dupes == 0 && handle_description.internal_dupes == 0) {
std::scoped_lock lock(handles_lock);
auto it{handles.find(handle_description.id)};
std::scoped_lock l(handles_lock);
auto it = handles.find(handle_description.id);
if (it != handles.end()) {
handles.erase(it);
}
return true;
} else {
return false;
}
}
NvResult NvMap::CreateHandle(u64 size, std::shared_ptr<NvMap::Handle>& result_out) {
if (!size) [[unlikely]] {
NvResult NvMap::CreateHandle(u64 size, Handle::Id& out_handle) {
if (!Common::AlignUp(size, YUZU_PAGESIZE)) {
return NvResult::BadValue;
}
u32 id{next_handle_id.fetch_add(HandleIdIncrement, std::memory_order_relaxed)};
auto handle_description{std::make_shared<Handle>(size, id)};
AddHandle(handle_description);
result_out = handle_description;
u32 id = next_handle_id.fetch_add(HandleIdIncrement, std::memory_order_relaxed);
AddHandle(Handle(size, id));
out_handle = id;
return NvResult::Success;
}
std::shared_ptr<NvMap::Handle> NvMap::GetHandle(Handle::Id handle) {
std::optional<std::reference_wrapper<NvMap::Handle>> NvMap::GetHandle(Handle::Id handle) {
std::scoped_lock lock(handles_lock);
try {
return handles.at(handle);
} catch (std::out_of_range&) {
return nullptr;
}
if (auto const it = handles.find(handle); it != handles.end())
return {it->second};
return std::nullopt;
}
DAddr NvMap::GetHandleAddress(Handle::Id handle) {
std::scoped_lock lock(handles_lock);
try {
return handles.at(handle)->d_address;
} catch (std::out_of_range&) {
return 0;
}
if (auto const it = handles.find(handle); it != handles.end())
return it->second.d_address;
return 0;
}
DAddr NvMap::PinHandle(NvMap::Handle::Id handle, bool low_area_pin) {
auto handle_description{GetHandle(handle)};
if (!handle_description) [[unlikely]] {
auto o = GetHandle(handle);
if (!o) [[unlikely]] {
return 0;
}
auto handle_description = &o->get();
std::scoped_lock lock(handle_description->mutex);
const auto map_low_area = [&] {
if (handle_description->pin_virt_address == 0) {
auto& gmmu_allocator = host1x.Allocator();
auto& gmmu = host1x.GMMU();
u32 address =
gmmu_allocator.Allocate(static_cast<u32>(handle_description->aligned_size));
gmmu.Map(static_cast<GPUVAddr>(address), handle_description->d_address,
handle_description->aligned_size);
u32 address = gmmu_allocator.Allocate(u32(handle_description->aligned_size));
gmmu.Map(GPUVAddr(address), handle_description->d_address, handle_description->aligned_size);
handle_description->pin_virt_address = address;
}
};
@@ -181,17 +171,15 @@ DAddr NvMap::PinHandle(NvMap::Handle::Id handle, bool low_area_pin) {
{
// Lock now to prevent our queue entry from being removed for allocation in-between the
// following check and erase
std::scoped_lock queueLock(unmap_queue_lock);
std::scoped_lock ql(unmap_queue_lock);
if (handle_description->unmap_queue_entry) {
unmap_queue.erase(*handle_description->unmap_queue_entry);
handle_description->unmap_queue_entry.reset();
if (low_area_pin) {
map_low_area();
handle_description->pins++;
return static_cast<DAddr>(handle_description->pin_virt_address);
return DAddr(handle_description->pin_virt_address);
}
handle_description->pins++;
return handle_description->d_address;
}
@@ -212,12 +200,12 @@ DAddr NvMap::PinHandle(NvMap::Handle::Id handle, bool low_area_pin) {
while ((address = smmu.Allocate(aligned_up)) == 0) {
// Free handles until the allocation succeeds
std::scoped_lock queueLock(unmap_queue_lock);
if (auto freeHandleDesc{unmap_queue.front()}) {
if (auto free_handle = handles.find(unmap_queue.front()); free_handle != handles.end()) {
// Handles in the unmap queue are guaranteed not to be pinned so don't bother
// checking if they are before unmapping
std::scoped_lock freeLock(freeHandleDesc->mutex);
std::scoped_lock fl(free_handle->second.mutex);
if (handle_description->d_address)
UnmapHandle(*freeHandleDesc);
UnmapHandle(free_handle->second);
} else {
LOG_CRITICAL(Service_NVDRV, "Ran out of SMMU address space!");
}
@@ -235,51 +223,44 @@ DAddr NvMap::PinHandle(NvMap::Handle::Id handle, bool low_area_pin) {
handle_description->pins++;
if (low_area_pin) {
return static_cast<DAddr>(handle_description->pin_virt_address);
return DAddr(handle_description->pin_virt_address);
}
return handle_description->d_address;
}
void NvMap::UnpinHandle(Handle::Id handle) {
auto handle_description{GetHandle(handle)};
if (!handle_description) {
return;
}
std::scoped_lock lock(handle_description->mutex);
if (--handle_description->pins < 0) {
LOG_WARNING(Service_NVDRV, "Pin count imbalance detected!");
} else if (!handle_description->pins) {
std::scoped_lock queueLock(unmap_queue_lock);
// Add to the unmap queue allowing this handle's memory to be freed if needed
unmap_queue.push_back(handle_description);
handle_description->unmap_queue_entry = std::prev(unmap_queue.end());
if (auto o = GetHandle(handle); o) {
auto handle_description = &o->get();
std::scoped_lock lock(handle_description->mutex);
if (--handle_description->pins < 0) {
LOG_WARNING(Service_NVDRV, "Pin count imbalance detected!");
} else if (!handle_description->pins) {
std::scoped_lock ql(unmap_queue_lock);
// Add to the unmap queue allowing this handle's memory to be freed if needed
unmap_queue.push_back(handle);
handle_description->unmap_queue_entry = std::prev(unmap_queue.end());
}
}
}
void NvMap::DuplicateHandle(Handle::Id handle, bool internal_session) {
auto handle_description{GetHandle(handle)};
if (!handle_description) {
auto o = GetHandle(handle);
if (!o) {
LOG_CRITICAL(Service_NVDRV, "Unregistered handle!");
return;
}
auto result = handle_description->Duplicate(internal_session);
auto result = o->get().Duplicate(internal_session);
if (result != NvResult::Success) {
LOG_CRITICAL(Service_NVDRV, "Could not duplicate handle!");
}
}
std::optional<NvMap::FreeInfo> NvMap::FreeHandle(Handle::Id handle, bool internal_session) {
std::weak_ptr<Handle> hWeak{GetHandle(handle)};
FreeInfo freeInfo;
// We use a weak ptr here so we can tell when the handle has been freed and report that back to
// guest
if (auto handle_description = hWeak.lock()) {
std::scoped_lock lock(handle_description->mutex);
if (auto o = GetHandle(handle); o) {
auto handle_description = &o->get();
std::scoped_lock l(handle_description->mutex);
if (internal_session) {
if (--handle_description->internal_dupes < 0)
LOG_WARNING(Service_NVDRV, "Internal duplicate count imbalance detected!");
@@ -289,25 +270,25 @@ std::optional<NvMap::FreeInfo> NvMap::FreeHandle(Handle::Id handle, bool interna
} else if (handle_description->dupes == 0) {
// Force unmap the handle
if (handle_description->d_address) {
std::scoped_lock queueLock(unmap_queue_lock);
std::scoped_lock ql(unmap_queue_lock);
UnmapHandle(*handle_description);
}
handle_description->pins = 0;
}
}
// Try to remove the shared ptr to the handle from the map, if nothing else is using the
// handle then it will now be freed when `handle_description` goes out of scope
if (TryRemoveHandle(*handle_description)) {
LOG_DEBUG(Service_NVDRV, "Removed nvmap handle: {}", handle);
} else {
LOG_DEBUG(Service_NVDRV,
"Tried to free nvmap handle: {} but didn't as it still has duplicates",
handle);
LOG_DEBUG(Service_NVDRV, "Tried to free nvmap handle: {} but didn't as it still has duplicates", handle);
}
freeInfo = {
// // If the handle hasn't been freed from memory, mark that
// if (!hWeak.expired()) {
// LOG_DEBUG(Service_NVDRV, "nvmap handle: {} wasn't freed as it is still in use", handle);
// freeInfo.can_unlock = false;
// }
return FreeInfo{
.address = handle_description->address,
.size = handle_description->size,
.was_uncached = handle_description->flags.map_uncached.Value() != 0,
@@ -316,14 +297,6 @@ std::optional<NvMap::FreeInfo> NvMap::FreeHandle(Handle::Id handle, bool interna
} else {
return std::nullopt;
}
// If the handle hasn't been freed from memory, mark that
if (!hWeak.expired()) {
LOG_DEBUG(Service_NVDRV, "nvmap handle: {} wasn't freed as it is still in use", handle);
freeInfo.can_unlock = false;
}
return freeInfo;
}
void NvMap::UnmapAllHandles(NvCore::SessionId session_id) {
@@ -334,8 +307,8 @@ void NvMap::UnmapAllHandles(NvCore::SessionId session_id) {
for (auto& [id, handle] : handles_copy) {
{
std::scoped_lock lk{handle->mutex};
if (handle->session_id.id != session_id.id || handle->dupes <= 0) {
std::scoped_lock lk{handle.mutex};
if (handle.session_id.id != session_id.id || handle.dupes <= 0) {
continue;
}
}

60
src/core/hle/service/nvdrv/core/nvmap.h

@@ -12,6 +12,7 @@
#include <memory>
#include <mutex>
#include <optional>
#include <boost/unordered/unordered_node_map.hpp>
#include <ankerl/unordered_dense.h>
#include <assert.h>
@@ -31,55 +32,39 @@ class Host1x;
namespace Service::Nvidia::NvCore {
class Container;
/**
* @brief The nvmap core class holds the global state for nvmap and provides methods to manage
* handles
*/
/// @brief The nvmap core class holds the global state for nvmap and provides methods to manage handles
class NvMap {
public:
/**
* @brief A handle to a contiguous block of memory in an application's address space
*/
/// @brief A handle to a contiguous block of memory in an application's address space
struct Handle {
using Id = u32;
std::mutex mutex;
std::optional<typename std::list<Handle::Id>::iterator> unmap_queue_entry{};
u64 align{}; //!< The alignment to use when pinning the handle onto the SMMU
u64 size; //!< Page-aligned size of the memory the handle refers to
u64 aligned_size; //!< `align`-aligned size of the memory the handle refers to
u64 orig_size; //!< Original unaligned size of the memory this handle refers to
DAddr d_address{}; //!< The memory location in the device's AS that this handle corresponds to, this can also be in the nvdrv tmem
VAddr address{}; //!< The memory location in the guest's AS that this handle corresponds to, this can also be in the nvdrv tmem
s64 pins{};
s32 dupes{1}; //!< How many guest references there are to this handle
s32 internal_dupes{0}; //!< How many emulator-internal references there are to this handle
using Id = u32;
Id id; //!< A globally unique identifier for this handle
s64 pins{};
u32 pin_virt_address{};
std::optional<typename std::list<std::shared_ptr<Handle>>::iterator> unmap_queue_entry{};
union Flags {
u32 raw;
BitField<0, 1, u32> map_uncached; //!< If the handle should be mapped as uncached
BitField<2, 1, u32> keep_uncached_after_free; //!< Only applicable when the handle was
//!< allocated with a fixed address
BitField<4, 1, u32> _unk0_; //!< Passed to IOVMM for pins
BitField<2, 1, u32> keep_uncached_after_free; //!< Only applicable when the handle was allocated with a fixed address
BitField<4, 1, u32> _unk0_; //!< Passed to IOVMM for pins
} flags{};
static_assert(sizeof(Flags) == sizeof(u32));
VAddr address{}; //!< The memory location in the guest's AS that this handle corresponds to,
//!< this can also be in the nvdrv tmem
bool is_shared_mem_mapped{}; //!< If this nvmap has been mapped with the MapSharedMem IPC
//!< call
u8 kind{}; //!< Used for memory compression
bool allocated{}; //!< If the handle has been allocated with `Alloc`
bool in_heap{};
NvCore::SessionId session_id{};
u8 kind{}; //!< Used for memory compression
bool allocated : 1 = false; //!< If the handle has been allocated with `Alloc`
bool in_heap : 1 = false;
bool is_shared_mem_mapped : 1 = false; //!< If this nvmap has been mapped with the MapSharedMem IPC call
DAddr d_address{}; //!< The memory location in the device's AS that this handle corresponds
//!< to, this can also be in the nvdrv tmem
Handle() = default;
Handle(u64 size, Id id);
/**
@@ -123,9 +108,9 @@ public:
/**
* @brief Creates an unallocated handle of the given size
*/
[[nodiscard]] NvResult CreateHandle(u64 size, std::shared_ptr<NvMap::Handle>& result_out);
[[nodiscard]] NvResult CreateHandle(u64 size, Handle::Id& out_handle);
std::shared_ptr<Handle> GetHandle(Handle::Id handle);
std::optional<std::reference_wrapper<Handle>> GetHandle(Handle::Id handle);
DAddr GetHandleAddress(Handle::Id handle);
@@ -158,19 +143,16 @@ public:
void UnmapAllHandles(NvCore::SessionId session_id);
private:
std::list<std::shared_ptr<Handle>> unmap_queue{};
std::list<Handle::Id> unmap_queue{};
boost::unordered_node_map<Handle::Id, Handle> handles{}; //!< Main owning map of handles
std::mutex unmap_queue_lock{}; //!< Protects access to `unmap_queue`
ankerl::unordered_dense::map<Handle::Id, std::shared_ptr<Handle>>
handles{}; //!< Main owning map of handles
std::mutex handles_lock; //!< Protects access to `handles`
static constexpr u32 HandleIdIncrement{
4}; //!< Each new handle ID is an increment of 4 from the previous
static constexpr u32 HandleIdIncrement{4}; //!< Each new handle ID is an increment of 4 from the previous
std::atomic<u32> next_handle_id{HandleIdIncrement};
Tegra::Host1x::Host1x& host1x;
void AddHandle(std::shared_ptr<Handle> handle);
void AddHandle(Handle&& handle);
/**
* @brief Unmaps and frees the SMMU memory region a handle is mapped to

8
src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp

@@ -351,13 +351,13 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) {
}
}
auto handle{nvmap.GetHandle(params.handle)};
if (!handle) {
auto o = nvmap.GetHandle(params.handle);
if (!o) {
return NvResult::BadValue;
}
auto handle = &o->get();
DAddr device_address{
static_cast<DAddr>(nvmap.PinHandle(params.handle, false) + params.buffer_offset)};
DAddr device_address = DAddr(nvmap.PinHandle(params.handle, false) + params.buffer_offset);
u64 size{params.mapping_size ? params.mapping_size : handle->orig_size};
bool big_page{[&]() {

44
src/core/hle/service/nvdrv/devices/nvmap.cpp

@@ -83,17 +83,14 @@ void nvmap::OnClose(DeviceFD fd) {
NvResult nvmap::IocCreate(IocCreateParams& params) {
LOG_DEBUG(Service_NVDRV, "called, size=0x{:08X}", params.size);
std::shared_ptr<NvCore::NvMap::Handle> handle_description{};
auto result =
file.CreateHandle(Common::AlignUp(params.size, YUZU_PAGESIZE), handle_description);
NvCore::NvMap::Handle handle_description(0, 0);
// Orig size is the unaligned size, set the handle to that
auto result = file.CreateHandle(params.size, params.handle);
if (result != NvResult::Success) {
LOG_CRITICAL(Service_NVDRV, "Failed to create Object");
return result;
}
handle_description->orig_size = params.size; // Orig size is the unaligned size
params.handle = handle_description->id;
LOG_DEBUG(Service_NVDRV, "handle: {}, size: {:#X}", handle_description->id, params.size);
LOG_DEBUG(Service_NVDRV, "handle: {}, size: {:#X}", params.handle, params.size);
return NvResult::Success;
}
@@ -115,30 +112,26 @@ NvResult nvmap::IocAlloc(IocAllocParams& params, DeviceFD fd) {
params.align = YUZU_PAGESIZE;
}
auto handle_description{file.GetHandle(params.handle)};
if (!handle_description) {
auto o = file.GetHandle(params.handle);
if (!o) {
LOG_CRITICAL(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
return NvResult::BadValue;
}
auto handle_description = &o->get();
if (handle_description->allocated) {
LOG_CRITICAL(Service_NVDRV, "Object is already allocated, handle={:08X}", params.handle);
return NvResult::InsufficientMemory;
}
const auto result = handle_description->Alloc(params.flags, params.align, params.kind,
params.address, sessions[fd]);
const auto result = handle_description->Alloc(params.flags, params.align, params.kind, params.address, sessions[fd]);
if (result != NvResult::Success) {
LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle);
return result;
}
bool is_out_io{};
auto process = container.GetSession(sessions[fd])->process;
ASSERT(process->GetPageTable()
.LockForMapDeviceAddressSpace(&is_out_io, handle_description->address,
handle_description->size,
Kernel::KMemoryPermission::None, true, false)
.IsSuccess());
ASSERT(process->GetPageTable().LockForMapDeviceAddressSpace(&is_out_io, handle_description->address, handle_description->size, Kernel::KMemoryPermission::None, true, false).IsSuccess());
return result;
}
@@ -151,13 +144,12 @@ NvResult nvmap::IocGetId(IocGetIdParams& params) {
return NvResult::BadValue;
}
auto handle_description{file.GetHandle(params.handle)};
if (!handle_description) {
auto o = file.GetHandle(params.handle);
if (!o) {
LOG_CRITICAL(Service_NVDRV, "Error!");
return NvResult::AccessDenied; // This will always return EPERM irrespective of if the
// handle exists or not
return NvResult::AccessDenied; // This will always return EPERM irrespective of if the handle exists or not
}
auto handle_description = &o->get();
params.id = handle_description->id;
return NvResult::Success;
}
@@ -174,12 +166,13 @@ NvResult nvmap::IocFromId(IocFromIdParams& params) {
return NvResult::BadValue;
}
auto handle_description{file.GetHandle(params.id)};
if (!handle_description) {
auto o = file.GetHandle(params.id);
if (!o) {
LOG_CRITICAL(Service_NVDRV, "Unregistered handle!");
return NvResult::BadValue;
}
auto handle_description = &o->get();
auto result = handle_description->Duplicate(false);
if (result != NvResult::Success) {
LOG_CRITICAL(Service_NVDRV, "Could not duplicate handle!");
@@ -199,12 +192,13 @@ NvResult nvmap::IocParam(IocParamParams& params) {
return NvResult::BadValue;
}
auto handle_description{file.GetHandle(params.handle)};
if (!handle_description) {
auto o = file.GetHandle(params.handle);
if (!o) {
LOG_CRITICAL(Service_NVDRV, "Not registered handle!");
return NvResult::BadValue;
}
auto handle_description = &o->get();
switch (params.param) {
case HandleParameterType::Size:
params.result = static_cast<u32_le>(handle_description->orig_size);

Loading…
Cancel
Save