20 changed files with 558 additions and 45 deletions

 13  src/common/bit_field.h
  8  src/core/CMakeLists.txt
 41  src/core/hle/service/nvdrv/core/container.cpp
 38  src/core/hle/service/nvdrv/core/container.h
245  src/core/hle/service/nvdrv/core/nvmap.cpp
155  src/core/hle/service/nvdrv/core/nvmap.h
  6  src/core/hle/service/nvdrv/core/syncpoint_manager.cpp
  4  src/core/hle/service/nvdrv/core/syncpoint_manager.h
  8  src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
 10  src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
  9  src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
 10  src/core/hle/service/nvdrv/devices/nvhost_gpu.h
  4  src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
  2  src/core/hle/service/nvdrv/devices/nvhost_nvdec.h
  8  src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
  9  src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
  4  src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
  2  src/core/hle/service/nvdrv/devices/nvhost_vic.h
 16  src/core/hle/service/nvdrv/nvdrv.cpp
 11  src/core/hle/service/nvdrv/nvdrv.h
@ -0,0 +1,41 @@ |
|||||
|
// Copyright 2021 yuzu emulator team
|
||||
|
// Copyright 2021 Skyline Team and Contributors (https://github.com/skyline-emu/)
|
||||
|
// Licensed under GPLv2 or any later version
|
||||
|
// Refer to the license.txt file included.
|
||||
|
|
||||
|
#include "core/hle/service/nvdrv/core/container.h"
|
||||
|
#include "core/hle/service/nvdrv/core/nvmap.h"
|
||||
|
#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
|
||||
|
#include "video_core/gpu.h"
|
||||
|
|
||||
|
namespace Service::Nvidia::NvCore { |
||||
|
|
||||
|
struct ContainerImpl { |
||||
|
ContainerImpl(Tegra::GPU& gpu_) : file{}, manager{gpu_} {} |
||||
|
NvMap file; |
||||
|
SyncpointManager manager; |
||||
|
}; |
||||
|
|
||||
|
Container::Container(Tegra::GPU& gpu_) { |
||||
|
impl = std::make_unique<ContainerImpl>(gpu_); |
||||
|
} |
||||
|
|
||||
|
// Defined out-of-line so std::unique_ptr<ContainerImpl> destroys a complete type
// (ContainerImpl is only forward-declared in the header).
Container::~Container() = default;
||||
|
|
||||
|
// Mutable access to the container's nvmap handle table.
NvMap& Container::GetNvMapFile() {
    return impl->file;
}
||||
|
|
||||
|
// Read-only access to the container's nvmap handle table.
const NvMap& Container::GetNvMapFile() const {
    return impl->file;
}
||||
|
|
||||
|
// Mutable access to the container's syncpoint manager.
SyncpointManager& Container::GetSyncpointManager() {
    return impl->manager;
}
||||
|
|
||||
|
// Read-only access to the container's syncpoint manager.
const SyncpointManager& Container::GetSyncpointManager() const {
    return impl->manager;
}
||||
|
|
||||
|
} // namespace Service::Nvidia::NvCore
|
||||
@ -0,0 +1,38 @@ |
|||||
|
// Copyright 2021 yuzu emulator team |
||||
|
// Copyright 2021 Skyline Team and Contributors (https://github.com/skyline-emu/) |
||||
|
// Licensed under GPLv2 or any later version |
||||
|
// Refer to the license.txt file included. |
||||
|
|
||||
|
#pragma once |
||||
|
|
||||
|
#include <memory> |
||||
|
|
||||
|
namespace Tegra { |
||||
|
class GPU; |
||||
|
} |
||||
|
|
||||
|
namespace Service::Nvidia::NvCore { |
||||
|
|
||||
|
class NvMap; |
||||
|
class SyncpointManager; |
||||
|
|
||||
|
struct ContainerImpl; |
||||
|
|
||||
|
class Container { |
||||
|
public: |
||||
|
Container(Tegra::GPU& gpu_); |
||||
|
~Container(); |
||||
|
|
||||
|
NvMap& GetNvMapFile(); |
||||
|
|
||||
|
const NvMap& GetNvMapFile() const; |
||||
|
|
||||
|
SyncpointManager& GetSyncpointManager(); |
||||
|
|
||||
|
const SyncpointManager& GetSyncpointManager() const; |
||||
|
|
||||
|
private: |
||||
|
std::unique_ptr<ContainerImpl> impl; |
||||
|
}; |
||||
|
|
||||
|
} // namespace Service::Nvidia::NvCore |
||||
@ -0,0 +1,245 @@ |
|||||
|
// Copyright 2021 Skyline Team and Contributors (https://github.com/skyline-emu/)
|
||||
|
// Licensed under GPLv2 or any later version
|
||||
|
// Refer to the license.txt file included.
|
||||
|
|
||||
|
#include "common/alignment.h"
|
||||
|
#include "common/assert.h"
|
||||
|
#include "common/logging/log.h"
|
||||
|
#include "core/hle/service/nvdrv/core/nvmap.h"
|
||||
|
#include "core/memory.h"
|
||||
|
|
||||
|
using Core::Memory::YUZU_PAGESIZE; |
||||
|
|
||||
|
namespace Service::Nvidia::NvCore { |
||||
|
// A fresh handle starts with every size field equal to the requested size; Alloc() later
// rounds `size` and `aligned_size` up to page/alignment boundaries.
NvMap::Handle::Handle(u64 size, Id id) : size(size), aligned_size(size), orig_size(size), id(id) {}
||||
|
|
||||
|
/**
 * @brief Sets up the handle with the given memory config.
 * @param pFlags Mapping flags; `keep_uncached_after_free` is cleared unless an address is given
 * @param pAlign Requested alignment, clamped up to at least one guest page
 * @param pKind Memory kind value (used for memory compression)
 * @param pAddress Guest CPU-side address backing the handle; 0 is currently unimplemented
 * @return AccessDenied if already allocated, Success otherwise
 */
NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress) {
    std::scoped_lock lock(mutex);

    // Handles cannot be allocated twice
    if (allocated) [[unlikely]] {
        return NvResult::AccessDenied;
    }

    flags = pFlags;
    kind = pKind;
    // Never align below the guest page size
    align = pAlign < YUZU_PAGESIZE ? YUZU_PAGESIZE : pAlign;

    // This flag is only applicable for handles with an address passed
    if (pAddress) {
        flags.keep_uncached_after_free = 0;
    } else {
        LOG_CRITICAL(Service_NVDRV,
                     "Mapping nvmap handles without a CPU side address is unimplemented!");
    }

    // `size` becomes page-aligned; `aligned_size` is additionally rounded to `align`
    size = Common::AlignUp(size, YUZU_PAGESIZE);
    aligned_size = Common::AlignUp(size, align);
    address = pAddress;

    // TODO: pin init

    allocated = true;

    return NvResult::Success;
}
||||
|
|
||||
|
/**
 * @brief Increases the dupe counter of the handle for the given session.
 * @param internal_session True when the reference is emulator-internal rather than guest-visible
 * @return BadValue for unallocated handles, Success otherwise
 */
NvResult NvMap::Handle::Duplicate(bool internal_session) {
    // Take the lock before inspecting `allocated`: the original checked it unlocked, racing
    // with Alloc() setting it under the same mutex.
    std::scoped_lock lock(mutex);

    // Unallocated handles cannot be duplicated as duplication requires memory accounting (in HOS)
    if (!allocated) [[unlikely]] {
        return NvResult::BadValue;
    }

    // If we internally use FromId the duplication tracking of handles won't work accurately due to
    // us not implementing per-process handle refs.
    if (internal_session) {
        internal_dupes++;
    } else {
        dupes++;
    }

    return NvResult::Success;
}
||||
|
|
||||
|
// All members use in-class initializers; nothing further to set up.
NvMap::NvMap() = default;
||||
|
|
||||
|
void NvMap::AddHandle(std::shared_ptr<Handle> handleDesc) { |
||||
|
std::scoped_lock lock(handles_lock); |
||||
|
|
||||
|
handles.emplace(handleDesc->id, std::move(handleDesc)); |
||||
|
} |
||||
|
|
||||
|
// Unmaps and frees the SMMU region a handle is mapped to. Per the header contract, the caller
// must hold both `unmap_queue_lock` and `handleDesc.mutex`. The SMMU itself is not wired up in
// this port yet, so only the queue bookkeeping is live; the commented code is the reference
// implementation for the rest.
void NvMap::UnmapHandle(Handle& handleDesc) {
    // Remove pending unmap queue entry if needed
    if (handleDesc.unmap_queue_entry) {
        unmap_queue.erase(*handleDesc.unmap_queue_entry);
        handleDesc.unmap_queue_entry.reset();
    }

    // Free and unmap the handle from the SMMU
    /*
    state.soc->smmu.Unmap(handleDesc.pin_virt_address, static_cast<u32>(handleDesc.aligned_size));
    smmuAllocator.Free(handleDesc.pin_virt_address, static_cast<u32>(handleDesc.aligned_size));
    handleDesc.pin_virt_address = 0;
    */
}
||||
|
|
||||
|
bool NvMap::TryRemoveHandle(const Handle& handleDesc) { |
||||
|
// No dupes left, we can remove from handle map
|
||||
|
if (handleDesc.dupes == 0 && handleDesc.internal_dupes == 0) { |
||||
|
std::scoped_lock lock(handles_lock); |
||||
|
|
||||
|
auto it{handles.find(handleDesc.id)}; |
||||
|
if (it != handles.end()) |
||||
|
handles.erase(it); |
||||
|
|
||||
|
return true; |
||||
|
} else { |
||||
|
return false; |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
/**
 * @brief Creates an unallocated handle of the given size and registers it.
 * @param size Requested size in bytes; must be non-zero
 * @param result_out Receives the newly created handle on success
 * @return BadValue for a zero size, Success otherwise
 */
NvResult NvMap::CreateHandle(u64 size, std::shared_ptr<NvMap::Handle>& result_out) {
    if (!size) [[unlikely]] {
        return NvResult::BadValue;
    }

    // IDs advance by a fixed stride; relaxed order suffices for a unique-id counter
    const u32 id{next_handle_id.fetch_add(HandleIdIncrement, std::memory_order_relaxed)};
    auto handleDesc{std::make_shared<Handle>(size, id)};
    AddHandle(handleDesc);

    // Move the last local reference out instead of copying (saves a refcount round-trip)
    result_out = std::move(handleDesc);
    return NvResult::Success;
}
||||
|
|
||||
|
/**
 * @brief Looks up a handle by id.
 * @return The handle, or nullptr when no handle with that id exists
 */
std::shared_ptr<NvMap::Handle> NvMap::GetHandle(Handle::Id handle) {
    std::scoped_lock lock(handles_lock);

    // Plain lookup instead of at() + catching std::out_of_range: a missing id is an
    // expected case here, not an exceptional one, and unwinding is far more expensive.
    const auto it = handles.find(handle);
    if (it == handles.end()) {
        return nullptr;
    }
    return it->second;
}
||||
|
|
||||
|
// Maps a handle into the SMMU address space (refcounted; see header). SMMU pinning is not yet
// wired up in this port — the commented-out code below is the reference implementation this
// will eventually be based on.
u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
    UNIMPLEMENTED_MSG("pinning");
    return 0;
    /*
    auto handleDesc{GetHandle(handle)};
    if (!handleDesc)
        [[unlikely]] return 0;

    std::scoped_lock lock(handleDesc->mutex);
    if (!handleDesc->pins) {
        // If we're in the unmap queue we can just remove ourselves and return since we're already
        // mapped
        {
            // Lock now to prevent our queue entry from being removed for allocation in-between the
            // following check and erase
            std::scoped_lock queueLock(unmap_queue_lock);
            if (handleDesc->unmap_queue_entry) {
                unmap_queue.erase(*handleDesc->unmap_queue_entry);
                handleDesc->unmap_queue_entry.reset();

                handleDesc->pins++;
                return handleDesc->pin_virt_address;
            }
        }

        // If not then allocate some space and map it
        u32 address{};
        while (!(address = smmuAllocator.Allocate(static_cast<u32>(handleDesc->aligned_size)))) {
            // Free handles until the allocation succeeds
            std::scoped_lock queueLock(unmap_queue_lock);
            if (auto freeHandleDesc{unmap_queue.front()}) {
                // Handles in the unmap queue are guaranteed not to be pinned so don't bother
                // checking if they are before unmapping
                std::scoped_lock freeLock(freeHandleDesc->mutex);
                if (handleDesc->pin_virt_address)
                    UnmapHandle(*freeHandleDesc);
            } else {
                LOG_CRITICAL(Service_NVDRV, "Ran out of SMMU address space!");
            }
        }

        state.soc->smmu.Map(address, handleDesc->GetPointer(),
                            static_cast<u32>(handleDesc->aligned_size));
        handleDesc->pin_virt_address = address;
    }

    handleDesc->pins++;
    return handleDesc->pin_virt_address;
    */
}
||||
|
|
||||
|
// Counterpart of PinHandle; also not implemented yet. The commented-out reference
// implementation queues fully-unpinned handles so their SMMU memory can be reclaimed.
void NvMap::UnpinHandle(Handle::Id handle) {
    UNIMPLEMENTED_MSG("Unpinning");
    /*
    auto handleDesc{GetHandle(handle)};
    if (!handleDesc)
        return;

    std::scoped_lock lock(handleDesc->mutex);
    if (--handleDesc->pins < 0) {
        LOG_WARNING(Service_NVDRV, "Pin count imbalance detected!");
    } else if (!handleDesc->pins) {
        std::scoped_lock queueLock(unmap_queue_lock);

        // Add to the unmap queue allowing this handle's memory to be freed if needed
        unmap_queue.push_back(handleDesc);
        handleDesc->unmap_queue_entry = std::prev(unmap_queue.end());
    }
    */
}
||||
|
|
||||
|
// Tries to free a handle by removing a single dupe (guest or internal); returns a FreeInfo
// describing the handle's prior state, or nullopt when the id is unknown. The weak_ptr
// trick below reports back whether the last strong reference actually died.
std::optional<NvMap::FreeInfo> NvMap::FreeHandle(Handle::Id handle, bool internal_session) {
    std::weak_ptr<Handle> hWeak{GetHandle(handle)};
    FreeInfo freeInfo;

    // We use a weak ptr here so we can tell when the handle has been freed and report that back to
    // guest
    if (auto handleDesc = hWeak.lock()) {
        std::scoped_lock lock(handleDesc->mutex);

        if (internal_session) {
            if (--handleDesc->internal_dupes < 0)
                LOG_WARNING(Service_NVDRV, "Internal duplicate count imbalance detected!");
        } else {
            if (--handleDesc->dupes < 0) {
                LOG_WARNING(Service_NVDRV, "User duplicate count imbalance detected!");
            } else if (handleDesc->dupes == 0) {
                // Force unmap the handle
                if (handleDesc->pin_virt_address) {
                    std::scoped_lock queueLock(unmap_queue_lock);
                    UnmapHandle(*handleDesc);
                }

                handleDesc->pins = 0;
            }
        }

        // Try to remove the shared ptr to the handle from the map, if nothing else is using the
        // handle then it will now be freed when `handleDesc` goes out of scope
        // NOTE(review): both branches below log at ERROR severity even though removal is the
        // normal success path — these look like debug traces; confirm intended log level.
        if (TryRemoveHandle(*handleDesc))
            LOG_ERROR(Service_NVDRV, "Removed nvmap handle: {}", handle);
        else
            LOG_ERROR(Service_NVDRV,
                      "Tried to free nvmap handle: {} but didn't as it still has duplicates",
                      handle);

        freeInfo = {
            .address = handleDesc->address,
            .size = handleDesc->size,
            .was_uncached = handleDesc->flags.map_uncached.Value() != 0,
        };
    } else {
        return std::nullopt;
    }

    // Handle hasn't been freed from memory, set address to 0 to mark that the handle wasn't freed
    if (!hWeak.expired()) {
        LOG_ERROR(Service_NVDRV, "nvmap handle: {} wasn't freed as it is still in use", handle);
        freeInfo.address = 0;
    }

    return freeInfo;
}
||||
|
|
||||
|
} // namespace Service::Nvidia::NvCore
|
||||
@ -0,0 +1,155 @@ |
|||||
|
// Copyright 2021 Skyline Team and Contributors (https://github.com/skyline-emu/) |
||||
|
// Licensed under GPLv2 or any later version |
||||
|
// Refer to the license.txt file included. |
||||
|
|
||||
|
#pragma once |
||||
|
|
||||
|
#include <assert.h>

#include <atomic>
#include <list>
#include <memory>
#include <mutex>
#include <optional>
#include <unordered_map>

#include "common/bit_field.h"
#include "common/common_types.h"
#include "core/hle/service/nvdrv/nvdata.h"
||||
|
|
||||
|
namespace Service::Nvidia::NvCore { |
||||
|
/**
 * @brief The nvmap core class holds the global state for nvmap and provides methods to manage
 * handles
 */
class NvMap {
public:
    /**
     * @brief A handle to a contiguous block of memory in an application's address space
     */
    struct Handle {
        std::mutex mutex; //!< Protects this handle's mutable state below

        u64 align{};      //!< The alignment to use when pinning the handle onto the SMMU
        u64 size;         //!< Page-aligned size of the memory the handle refers to
        u64 aligned_size; //!< `align`-aligned size of the memory the handle refers to
        u64 orig_size;    //!< Original unaligned size of the memory this handle refers to

        s32 dupes{1};          //!< How many guest references there are to this handle
        s32 internal_dupes{0}; //!< How many emulator-internal references there are to this handle

        using Id = u32;
        Id id; //!< A globally unique identifier for this handle

        s32 pins{};             //!< Pin refcount (see PinHandle/UnpinHandle)
        u32 pin_virt_address{}; //!< SMMU virtual address while pinned, 0 when unmapped
        std::optional<typename std::list<std::shared_ptr<Handle>>::iterator> unmap_queue_entry{};

        union Flags {
            BitField<0, 1, u32> map_uncached; //!< If the handle should be mapped as uncached
            BitField<2, 1, u32> keep_uncached_after_free; //!< Only applicable when the handle was
                                                          //!< allocated with a fixed address
            BitField<4, 1, u32> _unk0_; //!< Passed to IOVMM for pins
        } flags{};
        static_assert(sizeof(Flags) == sizeof(u32));

        u64 address{}; //!< The memory location in the guest's AS that this handle corresponds to,
                       //!< this can also be in the nvdrv tmem
        bool is_shared_mem_mapped{}; //!< If this nvmap has been mapped with the MapSharedMem IPC
                                     //!< call

        u8 kind{};        //!< Used for memory compression
        bool allocated{}; //!< If the handle has been allocated with `Alloc`

        Handle(u64 size, Id id);

        /**
         * @brief Sets up the handle with the given memory config, can allocate memory from the tmem
         * if a 0 address is passed
         */
        [[nodiscard]] NvResult Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress);

        /**
         * @brief Increases the dupe counter of the handle for the given session
         */
        [[nodiscard]] NvResult Duplicate(bool internal_session);

        /**
         * @brief Obtains a pointer to the handle's memory and marks the handle as having been
         * mapped
         * @note NOTE(review): reinterprets the stored `address` directly as a host pointer,
         * although the field is documented as a guest AS location — confirm the callers always
         * store a host-accessible address here.
         */
        u8* GetPointer() {
            if (!address) {
                return nullptr;
            }

            is_shared_mem_mapped = true;
            return reinterpret_cast<u8*>(address);
        }
    };

private:
    std::list<std::shared_ptr<Handle>> unmap_queue; //!< Handles eligible for SMMU reclamation
    std::mutex unmap_queue_lock;                    //!< Protects access to `unmap_queue`

    std::unordered_map<Handle::Id, std::shared_ptr<Handle>> handles; //!< Main owning map of handles
    std::mutex handles_lock; //!< Protects access to `handles`

    static constexpr u32 HandleIdIncrement{
        4}; //!< Each new handle ID is an increment of 4 from the previous
    std::atomic<u32> next_handle_id{HandleIdIncrement};

    void AddHandle(std::shared_ptr<Handle> handle);

    /**
     * @brief Unmaps and frees the SMMU memory region a handle is mapped to
     * @note Both `unmap_queue_lock` and `handleDesc.mutex` MUST be locked when calling this
     */
    void UnmapHandle(Handle& handleDesc);

    /**
     * @brief Removes a handle from the map taking its dupes into account
     * @note handleDesc.mutex MUST be locked when calling this
     * @return If the handle was removed from the map
     */
    bool TryRemoveHandle(const Handle& handleDesc);

public:
    /**
     * @brief Encapsulates the result of a FreeHandle operation
     */
    struct FreeInfo {
        u64 address;       //!< Address the handle referred to before deletion
        u64 size;          //!< Page-aligned handle size
        bool was_uncached; //!< If the handle was allocated as uncached
    };

    NvMap();

    /**
     * @brief Creates an unallocated handle of the given size
     */
    [[nodiscard]] NvResult CreateHandle(u64 size, std::shared_ptr<NvMap::Handle>& result_out);

    std::shared_ptr<Handle> GetHandle(Handle::Id handle);

    /**
     * @brief Maps a handle into the SMMU address space
     * @note This operation is refcounted, the number of calls to this must eventually match the
     * number of calls to `UnpinHandle`
     * @return The SMMU virtual address that the handle has been mapped to
     */
    u32 PinHandle(Handle::Id handle);

    /**
     * @brief When this has been called an equal number of times to `PinHandle` for the supplied
     * handle it will be added to a list of handles to be freed when necessary
     */
    void UnpinHandle(Handle::Id handle);

    /**
     * @brief Tries to free a handle and remove a single dupe
     * @note If a handle has no dupes left and has no other users a FreeInfo struct will be returned
     * describing the prior state of the handle
     */
    std::optional<FreeInfo> FreeHandle(Handle::Id handle, bool internal_session);
};
||||
|
} // namespace Service::Nvidia::NvCore |
||||
Write
Preview
Loading…
Cancel
Save
Reference in new issue