Browse Source

add kernel changes

pull/3004/head
Maufeat 3 months ago
parent
commit
abdba4021a
  1. 4
      src/core/hle/kernel/k_handle_table.cpp
  2. 2
      src/core/hle/kernel/k_handle_table.h
  3. 6
      src/core/hle/kernel/svc/svc_event.cpp
  4. 39
      src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp

4
src/core/hle/kernel/k_handle_table.cpp

@@ -6,7 +6,7 @@
namespace Kernel {
Result KHandleTable::Finalize() {
void KHandleTable::Finalize() {
// Get the table and clear our record of it.
u16 saved_table_size = 0;
{
@@ -22,8 +22,6 @@ Result KHandleTable::Finalize() {
obj->Close();
}
}
R_SUCCEED();
}
bool KHandleTable::Remove(Handle handle) {

2
src/core/hle/kernel/k_handle_table.h

@@ -68,7 +68,7 @@ public:
return m_max_count;
}
Result Finalize();
void Finalize();
bool Remove(Handle handle);
template <typename T = KAutoObject>

6
src/core/hle/kernel/svc/svc_event.cpp

@@ -34,7 +34,8 @@ Result ClearEvent(Core::System& system, Handle event_handle) {
{
KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle);
if (event.IsNotNull()) {
R_RETURN(event->Clear());
event->Clear();
R_SUCCEED();
}
}
@@ -42,7 +43,8 @@ Result ClearEvent(Core::System& system, Handle event_handle) {
{
KScopedAutoObject readable_event = handle_table.GetObject<KReadableEvent>(event_handle);
if (readable_event.IsNotNull()) {
R_RETURN(readable_event->Clear());
readable_event->Clear();
R_SUCCEED();
}
}

39
src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp

@@ -179,7 +179,7 @@ NvResult nvhost_gpu::AllocGPFIFOEx(IoctlAllocGpfifoEx& params, DeviceFD fd) {
params.reserved[2]);
if (channel_state->initialized) {
LOG_CRITICAL(Service_NVDRV, "Already allocated!");
LOG_DEBUG(Service_NVDRV, "Channel already initialized; AllocGPFIFOEx returning AlreadyAllocated");
return NvResult::AlreadyAllocated;
}
@@ -188,6 +188,15 @@ NvResult nvhost_gpu::AllocGPFIFOEx(IoctlAllocGpfifoEx& params, DeviceFD fd) {
program_id = session->process->GetProgramId();
}
// Store program id for later lazy initialization
channel_state->program_id = program_id;
// If address space is not yet bound, defer channel initialization.
if (!channel_state->memory_manager) {
params.fence_out = syncpoint_manager.GetSyncpointFence(channel_syncpoint);
return NvResult::Success;
}
system.GPU().InitChannel(*channel_state, program_id);
params.fence_out = syncpoint_manager.GetSyncpointFence(channel_syncpoint);
@@ -203,7 +212,7 @@ NvResult nvhost_gpu::AllocGPFIFOEx2(IoctlAllocGpfifoEx& params, DeviceFD fd) {
params.reserved[2]);
if (channel_state->initialized) {
LOG_CRITICAL(Service_NVDRV, "Already allocated!");
LOG_DEBUG(Service_NVDRV, "Channel already initialized; AllocGPFIFOEx2 returning AlreadyAllocated");
return NvResult::AlreadyAllocated;
}
@@ -212,6 +221,15 @@ NvResult nvhost_gpu::AllocGPFIFOEx2(IoctlAllocGpfifoEx& params, DeviceFD fd) {
program_id = session->process->GetProgramId();
}
// Store program id for later lazy initialization
channel_state->program_id = program_id;
// If address space is not yet bound, defer channel initialization.
if (!channel_state->memory_manager) {
params.fence_out = syncpoint_manager.GetSyncpointFence(channel_syncpoint);
return NvResult::Success;
}
system.GPU().InitChannel(*channel_state, program_id);
params.fence_out = syncpoint_manager.GetSyncpointFence(channel_syncpoint);
@@ -236,9 +254,10 @@ NvResult nvhost_gpu::AllocateObjectContext(IoctlAllocObjCtx& params) {
LOG_DEBUG(Service_NVDRV, "called, class_num={:#X}, flags={:#X}, obj_id={:#X}", params.class_num,
params.flags, params.obj_id);
if (!channel_state || !channel_state->initialized) {
LOG_CRITICAL(Service_NVDRV, "No address space bound to allocate a object context!");
return NvResult::NotInitialized;
// Do not require channel initialization here: some clients allocate contexts before binding.
if (!channel_state) {
LOG_ERROR(Service_NVDRV, "No channel state available!");
return NvResult::InvalidState;
}
std::scoped_lock lk(channel_mutex);
@@ -260,11 +279,12 @@ NvResult nvhost_gpu::AllocateObjectContext(IoctlAllocObjCtx& params) {
}
if (ctxObjs[ctx_class_number_index].has_value()) {
LOG_ERROR(Service_NVDRV, "Object context for class {:#X} already allocated on this channel",
params.class_num);
LOG_WARNING(Service_NVDRV, "Object context for class {:#X} already allocated on this channel",
params.class_num);
return NvResult::AlreadyAllocated;
}
// Defer actual hardware context binding until channel is initialized.
ctxObjs[ctx_class_number_index] = params;
return NvResult::Success;
@@ -318,6 +338,11 @@ NvResult nvhost_gpu::SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, Tegra::CommandL
std::scoped_lock lock(channel_mutex);
// Lazily initialize channel when address space is available
if (!channel_state->initialized && channel_state->memory_manager) {
system.GPU().InitChannel(*channel_state, channel_state->program_id);
}
const auto bind_id = channel_state->bind_id;
auto& flags = params.flags;

Loading…
Cancel
Save