@@ -5,6 +5,8 @@

#include <algorithm>
#include <cstring>
#include <deque>
#include <functional>
#include <memory>
#include <queue>

@@ -19,28 +21,7 @@ namespace VideoCommon {

class FenceBase {
public:
    explicit FenceBase(u32 payload_, bool is_stubbed_)
        : address{}, payload{payload_}, is_semaphore{false}, is_stubbed{is_stubbed_} {}

    explicit FenceBase(u8* address_, u32 payload_, bool is_stubbed_)
        : address{address_}, payload{payload_}, is_semaphore{true}, is_stubbed{is_stubbed_} {}

    u8* GetAddress() const {
        return address;
    }

    u32 GetPayload() const {
        return payload;
    }

    bool IsSemaphore() const {
        return is_semaphore;
    }

private:
    u8* address;
    u32 payload;
    bool is_semaphore;
    explicit FenceBase(bool is_stubbed_) : is_stubbed{is_stubbed_} {}

protected:
    bool is_stubbed;
@@ -60,31 +41,28 @@ public:
        buffer_cache.AccumulateFlushes();
    }

    void SignalSemaphore(u8* addr, u32 value) {
    void SyncOperation(std::function<void()>&& func) {
        uncommitted_operations.emplace_back(std::move(func));
    }

    void SignalFence(std::function<void()>&& func) {
        TryReleasePendingFences();
        const bool should_flush = ShouldFlush();
        CommitAsyncFlushes();
        TFence new_fence = CreateFence(addr, value, !should_flush);
        uncommitted_operations.emplace_back(std::move(func));
        CommitOperations();
        TFence new_fence = CreateFence(!should_flush);
        fences.push(new_fence);
        QueueFence(new_fence);
        if (should_flush) {
            rasterizer.FlushCommands();
        }
        rasterizer.SyncGuestHost();
    }

    void SignalSyncPoint(u32 value) {
        syncpoint_manager.IncrementGuest(value);
        TryReleasePendingFences();
        const bool should_flush = ShouldFlush();
        CommitAsyncFlushes();
        TFence new_fence = CreateFence(value, !should_flush);
        fences.push(new_fence);
        QueueFence(new_fence);
        if (should_flush) {
            rasterizer.FlushCommands();
        }
        rasterizer.SyncGuestHost();
        std::function<void()> func([this, value] { syncpoint_manager.IncrementHost(value); });
        SignalFence(std::move(func));
    }

    void WaitPendingFences() {
@@ -94,12 +72,10 @@ public:
                WaitFence(current_fence);
            }
            PopAsyncFlushes();
            if (current_fence->IsSemaphore()) {
                char* address = reinterpret_cast<char*>(current_fence->GetAddress());
                auto payload = current_fence->GetPayload();
                std::memcpy(address, &payload, sizeof(payload));
            } else {
                syncpoint_manager.IncrementHost(current_fence->GetPayload());
            auto operations = std::move(pending_operations.front());
            pending_operations.pop_front();
            for (auto& operation : operations) {
                operation();
            }
            PopFence();
        }
@@ -114,11 +90,9 @@ protected:

    virtual ~FenceManager() = default;

    /// Creates a Sync Point Fence Interface, does not create a backend fence if 'is_stubbed' is
    /// Creates a Fence Interface, does not create a backend fence if 'is_stubbed' is
    /// true
    virtual TFence CreateFence(u32 value, bool is_stubbed) = 0;
    /// Creates a Semaphore Fence Interface, does not create a backend fence if 'is_stubbed' is true
    virtual TFence CreateFence(u8* addr, u32 value, bool is_stubbed) = 0;
    virtual TFence CreateFence(bool is_stubbed) = 0;
    /// Queues a fence into the backend if the fence isn't stubbed.
    virtual void QueueFence(TFence& fence) = 0;
    /// Notifies that the backend fence has been signaled/reached in host GPU.
@@ -141,12 +115,10 @@ private:
                return;
            }
            PopAsyncFlushes();
            if (current_fence->IsSemaphore()) {
                char* address = reinterpret_cast<char*>(current_fence->GetAddress());
                const auto payload = current_fence->GetPayload();
                std::memcpy(address, &payload, sizeof(payload));
            } else {
                syncpoint_manager.IncrementHost(current_fence->GetPayload());
            auto operations = std::move(pending_operations.front());
            pending_operations.pop_front();
            for (auto& operation : operations) {
                operation();
            }
            PopFence();
        }
@@ -165,16 +137,20 @@ private:
    }

    void PopAsyncFlushes() {
        {
            std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
            texture_cache.PopAsyncFlushes();
            buffer_cache.PopAsyncFlushes();
        }
        query_cache.PopAsyncFlushes();
    }

    void CommitAsyncFlushes() {
        {
            std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
            texture_cache.CommitAsyncFlushes();
            buffer_cache.CommitAsyncFlushes();
        }
        query_cache.CommitAsyncFlushes();
    }

@@ -183,7 +159,13 @@ private:
        fences.pop();
    }

    void CommitOperations() {
        pending_operations.emplace_back(std::move(uncommitted_operations));
    }

    std::queue<TFence> fences;
    std::deque<std::function<void()>> uncommitted_operations;
    std::deque<std::deque<std::function<void()>>> pending_operations;

    DelayedDestructionRing<TFence, 6> delayed_destruction_ring;
};
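
The hunks above replace the semaphore/syncpoint-specific FenceBase payload with deferred std::function<void()> operations: callers record work through SyncOperation or SignalFence, each SignalFence seals the current batch into pending_operations behind a new fence, and the batch runs once that fence is popped. Below is a minimal standalone sketch of that pattern, not yuzu code; all names are illustrative and the backend fence is reduced to a stub.

// Standalone sketch of the deferred-operation pattern shown in the diff.
// Illustrative only: StubFence, OperationQueue, Defer, Signal and
// OnFenceCompleted are invented names, not part of the yuzu codebase.
#include <cstdint>
#include <deque>
#include <functional>
#include <queue>
#include <utility>

struct StubFence {}; // stands in for the backend fence object (TFence)

class OperationQueue {
public:
    // Analogue of SyncOperation(): record work to run after the next fence completes.
    void Defer(std::function<void()>&& func) {
        uncommitted.emplace_back(std::move(func));
    }

    // Analogue of SignalFence(): add the final operation, seal the batch, emit a fence.
    void Signal(std::function<void()>&& func) {
        uncommitted.emplace_back(std::move(func));
        pending.emplace_back(std::move(uncommitted));
        fences.push(StubFence{});
    }

    // Analogue of popping a signaled fence: run the batch that was sealed with it.
    void OnFenceCompleted() {
        auto operations = std::move(pending.front());
        pending.pop_front();
        for (auto& operation : operations) {
            operation();
        }
        fences.pop();
    }

private:
    std::queue<StubFence> fences;
    std::deque<std::function<void()>> uncommitted;
    std::deque<std::deque<std::function<void()>>> pending;
};

int main() {
    OperationQueue queue;
    std::uint32_t semaphore = 0;
    // What SignalSemaphore(addr, value) used to hard-code (a memory write at fence
    // completion) is now just another deferred operation...
    queue.Signal([&semaphore] { semaphore = 1; });
    // ...executed only when the fence is reported complete.
    queue.OnFenceCompleted();
    return semaphore == 1 ? 0 : 1;
}

In the real manager the batch only runs after the backend fence queued via QueueFence has been waited on or observed as signaled (WaitFence / ShouldWait in the surrounding code); the stub here simply runs it on demand.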