|
|
|
@@ -142,9 +142,11 @@ void CoreTiming::ScheduleLoopingEvent(std::chrono::nanoseconds start_time,
|
|
|
} |
|
|
|
|
|
|
|
void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type, |
|
|
|
std::uintptr_t user_data) { |
|
|
|
std::scoped_lock scope{basic_lock}; |
|
|
|
const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) { |
|
|
|
std::uintptr_t user_data, bool wait) { |
|
|
|
{ |
|
|
|
std::scoped_lock lk{basic_lock}; |
|
|
|
const auto itr = |
|
|
|
std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) { |
|
|
|
return e.type.lock().get() == event_type.get() && e.user_data == user_data; |
|
|
|
}); |
|
|
|
|
|
|
|
@@ -155,6 +157,12 @@ void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type,
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
// Force any in-progress events to finish
|
|
|
|
if (wait) { |
|
|
|
std::scoped_lock lk{advance_lock}; |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
void CoreTiming::AddTicks(u64 ticks_to_add) { |
|
|
|
ticks += ticks_to_add; |
|
|
|
downcount -= static_cast<s64>(ticks); |
|
|
|
@@ -190,20 +198,6 @@ u64 CoreTiming::GetClockTicks() const {
|
|
|
return CpuCyclesToClockCycles(ticks); |
|
|
|
} |
|
|
|
|
|
|
|
void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) { |
|
|
|
std::scoped_lock lock{basic_lock}; |
|
|
|
|
|
|
|
const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) { |
|
|
|
return e.type.lock().get() == event_type.get(); |
|
|
|
}); |
|
|
|
|
|
|
|
// Removing random items breaks the invariant so we have to re-establish it.
|
|
|
|
if (itr != event_queue.end()) { |
|
|
|
event_queue.erase(itr, event_queue.end()); |
|
|
|
std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>()); |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
std::optional<s64> CoreTiming::Advance() { |
|
|
|
std::scoped_lock lock{advance_lock, basic_lock}; |
|
|
|
global_timer = GetGlobalTimeNs().count(); |
|
|
|
|