Mirror of https://github.com/yuzu-mirror/yuzu.git, synced 2024-11-17 11:37:33 +01:00
Core_Timing: Make core_timing threadsafe by default.
The old implementation had faulty thread-safe methods where events could go missing. This implementation unifies the unsafe and thread-safe methods and makes core timing thread-safe overall.
parent 7e2bcf04b4
commit 90792cdb6e
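To illustrate the idea behind "thread-safe by default": instead of offering a separate *Threadsafe entry point, the single ScheduleEvent path can take a lock before touching the event heap, so it is callable from any thread. The following is a minimal sketch of that pattern only; the type and member names (SchedulerSketch, Event, etc.) are invented for the example and are not yuzu's actual code.

#include <algorithm>
#include <cstdint>
#include <functional>
#include <mutex>
#include <vector>

// Hypothetical stand-ins, only to illustrate the single locked scheduling path.
struct Event {
    int64_t time{};     // absolute tick at which the event fires
    uint64_t userdata{};
    friend bool operator>(const Event& a, const Event& b) { return a.time > b.time; }
};

struct SchedulerSketch {
    std::mutex mutex;
    std::vector<Event> event_queue; // kept as a min-heap: earliest event on top
    int64_t global_timer = 0;

    // Callable from the emulation thread or any other thread alike.
    void ScheduleEvent(int64_t cycles_into_future, uint64_t userdata) {
        std::lock_guard guard{mutex};
        event_queue.push_back(Event{global_timer + cycles_into_future, userdata});
        std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
    }
};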
@@ -101,7 +101,7 @@ void Stream::PlayNextBuffer() {

    sink_stream.EnqueueSamples(GetNumChannels(), active_buffer->GetSamples());

    core_timing.ScheduleEventThreadsafe(GetBufferReleaseCycles(*active_buffer), release_event, {});
    core_timing.ScheduleEvent(GetBufferReleaseCycles(*active_buffer), release_event, {});
}

void Stream::ReleaseActiveBuffer() {
@@ -56,12 +56,12 @@ void CoreTiming::Initialize() {
}

void CoreTiming::Shutdown() {
    MoveEvents();
    ClearPendingEvents();
    UnregisterAllEvents();
}

EventType* CoreTiming::RegisterEvent(const std::string& name, TimedCallback callback) {
    std::lock_guard guard{inner_mutex};
    // check for existing type with same name.
    // we want event type names to remain unique so that we can use them for serialization.
    ASSERT_MSG(event_types.find(name) == event_types.end(),
@@ -82,6 +82,7 @@ void CoreTiming::UnregisterAllEvents() {

void CoreTiming::ScheduleEvent(s64 cycles_into_future, const EventType* event_type, u64 userdata) {
    ASSERT(event_type != nullptr);
    std::lock_guard guard{inner_mutex};
    const s64 timeout = GetTicks() + cycles_into_future;

    // If this event needs to be scheduled before the next advance(), force one early
@@ -93,12 +94,8 @@ void CoreTiming::ScheduleEvent(s64 cycles_into_future, const EventType* event_ty
    std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
}

void CoreTiming::ScheduleEventThreadsafe(s64 cycles_into_future, const EventType* event_type,
                                         u64 userdata) {
    ts_queue.Push(Event{global_timer + cycles_into_future, 0, userdata, event_type});
}

void CoreTiming::UnscheduleEvent(const EventType* event_type, u64 userdata) {
    std::lock_guard guard{inner_mutex};
    const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
        return e.type == event_type && e.userdata == userdata;
    });
@@ -110,10 +107,6 @@ void CoreTiming::UnscheduleEvent(const EventType* event_type, u64 userdata) {
    }
}

void CoreTiming::UnscheduleEventThreadsafe(const EventType* event_type, u64 userdata) {
    unschedule_queue.Push(std::make_pair(event_type, userdata));
}

u64 CoreTiming::GetTicks() const {
    u64 ticks = static_cast<u64>(global_timer);
    if (!is_global_timer_sane) {
@@ -135,6 +128,7 @@ void CoreTiming::ClearPendingEvents() {
}

void CoreTiming::RemoveEvent(const EventType* event_type) {
    std::lock_guard guard{inner_mutex};
    const auto itr = std::remove_if(event_queue.begin(), event_queue.end(),
                                    [&](const Event& e) { return e.type == event_type; });
@@ -145,11 +139,6 @@ void CoreTiming::RemoveEvent(const EventType* event_type) {
    }
}

void CoreTiming::RemoveNormalAndThreadsafeEvent(const EventType* event_type) {
    MoveEvents();
    RemoveEvent(event_type);
}

void CoreTiming::ForceExceptionCheck(s64 cycles) {
    cycles = std::max<s64>(0, cycles);
    if (downcount <= cycles) {
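The hunk above ends inside ForceExceptionCheck, which shortens the current execution slice when a newly scheduled event is due before the slice would normally end (the test changes further down call it manually after each ScheduleEvent). A rough, self-contained sketch of that check follows; `downcount` is a class member in the real code and is passed by reference here only to keep the example standalone.

#include <algorithm>
#include <cstdint>

// Sketch: if an event is due within `cycles`, shrink the remaining slice so the
// core stops and services it sooner. The actual implementation may also adjust
// other slice bookkeeping.
void ForceExceptionCheckSketch(int64_t cycles, int& downcount) {
    cycles = std::max<int64_t>(0, cycles);
    if (downcount <= cycles) {
        return; // the current slice already ends before the new event is due
    }
    // Shorten the remaining slice so Advance() runs soon enough to service it.
    downcount = static_cast<int>(cycles);
}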
@@ -162,19 +151,8 @@ void CoreTiming::ForceExceptionCheck(s64 cycles) {
    downcount = static_cast<int>(cycles);
}

void CoreTiming::MoveEvents() {
    for (Event ev; ts_queue.Pop(ev);) {
        ev.fifo_order = event_fifo_id++;
        event_queue.emplace_back(std::move(ev));
        std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
    }
}

void CoreTiming::Advance() {
    MoveEvents();
    for (std::pair<const EventType*, u64> ev; unschedule_queue.Pop(ev);) {
        UnscheduleEvent(ev.first, ev.second);
    }
    std::unique_lock<std::mutex> guard(inner_mutex);

    const int cycles_executed = slice_length - downcount;
    global_timer += cycles_executed;
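The Advance() lines above pop deferred unschedule requests from unschedule_queue and apply them on the emulation thread. A self-contained sketch of that deferral pattern follows, using a mutex-protected std::deque as a stand-in for Common::MPSCQueue; all names are illustrative, and event_queue is a plain vector for brevity (the real code keeps it heap-ordered and restores that invariant after removals).

#include <algorithm>
#include <cstdint>
#include <deque>
#include <mutex>
#include <utility>
#include <vector>

struct EventType; // opaque, used only through pointers as in the diff

struct PendingEvent {
    const EventType* type{};
    uint64_t userdata{};
};

struct UnscheduleSketch {
    std::mutex queue_mutex;
    std::deque<std::pair<const EventType*, uint64_t>> unschedule_queue;
    std::vector<PendingEvent> event_queue;

    // Any thread: request removal without touching event_queue directly.
    void UnscheduleFromAnyThread(const EventType* type, uint64_t userdata) {
        std::lock_guard guard{queue_mutex};
        unschedule_queue.emplace_back(type, userdata);
    }

    // Emulation thread, at the top of Advance(): apply all pending requests.
    void ApplyPendingUnschedules() {
        std::lock_guard guard{queue_mutex};
        for (const auto& request : unschedule_queue) {
            event_queue.erase(
                std::remove_if(event_queue.begin(), event_queue.end(),
                               [&](const PendingEvent& e) {
                                   return e.type == request.first &&
                                          e.userdata == request.second;
                               }),
                event_queue.end());
        }
        unschedule_queue.clear();
    }
};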
@@ -186,7 +164,9 @@ void CoreTiming::Advance() {
        Event evt = std::move(event_queue.front());
        std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>());
        event_queue.pop_back();
        inner_mutex.unlock();
        evt.type->callback(evt.userdata, global_timer - evt.time);
        inner_mutex.lock();
    }

    is_global_timer_sane = false;
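The dispatch loop above releases inner_mutex around each callback, which is what lets a callback schedule follow-up events through the now-locked ScheduleEvent path without deadlocking. Below is a compact sketch of MoveEvents() plus that unlock-around-callback loop, built from invented stand-in types: a plain std::deque instead of Common::MPSCQueue and std::function callbacks instead of EventType. It is an illustration of the pattern, not yuzu's actual implementation.

#include <algorithm>
#include <cstdint>
#include <deque>
#include <functional>
#include <mutex>
#include <tuple>
#include <utility>
#include <vector>

struct Event {
    int64_t time{};
    uint64_t fifo_order{};
    std::function<void(int64_t cycles_late)> callback;
    friend bool operator>(const Event& a, const Event& b) {
        return std::tie(a.time, a.fifo_order) > std::tie(b.time, b.fifo_order);
    }
};

struct AdvanceSketch {
    std::mutex inner_mutex;
    std::deque<Event> ts_queue;      // stand-in for Common::MPSCQueue<Event>
    std::vector<Event> event_queue;  // min-heap on (time, fifo_order)
    uint64_t event_fifo_id = 0;
    int64_t global_timer = 0;

    // Any thread: stage an event; MoveEvents() will merge it into the heap.
    void ScheduleFromAnyThread(int64_t cycles_into_future,
                               std::function<void(int64_t)> callback) {
        std::lock_guard guard{inner_mutex};
        ts_queue.push_back(Event{global_timer + cycles_into_future, 0, std::move(callback)});
    }

    // Emulation thread: pull staged events into the heap, assigning a FIFO id
    // so events scheduled for the same tick keep their arrival order.
    // Called with inner_mutex already held by Advance().
    void MoveEvents() {
        while (!ts_queue.empty()) {
            Event ev = std::move(ts_queue.front());
            ts_queue.pop_front();
            ev.fifo_order = event_fifo_id++;
            event_queue.emplace_back(std::move(ev));
            std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
        }
    }

    // Emulation thread: run everything that is due. The mutex is released
    // around each callback so the callback can schedule new events through the
    // same locked path without deadlocking.
    void Advance(int64_t cycles_executed) {
        std::unique_lock guard{inner_mutex};
        MoveEvents();
        global_timer += cycles_executed;

        while (!event_queue.empty() && event_queue.front().time <= global_timer) {
            Event evt = std::move(event_queue.front());
            std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>());
            event_queue.pop_back();

            guard.unlock();
            evt.callback(global_timer - evt.time); // may re-enter the scheduler
            guard.lock();
        }
    }
};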
@@ -6,6 +6,7 @@

#include <chrono>
#include <functional>
#include <mutex>
#include <string>
#include <unordered_map>
#include <vector>
@@ -67,7 +68,7 @@ public:
    ///
    EventType* RegisterEvent(const std::string& name, TimedCallback callback);

    /// Unregisters all registered events thus far.
    /// Unregisters all registered events thus far. Note: not thread unsafe
    void UnregisterAllEvents();

    /// After the first Advance, the slice lengths and the downcount will be reduced whenever an
@@ -76,20 +77,10 @@ public:
    /// Scheduling from a callback will not update the downcount until the Advance() completes.
    void ScheduleEvent(s64 cycles_into_future, const EventType* event_type, u64 userdata = 0);

    /// This is to be called when outside of hle threads, such as the graphics thread, wants to
    /// schedule things to be executed on the main thread.
    ///
    /// @note This doesn't change slice_length and thus events scheduled by this might be
    ///       called with a delay of up to MAX_SLICE_LENGTH
    void ScheduleEventThreadsafe(s64 cycles_into_future, const EventType* event_type,
                                 u64 userdata = 0);

    void UnscheduleEvent(const EventType* event_type, u64 userdata);
    void UnscheduleEventThreadsafe(const EventType* event_type, u64 userdata);

    /// We only permit one event of each type in the queue at a time.
    void RemoveEvent(const EventType* event_type);
    void RemoveNormalAndThreadsafeEvent(const EventType* event_type);

    void ForceExceptionCheck(s64 cycles);
@@ -120,7 +111,6 @@ private:

    /// Clear all pending events. This should ONLY be done on exit.
    void ClearPendingEvents();
    void MoveEvents();

    s64 global_timer = 0;
    s64 idled_cycles = 0;
@@ -143,14 +133,9 @@ private:
    // remain stable regardless of rehashes/resizing.
    std::unordered_map<std::string, EventType> event_types;

    // The queue for storing the events from other threads threadsafe until they will be added
    // to the event_queue by the emu thread
    Common::MPSCQueue<Event> ts_queue;

    // The queue for unscheduling the events from other threads threadsafe
    Common::MPSCQueue<std::pair<const EventType*, u64>> unschedule_queue;

    EventType* ev_lost = nullptr;

    std::mutex inner_mutex;
};

} // namespace Core::Timing
@@ -76,13 +76,13 @@ void Thread::WakeAfterDelay(s64 nanoseconds) {
    // This function might be called from any thread so we have to be cautious and use the
    // thread-safe version of ScheduleEvent.
    const s64 cycles = Core::Timing::nsToCycles(std::chrono::nanoseconds{nanoseconds});
    Core::System::GetInstance().CoreTiming().ScheduleEventThreadsafe(
    Core::System::GetInstance().CoreTiming().ScheduleEvent(
        cycles, kernel.ThreadWakeupCallbackEventType(), callback_handle);
}

void Thread::CancelWakeupTimer() {
    Core::System::GetInstance().CoreTiming().UnscheduleEventThreadsafe(
        kernel.ThreadWakeupCallbackEventType(), callback_handle);
    Core::System::GetInstance().CoreTiming().UnscheduleEvent(kernel.ThreadWakeupCallbackEventType(),
                                                             callback_handle);
}

static std::optional<s32> GetNextProcessorId(u64 mask) {
@@ -99,24 +99,24 @@ TEST_CASE("CoreTiming[Threadsave]", "[core]") {
    core_timing.Advance();

    // D -> B -> C -> A -> E
    core_timing.ScheduleEventThreadsafe(1000, cb_a, CB_IDS[0]);
    // Manually force since ScheduleEventThreadsafe doesn't call it
    core_timing.ScheduleEvent(1000, cb_a, CB_IDS[0]);
    // Manually force since ScheduleEvent doesn't call it
    core_timing.ForceExceptionCheck(1000);
    REQUIRE(1000 == core_timing.GetDowncount());
    core_timing.ScheduleEventThreadsafe(500, cb_b, CB_IDS[1]);
    // Manually force since ScheduleEventThreadsafe doesn't call it
    core_timing.ScheduleEvent(500, cb_b, CB_IDS[1]);
    // Manually force since ScheduleEvent doesn't call it
    core_timing.ForceExceptionCheck(500);
    REQUIRE(500 == core_timing.GetDowncount());
    core_timing.ScheduleEventThreadsafe(800, cb_c, CB_IDS[2]);
    // Manually force since ScheduleEventThreadsafe doesn't call it
    core_timing.ScheduleEvent(800, cb_c, CB_IDS[2]);
    // Manually force since ScheduleEvent doesn't call it
    core_timing.ForceExceptionCheck(800);
    REQUIRE(500 == core_timing.GetDowncount());
    core_timing.ScheduleEventThreadsafe(100, cb_d, CB_IDS[3]);
    // Manually force since ScheduleEventThreadsafe doesn't call it
    core_timing.ScheduleEvent(100, cb_d, CB_IDS[3]);
    // Manually force since ScheduleEvent doesn't call it
    core_timing.ForceExceptionCheck(100);
    REQUIRE(100 == core_timing.GetDowncount());
    core_timing.ScheduleEventThreadsafe(1200, cb_e, CB_IDS[4]);
    // Manually force since ScheduleEventThreadsafe doesn't call it
    core_timing.ScheduleEvent(1200, cb_e, CB_IDS[4]);
    // Manually force since ScheduleEvent doesn't call it
    core_timing.ForceExceptionCheck(1200);
    REQUIRE(100 == core_timing.GetDowncount());