[kernel] Fix scheduler deadlocks
The scheduler queue lock could deadlock during startup if the timer fired before the scoped lock's destructor released it: the tick handler would then spin on a lock its own CPU already held. Also reduce lock contention by letting only one CPU steal work at a time.
@@ -98,6 +98,8 @@ scheduler::start()
 {
     cpu_data &cpu = current_cpu();
     run_queue &queue = m_run_queues[cpu.index];
+
+    {
     kutil::scoped_lock lock {queue.lock};
 
     process *kp = &process::kernel_process();
@@ -109,6 +111,7 @@ scheduler::start()
     cpu.tcb = tcb;
 
     queue.current = tcb;
+    }
 
     cpu.apic->enable_timer(isr::isrTimer, false);
     cpu.apic->reset_timer(10);
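These two hunks are the deadlock fix. The new braces give the scoped_lock its own scope, so its destructor releases queue.lock before the timer is enabled; previously the lock was still held when the first tick could arrive, and a tick handler taking the same queue lock would spin forever on its own CPU. A minimal standalone sketch of the pattern, using stand-in types rather than the kernel's real kutil::spinlock and kutil::scoped_lock:

#include <atomic>

// Stand-in spinlock and RAII guard, not the kernel's kutil types.
struct spinlock {
    std::atomic_flag held = ATOMIC_FLAG_INIT;
    void lock()   { while (held.test_and_set(std::memory_order_acquire)) {} }
    void unlock() { held.clear(std::memory_order_release); }
};

struct scoped_lock {
    spinlock &s;
    explicit scoped_lock(spinlock &s) : s{s} { s.lock(); }
    ~scoped_lock() { s.unlock(); }   // runs at end of enclosing scope
};

spinlock queue_lock;

void start_before_fix() {
    scoped_lock lock {queue_lock};
    // ... set up the run queue ...
    // enable_timer();  // BUG: queue_lock is still held here; if the
    //                  // first tick's handler takes queue_lock on this
    //                  // CPU, it spins forever on its own lock.
}   // the destructor releases queue_lock only now, too late

void start_after_fix() {
    {
        scoped_lock lock {queue_lock};
        // ... set up the run queue ...
    }   // destructor releases queue_lock here, before the timer is live
    // enable_timer();  // safe: no lock held when the first tick fires
}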
@@ -219,9 +222,6 @@ balance_lists(tcb_list &to, tcb_list &from)
 void
 scheduler::steal_work(cpu_data &cpu)
 {
-    // First grab a scheduler-wide lock to avoid deadlock
-    kutil::scoped_lock steal_lock {m_steal_lock};
-
     // Lock this cpu's queue for the whole time while we modify it
     run_queue &my_queue = m_run_queues[cpu.index];
     kutil::scoped_lock my_queue_lock {my_queue.lock};
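Dropping the scheduler-wide steal lock is only safe because of the next hunk: the turn check in schedule() guarantees at most one CPU enters steal_work() at a time, so the global lock added contention without adding protection.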
@@ -255,9 +255,12 @@ scheduler::schedule()
     lapic &apic = *cpu.apic;
     uint32_t remaining = apic.stop_timer();
 
-    if (m_clock - queue.last_steal > steal_frequency) {
+    // Only one CPU can be stealing at a time
+    if (m_steal_turn == cpu.index &&
+        m_clock - queue.last_steal > steal_frequency) {
         steal_work(cpu);
         queue.last_steal = m_clock;
+        m_steal_turn = (m_steal_turn + 1) % m_run_queues.count();
     }
 
     // We need to explicitly lock/unlock here instead of
(second file in this commit: the scheduler class definition)
@@ -97,7 +97,7 @@ private:
     // TODO: lol a real clock
     uint64_t m_clock = 0;
 
-    kutil::spinlock m_steal_lock;
+    unsigned m_steal_turn = 0;
     static scheduler *s_instance;
 };
 
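Taken together, the schedule() hunk and this new m_steal_turn member replace the global steal lock with a round-robin turn: a CPU may only attempt a steal when its index matches the turn, and it passes the turn along after stealing. A self-contained sketch of the scheme; steal_frequency's value, maybe_steal, and try_steal are illustrative stand-ins, not the kernel's actual names:

#include <cstdint>

constexpr uint64_t steal_frequency = 100;   // assumed tick threshold

struct per_cpu {
    uint64_t last_steal = 0;    // tick count at this CPU's last steal
};

unsigned steal_turn = 0;        // plays the role of m_steal_turn

void maybe_steal(unsigned my_index, unsigned cpu_count,
                 per_cpu &me, uint64_t clock)
{
    // Only the CPU whose index matches the turn may steal, so at most
    // one CPU is walking the other run queues at any moment.
    if (steal_turn == my_index &&
        clock - me.last_steal > steal_frequency) {
        // try_steal();         // would lock and rebalance other queues
        me.last_steal = clock;
        steal_turn = (steal_turn + 1) % cpu_count;  // pass the turn on
    }
}

Note that the turn only advances when its holder actually steals: a CPU whose last_steal is still recent keeps the turn until its window expires, so the scheme throttles stealing globally as well as per CPU.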