[kernel] Add an IPI to tell a CPU to run the scheduler

When waking another thread, if the woken thread has a more urgent
priority than the thread currently running on its assigned CPU, send
that CPU an IPI telling it to run its scheduler.
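
For context, the receiving side of this IPI is small: the ipiSchedule
vector only needs to drop the interrupted CPU back into its scheduler.
The following is an illustrative sketch, not code from this commit;
isr::ipiSchedule and isr_handler() are real names from this change, but
the handler's signature and the scheduler::get() accessor are
assumptions.

    // Illustrative sketch only -- not the handler added by this commit.
    // isr::ipiSchedule and isr_handler() exist in this change; the
    // handler signature and scheduler::get() are assumptions.
    void
    isr_handler(isr vector)
    {
        switch (vector) {
        case isr::ipiSchedule:
            // Another CPU woke a thread onto our run queue that is more
            // urgent than what we're running: re-enter the scheduler.
            scheduler::get().schedule();
            break;

        default:
            // ... other vectors handled as before ...
            break;
        }
    }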

Related changes in this commit:

- Add the ipiSchedule isr (vector 0xe4) and its handler in
  isr_handler().
- Change the APIC's send_ipi* functions to take an isr enum instead of
  an int for their vector parameter.
- Thread TCBs now contain a pointer to their current CPU's cpu_data
  structure.
- Add the maybe_schedule() call to the scheduler, which sends the
  schedule IPI to the given thread's CPU only when that CPU is running a
  less-urgent thread.
- Take the run queue lock earlier in schedule() instead of taking it in
  steal_work() and again in schedule().
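
Taken together, a wake now looks roughly like the sketch below. The
thread::wake() call site, set_state(), m_tcb, and scheduler::get() are
assumptions made for illustration; maybe_schedule(), the TCB cpu
pointer, and the priority check are from the diff that follows.

    // Hypothetical wake path (the call-site names are assumptions):
    // waking a thread also pokes that thread's CPU when the woken
    // thread is more urgent than whatever that CPU is running.
    void
    thread::wake()
    {
        set_state(state::ready);                  // mark the thread runnable
        scheduler::get().maybe_schedule(&m_tcb);  // sends isr::ipiSchedule to
                                                  // m_tcb.cpu only if its
                                                  // current thread is less
                                                  // urgent
    }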
Justin C. Miller
2022-02-26 14:04:14 -08:00
parent 40274f5fac
commit 982442eb00
10 changed files with 73 additions and 36 deletions


@@ -112,8 +112,9 @@ scheduler::add_thread(TCB *t)
     run_queue &queue = m_run_queues[cpu.index];
     util::scoped_lock lock {queue.lock};
-    queue.blocked.push_back(static_cast<tcb_node*>(t));
+    t->cpu = &cpu;
     t->time_left = quantum(t->priority);
+    queue.blocked.push_back(static_cast<tcb_node*>(t));
 }
 void
@@ -128,7 +129,7 @@ scheduler::prune(run_queue &queue, uint64_t now)
         uint64_t timeout = th->wake_timeout();
         if (timeout && timeout <= now)
-            th->wake();
+            th->wake_only();
         bool ready = th->has_state(thread::state::ready);
         bool exited = th->has_state(thread::state::exited);
@@ -167,8 +168,8 @@ scheduler::check_promotions(run_queue &queue, uint64_t now)
     for (auto &pri_list : queue.ready) {
         for (auto *tcb : pri_list) {
             const thread *th = tcb->thread;
-            const bool constant = th->has_state(thread::state::constant);
-            if (constant)
+            if (th->has_state(thread::state::constant))
                 continue;
             const uint64_t age = now - tcb->last_ran;
@@ -176,8 +177,7 @@ scheduler::check_promotions(run_queue &queue, uint64_t now)
             bool stale =
                 age > quantum(priority) * 2 &&
-                tcb->priority > promote_limit &&
-                !constant;
+                tcb->priority > promote_limit;
             if (stale) {
                 // If the thread is stale, promote it
@@ -195,7 +195,7 @@ scheduler::check_promotions(run_queue &queue, uint64_t now)
 }
 static size_t
-balance_lists(tcb_list &to, tcb_list &from)
+balance_lists(tcb_list &to, tcb_list &from, cpu_data &new_cpu)
 {
     size_t to_len = to.length();
     size_t from_len = from.length();
@@ -205,17 +205,18 @@ balance_lists(tcb_list &to, tcb_list &from)
         return 0;
     size_t steal = (from_len - to_len) / 2;
-    for (size_t i = 0; i < steal; ++i)
-        to.push_front(from.pop_front());
+    for (size_t i = 0; i < steal; ++i) {
+        tcb_node *node = from.pop_front();
+        node->cpu = &new_cpu;
+        to.push_front(node);
+    }
     return steal;
 }
 void
 scheduler::steal_work(cpu_data &cpu)
 {
-    // Lock this cpu's queue for the whole time while we modify it
     run_queue &my_queue = m_run_queues[cpu.index];
-    util::scoped_lock my_queue_lock {my_queue.lock};
     const unsigned count = m_run_queues.count();
     for (unsigned i = 0; i < count; ++i) {
@@ -228,9 +229,9 @@ scheduler::steal_work(cpu_data &cpu)
         // Don't steal from max_priority, that's the idle thread
         for (unsigned pri = 0; pri < max_priority; ++pri)
-            stolen += balance_lists(my_queue.ready[pri], other_queue.ready[pri]);
+            stolen += balance_lists(my_queue.ready[pri], other_queue.ready[pri], cpu);
-        stolen += balance_lists(my_queue.blocked, other_queue.blocked);
+        stolen += balance_lists(my_queue.blocked, other_queue.blocked, cpu);
         if (stolen)
             log::debug(logs::sched, "CPU%02x stole %2d tasks from CPU%02x",
@@ -244,10 +245,18 @@ scheduler::schedule()
     cpu_data &cpu = current_cpu();
     run_queue &queue = m_run_queues[cpu.index];
     lapic &apic = *cpu.apic;
-    uint32_t remaining = apic.stop_timer();
+    uint32_t remaining = apic.stop_timer();
     uint64_t now = clock::get().value();
+    // We need to explicitly lock/unlock here instead of
+    // using a scoped lock, because the scope doesn't "end"
+    // for the current thread until it gets scheduled again,
+    // and _new_ threads start their life at the end of this
+    // function, which screws up RAII
+    util::spinlock::waiter waiter {false, nullptr, "schedule"};
+    queue.lock.acquire(&waiter);
     // Only one CPU can be stealing at a time
     if (m_steal_turn == cpu.index &&
         now - queue.last_steal > steal_frequency) {
@@ -256,12 +265,6 @@ scheduler::schedule()
         m_steal_turn = (m_steal_turn + 1) % m_run_queues.count();
     }
-    // We need to explicitly lock/unlock here instead of
-    // using a scoped lock, because the scope doesn't "end"
-    // for the current thread until it gets scheduled again
-    util::spinlock::waiter waiter;
-    queue.lock.acquire(&waiter);
     queue.current->time_left = remaining;
     thread *th = queue.current->thread;
     uint8_t priority = queue.current->priority;
@@ -325,3 +328,17 @@ scheduler::schedule()
     queue.lock.release(&waiter);
     task_switch(queue.current);
 }
+void
+scheduler::maybe_schedule(TCB *t)
+{
+    cpu_data *cpu = t->cpu;
+    run_queue &queue = m_run_queues[cpu->index];
+    uint8_t current_pri = queue.current->priority;
+    if (current_pri <= t->priority)
+        return;
+    current_cpu().apic->send_ipi(
+        lapic::ipi::fixed, isr::ipiSchedule, cpu->id);
+}
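
The send_ipi* signature change mentioned in the commit message doesn't
appear in the hunks shown above. Roughly, it looks like the sketch
below; only the switch from an int vector to the isr enum comes from
this commit, while the parameter names, the ipi mode enum, and the
uint8_t destination type are inferred from the maybe_schedule() call
and may differ.

    // Hypothetical before/after of the APIC interface (not shown in the
    // diff above); only the int -> isr change is from the commit message.
    class lapic
    {
    public:
        enum class ipi { fixed /* , ... other delivery modes ... */ };

        // before: void send_ipi(ipi mode, int vector, uint8_t dest_id);
        void send_ipi(ipi mode, isr vector, uint8_t dest_id);

        // ... the other send_ipi* variants change the same way ...
    };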