[project] Lose the battle between tabs & spaces
I'm a tabs guy. I like tabs: they're an elegant way to represent indentation rather than brute-forcing it. But I have to admit the world seems to be moving toward spaces, and tooling tends not to play nice with tabs. So here we go: converting the whole repo to spaces, since I'm getting tired of all the inconsistent formatting.
Committed by Justin C. Miller
parent d36b2d8057
commit 8f529046a9
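For context, the conversion itself is mechanical. The commit gives no details about how it was done, so the following is only a sketch of the kind of one-off script that could sweep a tree like this; the "src" directory, the file extensions, and the 4-space indent width are assumptions, not taken from this repo.

#!/usr/bin/env python3
# Hypothetical tabs-to-spaces sweep -- a sketch, not the tool used for this
# commit. Directory name, extensions, and indent width are all assumptions.
from pathlib import Path

INDENT = "    "                       # assumed 4-space indent
EXTS = {".cpp", ".h", ".inc", ".s"}   # assumed source extensions

def convert(root: str) -> None:
    for path in Path(root).rglob("*"):
        if not path.is_file() or path.suffix not in EXTS:
            continue
        out = []
        for line in path.read_text().splitlines(keepends=True):
            stripped = line.lstrip("\t")       # only touch leading tabs
            depth = len(line) - len(stripped)  # one tab == one indent level
            out.append(INDENT * depth + stripped)
        path.write_text("".join(out))

if __name__ == "__main__":
    convert("src")   # assumed repo layout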
@@ -31,304 +31,304 @@ scheduler *scheduler::s_instance = nullptr;
struct run_queue
{
    tcb_node *current = nullptr;
    tcb_list ready[scheduler::num_priorities];
    tcb_list blocked;

    uint64_t last_promotion = 0;
    uint64_t last_steal = 0;
    kutil::spinlock lock;
};

scheduler::scheduler(unsigned cpus) :
    m_next_pid {1},
    m_clock {0}
{
    kassert(!s_instance, "Created multiple schedulers!");
    if (!s_instance)
        s_instance = this;

    m_run_queues.set_size(cpus);
}

scheduler::~scheduler()
{
    // Not truly necessary - if the scheduler is going away, the whole
    // system is probably going down. But let's be clean.
    if (s_instance == this)
        s_instance = nullptr;
}

template <typename T>
inline T * push(uintptr_t &rsp, size_t size = sizeof(T)) {
    rsp -= size;
    T *p = reinterpret_cast<T*>(rsp);
    rsp &= ~(sizeof(uint64_t)-1); // Align the stack
    return p;
}

void
scheduler::create_kernel_task(void (*task)(), uint8_t priority, bool constant)
{
    thread *th = process::kernel_process().create_thread(priority, false);
    auto *tcb = th->tcb();

    th->add_thunk_kernel(reinterpret_cast<uintptr_t>(task));

    tcb->time_left = quantum(priority);
    if (constant)
        th->set_state(thread::state::constant);

    th->set_state(thread::state::ready);

    log::debug(logs::task, "Creating kernel task: thread %llx pri %d", th->koid(), tcb->priority);
    log::debug(logs::task, " RSP0 %016lx", tcb->rsp0);
    log::debug(logs::task, " RSP %016lx", tcb->rsp);
    log::debug(logs::task, " PML4 %016lx", tcb->pml4);
}

uint32_t
scheduler::quantum(int priority)
{
    return quantum_micros << priority;
}

void
scheduler::start()
{
    cpu_data &cpu = current_cpu();
    run_queue &queue = m_run_queues[cpu.index];

    {
        kutil::scoped_lock lock {queue.lock};

        process *kp = &process::kernel_process();
        thread *idle = thread::create_idle_thread(*kp, max_priority, cpu.rsp0);

        auto *tcb = idle->tcb();
        cpu.process = kp;
        cpu.thread = idle;
        cpu.tcb = tcb;

        queue.current = tcb;
    }

    cpu.apic->enable_timer(isr::isrTimer, false);
    cpu.apic->reset_timer(10);
}

void
scheduler::add_thread(TCB *t)
{
    cpu_data &cpu = current_cpu();
    run_queue &queue = m_run_queues[cpu.index];
    kutil::scoped_lock lock {queue.lock};

    queue.blocked.push_back(static_cast<tcb_node*>(t));
    t->time_left = quantum(t->priority);
}

void scheduler::prune(run_queue &queue, uint64_t now)
{
    // Find processes that are ready or have exited and
    // move them to the appropriate lists.
    auto *tcb = queue.blocked.front();
    while (tcb) {
        thread *th = thread::from_tcb(tcb);
        uint8_t priority = tcb->priority;

        bool ready = th->has_state(thread::state::ready);
        bool exited = th->has_state(thread::state::exited);
        bool constant = th->has_state(thread::state::constant);
        bool current = tcb == queue.current;

        ready |= th->wake_on_time(now);

        auto *remove = tcb;
        tcb = tcb->next();
        if (!exited && !ready)
            continue;

        if (exited) {
            // If the current thread has exited, wait until the next call
            // to prune() to delete it, because we may be deleting our current
            // page tables
            if (current) continue;

            queue.blocked.remove(remove);
            process &p = th->parent();

            // thread_exited deletes the thread, and returns true if the process
            // should also now be deleted
            if (!current && p.thread_exited(th))
                delete &p;
        } else {
            queue.blocked.remove(remove);
            log::debug(logs::sched, "Prune: readying unblocked thread %llx", th->koid());
            queue.ready[remove->priority].push_back(remove);
        }
    }
}

void
scheduler::check_promotions(run_queue &queue, uint64_t now)
{
    for (auto &pri_list : queue.ready) {
        for (auto *tcb : pri_list) {
            const thread *th = thread::from_tcb(queue.current);
            const bool constant = th->has_state(thread::state::constant);
            if (constant)
                continue;

            const uint64_t age = now - tcb->last_ran;
            const uint8_t priority = tcb->priority;

            bool stale =
                age > quantum(priority) * 2 &&
                tcb->priority > promote_limit &&
                !constant;

            if (stale) {
                // If the thread is stale, promote it
                queue.ready[priority].remove(tcb);
                tcb->priority -= 1;
                tcb->time_left = quantum(tcb->priority);
                queue.ready[tcb->priority].push_back(tcb);
                log::info(logs::sched, "Scheduler promoting thread %llx, priority %d",
                        th->koid(), tcb->priority);
            }
        }
    }

    queue.last_promotion = now;
}

static size_t
balance_lists(tcb_list &to, tcb_list &from)
{
    size_t to_len = to.length();
    size_t from_len = from.length();

    // Only steal from the rich, don't be Dennis Moore
    if (from_len <= to_len)
        return 0;

    size_t steal = (from_len - to_len) / 2;
    for (size_t i = 0; i < steal; ++i)
        to.push_front(from.pop_front());
    return steal;
}

void
scheduler::steal_work(cpu_data &cpu)
{
    // Lock this cpu's queue for the whole time while we modify it
    run_queue &my_queue = m_run_queues[cpu.index];
    kutil::scoped_lock my_queue_lock {my_queue.lock};

    const unsigned count = m_run_queues.count();
    for (unsigned i = 0; i < count; ++i) {
        if (i == cpu.index) continue;

        run_queue &other_queue = m_run_queues[i];
        kutil::scoped_lock other_queue_lock {other_queue.lock};

        size_t stolen = 0;

        // Don't steal from max_priority, that's the idle thread
        for (unsigned pri = 0; pri < max_priority; ++pri)
            stolen += balance_lists(my_queue.ready[pri], other_queue.ready[pri]);

        stolen += balance_lists(my_queue.blocked, other_queue.blocked);

        if (stolen)
            log::debug(logs::sched, "CPU%02x stole %2d tasks from CPU%02x",
                    cpu.index, stolen, i);
    }
}

void
scheduler::schedule()
{
    cpu_data &cpu = current_cpu();
    run_queue &queue = m_run_queues[cpu.index];
    lapic &apic = *cpu.apic;
    uint32_t remaining = apic.stop_timer();

    // Only one CPU can be stealing at a time
    if (m_steal_turn == cpu.index &&
        m_clock - queue.last_steal > steal_frequency) {
        steal_work(cpu);
        queue.last_steal = m_clock;
        m_steal_turn = (m_steal_turn + 1) % m_run_queues.count();
    }

    // We need to explicitly lock/unlock here instead of
    // using a scoped lock, because the scope doesn't "end"
    // for the current thread until it gets scheduled again
    kutil::spinlock::waiter waiter;
    queue.lock.acquire(&waiter);

    queue.current->time_left = remaining;
    thread *th = thread::from_tcb(queue.current);
    uint8_t priority = queue.current->priority;
    const bool constant = th->has_state(thread::state::constant);

    if (remaining == 0) {
        if (priority < max_priority && !constant) {
            // Process used its whole timeslice, demote it
            ++queue.current->priority;
            log::debug(logs::sched, "Scheduler demoting thread %llx, priority %d",
                    th->koid(), queue.current->priority);
        }
        queue.current->time_left = quantum(queue.current->priority);
    } else if (remaining > 0) {
        // Process gave up CPU, give it a small bonus to its
        // remaining timeslice.
        uint32_t bonus = quantum(priority) >> 4;
        queue.current->time_left += bonus;
    }

    if (th->has_state(thread::state::ready)) {
        queue.ready[queue.current->priority].push_back(queue.current);
    } else {
        queue.blocked.push_back(queue.current);
    }

    clock::get().update();
    prune(queue, ++m_clock);
    if (m_clock - queue.last_promotion > promote_frequency)
        check_promotions(queue, m_clock);

    priority = 0;
    while (queue.ready[priority].empty()) {
        ++priority;
        kassert(priority < num_priorities, "All runlists are empty");
    }

    queue.current->last_ran = m_clock;

    auto *next = queue.ready[priority].pop_front();
    next->last_ran = m_clock;
    apic.reset_timer(next->time_left);

    if (next == queue.current) {
        queue.lock.release(&waiter);
        return;
    }

    thread *next_thread = thread::from_tcb(next);

    cpu.thread = next_thread;
    cpu.process = &next_thread->parent();
    queue.current = next;

    log::debug(logs::sched, "CPU%02x switching threads %llx->%llx",
            cpu.index, th->koid(), next_thread->koid());
    log::debug(logs::sched, " priority %d time left %d @ %lld.",
            next->priority, next->time_left, m_clock);
    log::debug(logs::sched, " PML4 %llx", next->pml4);

    queue.lock.release(&waiter);
    task_switch(queue.current);
}