[kernel] Use the hpet clock source in scheduler

There has been a global clock object for a while now, but the scheduler
was never using it; it was still relying on its own simple increment
counter. Now it uses the HPET-backed clock.
Justin C. Miller
2022-01-15 22:31:00 -08:00
parent c631ec5ef5
commit e845379b1e
5 changed files with 19 additions and 22 deletions
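
Note: the call sites in the diffs below (clock::get(), value(), update(), and the (rate, source, data) constructor) suggest a master clock interface roughly like the following. This is a hypothetical reconstruction for reference only, not the actual contents of clock.h:

    #include <stdint.h>

    // Hypothetical sketch of the master clock interface, inferred from the
    // call sites in this commit; the real clock.h may differ.
    using clock_source_fn = uint64_t (*)(void*);

    class clock
    {
    public:
        // rate is assumed to relate raw source ticks to clock ticks
        clock(uint64_t rate, clock_source_fn source, void *data);

        static clock & get();    // the global master clock
        uint64_t value() const;  // current time, in clock ticks
        void update();           // re-sample the source and advance value()
    };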

View File

@@ -333,10 +333,11 @@ device_manager::probe_pci()
 }
 
 static uint64_t
-fake_clock_source(void*)
+tsc_clock_source(void*)
 {
-    static uint64_t value = 0;
-    return value++;
+    uint32_t lo = 0, hi = 0;
+    asm volatile ("rdtsc" : "=a" (lo), "=d" (hi));
+    return ((uint64_t)hi << 32) | lo;
 }
 
 void
@@ -367,7 +368,7 @@ device_manager::init_drivers()
         log::info(logs::clock, "Created master clock using HPET 0: Rate %d", h.rate());
     } else {
         //TODO: Other clocks, APIC clock?
-        master_clock = new clock(5000, fake_clock_source, nullptr);
+        master_clock = new clock(5000, tsc_clock_source, nullptr);
     }
 
     kassert(master_clock, "Failed to allocate master clock");
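
Where no HPET is found, the fallback source above reads the CPU's time-stamp counter instead of returning a fake incrementing value. As a standalone illustration of the same technique: rdtsc returns the 64-bit counter split across EDX:EAX, and the two halves are recombined into one value. On hardware with an invariant TSC this counter advances at a constant rate, so it behaves like a real time source.

    #include <stdint.h>

    // Standalone illustration of the rdtsc read used by tsc_clock_source:
    // the low half lands in EAX ("=a"), the high half in EDX ("=d").
    static inline uint64_t read_tsc()
    {
        uint32_t lo = 0, hi = 0;
        asm volatile ("rdtsc" : "=a" (lo), "=d" (hi));
        return ((uint64_t)hi << 32) | lo;
    }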

View File

@@ -1,8 +1,8 @@
#include "clock.h"
#include "device_manager.h"
#include "objects/endpoint.h"
#include "objects/process.h"
#include "objects/thread.h"
#include "scheduler.h"
#include "vm_space.h"
endpoint::endpoint() :
@@ -64,7 +64,7 @@ endpoint::receive(j6_tag_t *tag, void *data, size_t *data_len, uint64_t timeout)
 
     // Timeout is a duration, but wait_on_* calls need a time
     if (timeout)
-        timeout += scheduler::get().clock();
+        timeout += clock::get().value();
 
     if (!check_signal(j6_signal_endpoint_can_recv)) {
         assert_signal(j6_signal_endpoint_can_send);
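
The comment in this hunk carries the reasoning: callers pass a relative timeout, but the wait_on_* machinery wants an absolute time, so the current clock value is added before waiting. A minimal sketch of that conversion, assuming the clock interface sketched earlier (the helper name is made up for illustration, and a zero timeout keeps its special meaning):

    // Hedged sketch: convert a relative timeout (in clock ticks) into an
    // absolute deadline; zero is passed through unchanged.
    static uint64_t timeout_to_deadline(uint64_t timeout)
    {
        if (timeout)
            timeout += clock::get().value();
        return timeout;
    }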

View File

@@ -36,8 +36,7 @@ struct run_queue
 };
 
 scheduler::scheduler(unsigned cpus) :
-    m_next_pid {1},
-    m_clock {0}
+    m_next_pid {1}
 {
     kassert(!s_instance, "Created multiple schedulers!");
     if (!s_instance)
@@ -242,11 +241,13 @@ scheduler::schedule()
     lapic &apic = *cpu.apic;
     uint32_t remaining = apic.stop_timer();
 
+    uint64_t now = clock::get().value();
+
     // Only one CPU can be stealing at a time
     if (m_steal_turn == cpu.index &&
-        m_clock - queue.last_steal > steal_frequency) {
+        now - queue.last_steal > steal_frequency) {
         steal_work(cpu);
-        queue.last_steal = m_clock;
+        queue.last_steal = now;
         m_steal_turn = (m_steal_turn + 1) % m_run_queues.count();
     }
 
@@ -283,9 +284,9 @@ scheduler::schedule()
     }
 
     clock::get().update();
-    prune(queue, ++m_clock);
-    if (m_clock - queue.last_promotion > promote_frequency)
-        check_promotions(queue, m_clock);
+    prune(queue, now);
+    if (now - queue.last_promotion > promote_frequency)
+        check_promotions(queue, now);
 
     priority = 0;
     while (queue.ready[priority].empty()) {
@@ -293,10 +294,10 @@ scheduler::schedule()
         kassert(priority < num_priorities, "All runlists are empty");
     }
 
-    queue.current->last_ran = m_clock;
+    queue.current->last_ran = now;
 
     auto *next = queue.ready[priority].pop_front();
-    next->last_ran = m_clock;
+    next->last_ran = now;
     apic.reset_timer(next->time_left);
 
     if (next == queue.current) {
@@ -313,7 +314,7 @@ scheduler::schedule()
log::debug(logs::sched, "CPU%02x switching threads %llx->%llx",
cpu.index, th->koid(), next_thread->koid());
log::debug(logs::sched, " priority %d time left %d @ %lld.",
next->priority, next->time_left, m_clock);
next->priority, next->time_left, now);
log::debug(logs::sched, " PML4 %llx", next->pml4);
queue.lock.release(&waiter);
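
Taken together, the scheduler.cpp hunks make schedule() sample the global clock once per pass and reuse that single timestamp for work stealing, pruning, promotion checks, and last_ran bookkeeping, instead of bumping a private m_clock counter. A condensed, non-authoritative sketch of the resulting flow (names taken from the diff, surrounding code elided):

    uint64_t now = clock::get().value();  // one timestamp per scheduling pass

    // work stealing, pruning, promotions and last_ran all compare against
    // the same value instead of a privately incremented m_clock
    if (m_steal_turn == cpu.index &&
        now - queue.last_steal > steal_frequency) {
        steal_work(cpu);
        queue.last_steal = now;
    }

    clock::get().update();
    prune(queue, now);
    if (now - queue.last_promotion > promote_frequency)
        check_promotions(queue, now);

    queue.current->last_ran = now;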

View File

@@ -72,8 +72,6 @@ public:
     /// \arg t The new thread's TCB
     void add_thread(TCB *t);
 
-    uint64_t clock() const { return m_clock; }
-
     /// Get a reference to the scheduler
     /// \returns A reference to the global system scheduler
     static scheduler & get() { return *s_instance; }
@@ -95,9 +93,6 @@ private:
     util::vector<run_queue> m_run_queues;
 
-    // TODO: lol a real clock
-    uint64_t m_clock = 0;
-
     unsigned m_steal_turn = 0;
 
     static scheduler *s_instance;
 };
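
With scheduler::clock() and m_clock removed from the header, code that previously asked the scheduler for the time now asks the global clock directly, as the endpoint.cpp change above does:

    // before: uint64_t now = scheduler::get().clock();
    uint64_t now = clock::get().value();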