[kernel] Add new threads to different CPUs
Previously, when adding a new thread, we only ever added it to the current CPU and relied on work stealing to balance the CPUs. This commit makes the scheduler assign new tasks round-robin across CPUs, in the hope of needing to steal fewer tasks later. It also adds the run_queue.prev pointer for debugging which task was just running on a given CPU.
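For reference, below is a minimal standalone sketch of the round-robin placement this change introduces. It is not the kernel's code: cpu_count, add_index, and pick_cpu are illustrative stand-ins for g_num_cpus, m_add_index, and the selection done inside add_thread, and the real path also locks the chosen CPU's run queue and records the cpu_data pointer on the TCB.

    #include <atomic>
    #include <cstdio>

    // Stand-ins for the kernel's CPU count and the scheduler's add index.
    constexpr unsigned cpu_count = 4;
    std::atomic<unsigned> add_index {0};

    // Each call advances the index, so consecutive new threads are
    // assigned to consecutive CPUs, wrapping around at cpu_count.
    unsigned pick_cpu()
    {
        return add_index.fetch_add(1) % cpu_count;
    }

    int main()
    {
        // Ten new threads spread over four CPUs: 0 1 2 3 0 1 2 3 0 1
        for (int t = 0; t < 10; ++t)
            std::printf("thread %d -> CPU%u\n", t, pick_cpu());
    }

The commit itself stores a plain uint32_t m_add_index on the scheduler rather than an atomic; the atomic here only keeps this standalone sketch safe if pick_cpu were called concurrently.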
@@ -29,6 +29,7 @@ scheduler *scheduler::s_instance = nullptr;
 struct run_queue
 {
     tcb_node *current = nullptr;
+    tcb_node *prev = nullptr;
     tcb_list ready[scheduler::num_priorities];
     tcb_list blocked;
 
@@ -38,7 +39,7 @@ struct run_queue
 };
 
 scheduler::scheduler(unsigned cpus) :
-    m_next_pid {1}
+    m_add_index {0}
 {
     kassert(!s_instance, "Created multiple schedulers!");
     if (!s_instance)
@@ -108,11 +109,11 @@ scheduler::start()
 void
 scheduler::add_thread(TCB *t)
 {
-    cpu_data &cpu = current_cpu();
-    run_queue &queue = m_run_queues[cpu.index];
+    cpu_data *cpu = g_cpu_data[m_add_index++ % g_num_cpus];
+    run_queue &queue = m_run_queues[cpu->index];
     util::scoped_lock lock {queue.lock};
 
-    t->cpu = &cpu;
+    t->cpu = cpu;
     t->time_left = quantum(t->priority);
     queue.blocked.push_back(static_cast<tcb_node*>(t));
 }
@@ -232,6 +233,7 @@ scheduler::steal_work(cpu_data &cpu)
         stolen += balance_lists(my_queue.ready[pri], other_queue.ready[pri], cpu);
 
     stolen += balance_lists(my_queue.blocked, other_queue.blocked, cpu);
+    other_queue_lock.release();
 
     if (stolen)
         log::debug(logs::sched, "CPU%02x stole %2d tasks from CPU%02x",
@@ -313,6 +315,7 @@ scheduler::schedule()
         return;
     }
 
+    queue.prev = queue.current;
     thread *next_thread = next->thread;
 
     cpu.thread = next_thread;

@@ -5,6 +5,8 @@
 #include <stdint.h>
 #include <util/vector.h>
 
+extern cpu_data** g_cpu_data;
+
 namespace kernel {
 namespace args {
 struct program;
@@ -86,14 +88,14 @@ public:
 private:
     friend class obj::process;
 
-    static constexpr uint64_t promote_frequency = 10;
+    static constexpr uint64_t promote_frequency = 100;
     static constexpr uint64_t steal_frequency = 10;
 
     void prune(run_queue &queue, uint64_t now);
     void check_promotions(run_queue &queue, uint64_t now);
     void steal_work(cpu_data &cpu);
 
-    uint32_t m_next_pid;
+    uint32_t m_add_index;
     uint32_t m_tick_count;
 
     obj::process *m_kernel_process;