mirror of https://github.com/justinian/jsix.git
This commit contains a couple of large, interdependent changes:

- In preparation for capability checking, the _syscall_verify_* functions now load most handles passed in and verify that they exist and are of the correct type. Lists and out-handles are not converted to objects.
- Also in preparation for capability checking, the internal representation of handles has changed. j6_handle_t is now 32 bits, and a new j6_cap_t (also 32 bits) is added. A process's handles are now a util::map<j6_handle_t, handle>, where handle is a new struct containing the id, capabilities, and object pointer. (See the sketch below.)
- The kernel object definition DSL gained a few changes to support auto-generating the handle -> object conversion in the _syscall_verify_* functions, mostly knowing the object type, plus an optional "cname" attribute on objects whose names differ from the C++ code. (Specifically vma/vm_area)
- Kernel object code and other code under kernel/objects is now in a new obj:: namespace, because fuck you <cstdlib> for putting "system" in the global namespace. Why even have that header then?
- Kernel object types constructed with the construct_handle helper now have a creation_caps static member to declare what capabilities a newly created object's handle should have.
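As a rough sketch of the handle representation described above (the field names here are assumptions; the commit message only guarantees the id, capability, and object-pointer members):

using j6_handle_t = uint32_t;  // handle ids are now 32 bits
using j6_cap_t    = uint32_t;  // capability bits, also 32 bits

struct handle {
    j6_handle_t id;        // per-process handle id
    j6_cap_t caps;         // capabilities this handle grants
    obj::kobject *object;  // the kernel object it refers to
};

// Each process then owns a util::map<j6_handle_t, handle>.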
254 lines · 6.1 KiB · C++
#include <j6/signals.h>

#include <util/pointers.h>

#include "cpu.h"
#include "logger.h"
#include "memory.h"
#include "objects/thread.h"
#include "objects/process.h"
#include "objects/vm_area.h"
#include "scheduler.h"

extern "C" void kernel_to_user_trampoline();
extern obj::vm_area_guarded &g_kernel_stacks;

namespace obj {

static constexpr j6_signal_t thread_default_signals = 0;

thread::thread(process &parent, uint8_t pri, uintptr_t rsp0) :
    kobject(kobject::type::thread, thread_default_signals),
    m_parent(parent),
    m_state(state::loading),
    m_wait_type(wait_type::none),
    m_wait_data(0),
    m_wait_obj(0)
{
    parent.space().initialize_tcb(m_tcb);
    m_tcb.priority = pri;
    m_tcb.thread = this;

    // A null rsp0 means the caller did not supply a kernel stack,
    // so allocate one from g_kernel_stacks
    if (!rsp0)
        setup_kernel_stack();
    else
        m_tcb.rsp0 = rsp0;

    m_creator = current_cpu().thread;
    m_self_handle = parent.add_handle(this, thread::parent_caps);
}

thread::~thread()
{
    g_kernel_stacks.return_section(m_tcb.kernel_stack);
}

thread & thread::current() { return *current_cpu().thread; }

inline void
schedule_if_current(thread *t)
{
    if (t == current_cpu().thread)
        scheduler::get().schedule();
}
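
// Wait/wake protocol: each wait_on_* method below records what the thread
// is blocking on and clears its ready state while holding m_wait_lock; the
// matching wake_on_* methods check the recorded wait type under the same
// lock, store the wake result, and mark the thread ready again.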

void
thread::wait_on_signals(j6_signal_t signals)
{
    {
        util::scoped_lock lock {m_wait_lock};
        m_wait_type = wait_type::signal;
        m_wait_data = signals;
        clear_state(state::ready);
    }
    schedule_if_current(this);
}

void
thread::wait_on_time(uint64_t t)
{
    {
        util::scoped_lock lock {m_wait_lock};
        m_wait_type = wait_type::time;
        m_wait_time = t;
        clear_state(state::ready);
    }
    schedule_if_current(this);
}

void
thread::wait_on_object(kobject *o, uint64_t t)
{
    {
        util::scoped_lock lock {m_wait_lock};

        m_wait_type = wait_type::object;
        m_wait_data = reinterpret_cast<uint64_t>(o);

        // A nonzero timeout turns this into a combined object-or-time wait
        if (t) {
            m_wait_type |= wait_type::time;
            m_wait_time = t;
        }

        clear_state(state::ready);
    }
    schedule_if_current(this);
}

bool
thread::wake_on_signals(kobject *obj, j6_signal_t signals)
{
    util::scoped_lock lock {m_wait_lock};

    if (!(m_wait_type & wait_type::signal) ||
        (signals & m_wait_data) == 0)
        return false;

    m_wait_type = wait_type::none;
    m_wait_result = j6_status_ok;
    m_wait_data = signals;
    m_wait_obj = obj->koid();
    set_state(state::ready);
    return true;
}

bool
thread::wake_on_time(uint64_t now)
{
    util::scoped_lock lock {m_wait_lock};

    if (!(m_wait_type & wait_type::time) ||
        now < m_wait_time)
        return false;

    // A pure sleep (no wait bits besides time) completes successfully;
    // a timed wait on anything else has timed out
    if (!(m_wait_type & ~wait_type::time))
        m_wait_result = j6_status_ok;
    else
        m_wait_result = j6_err_timed_out;

    m_wait_type = wait_type::none;
    m_wait_data = now;
    m_wait_obj = 0;
    set_state(state::ready);
    return true;
}

bool
thread::wake_on_object(kobject *o)
{
    util::scoped_lock lock {m_wait_lock};

    if (!(m_wait_type & wait_type::object) ||
        reinterpret_cast<uint64_t>(o) != m_wait_data)
        return false;

    m_wait_type = wait_type::none;
    m_wait_result = j6_status_ok;
    m_wait_obj = o->koid();
    set_state(state::ready);
    return true;
}

void
thread::wake_on_result(kobject *obj, j6_status_t result)
{
    util::scoped_lock lock {m_wait_lock};

    m_wait_type = wait_type::none;
    m_wait_result = result;
    m_wait_data = 0;
    m_wait_obj = obj->koid();
    set_state(state::ready);
}

void
thread::exit(int32_t code)
{
    m_return_code = code;
    set_state(state::exited);
    clear_state(state::ready);
    close();

    schedule_if_current(this);
}

void
thread::add_thunk_kernel(uintptr_t rip)
{
    // This adds just enough values to the top of the kernel stack
    // to come out of task_switch correctly and start executing at
    // rip (still in kernel mode)

    m_tcb.rsp -= sizeof(uintptr_t) * 7;
    uintptr_t *stack = reinterpret_cast<uintptr_t*>(m_tcb.rsp);

    stack[6] = rip;        // return rip
    stack[5] = m_tcb.rsp0; // rbp
    stack[4] = 0xbbbbbbbb; // rbx
    stack[3] = 0x12121212; // r12
    stack[2] = 0x13131313; // r13
    stack[1] = 0x14141414; // r14
    stack[0] = 0x15151515; // r15
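
    // Note: this layout presumably mirrors the register restores at the
    // end of task_switch: r15 is popped first, then r14, r13, r12, rbx
    // and rbp, with the final ret consuming the saved rip. The poison
    // values make it obvious in a debugger if a callee-saved register is
    // read before being set properly.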
}

void
thread::add_thunk_user(uintptr_t rip3, uintptr_t rip0, uint64_t flags)
{
    // This sets up the stack to:
    // a) come out of task_switch and return to rip0 (default is the
    //    kernel/user trampoline) (via add_thunk_kernel) - if this is
    //    changed, it needs to end up at the trampoline with the stack
    //    as it was
    // b) come out of the kernel/user trampoline and start executing
    //    in user mode at rip3

    flags |= 0x200; // always enable interrupts (RFLAGS.IF) in user mode
    m_tcb.rflags3 = flags;

    m_tcb.rsp -= sizeof(uintptr_t) * 7;
    uintptr_t *stack = reinterpret_cast<uintptr_t*>(m_tcb.rsp);

    stack[6] = rip3;       // return rip in rcx
    stack[5] = m_tcb.rsp3; // rbp
    stack[4] = 0xbbbbbbbb; // rbx
    stack[3] = 0x12121212; // r12
    stack[2] = 0x13131313; // r13
    stack[1] = 0x14141414; // r14
    stack[0] = 0x15151515; // r15

    static const uintptr_t trampoline =
        reinterpret_cast<uintptr_t>(kernel_to_user_trampoline);
    add_thunk_kernel(rip0 ? rip0 : trampoline);
}
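
// (Creating a runnable user thread is then presumably a matter of calling
// add_thunk_user(entry_point, 0, 0) after construction - entry_point is a
// placeholder here - letting the default kernel/user trampoline perform
// the switch to user mode.)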

void
thread::setup_kernel_stack()
{
    using mem::frame_size;
    using mem::kernel_stack_pages;
    static constexpr size_t stack_bytes = kernel_stack_pages * frame_size;

    constexpr unsigned null_frame_entries = 2;
    constexpr size_t null_frame_size = null_frame_entries * sizeof(uint64_t);

    uintptr_t stack_addr = g_kernel_stacks.get_section();
    uintptr_t stack_end = stack_addr + stack_bytes;

    // Zero the top two entries to act as a null return address and frame
    // pointer, terminating any stack walk that reaches the top
    uint64_t *null_frame = reinterpret_cast<uint64_t*>(stack_end - null_frame_size);
    for (unsigned i = 0; i < null_frame_entries; ++i)
        null_frame[i] = 0;

    log::debug(logs::memory, "Created kernel stack at %016lx size 0x%lx",
        stack_addr, stack_bytes);

    m_tcb.kernel_stack = stack_addr;
    m_tcb.rsp0 = reinterpret_cast<uintptr_t>(null_frame);
    m_tcb.rsp = m_tcb.rsp0;
}

thread *
thread::create_idle_thread(process &kernel, uint8_t pri, uintptr_t rsp0)
{
    thread *idle = new thread(kernel, pri, rsp0);
    idle->set_state(state::constant);
    idle->set_state(state::ready);
    return idle;
}

} // namespace obj