[project] Lose the battle between tabs & spaces

I'm a tabs guy. I like tabs: they're an elegant way to represent
indentation instead of brute-forcing it. But I have to admit that the
world seems to be moving towards spaces, and tooling tends not to play
nice with tabs. So here we go: the whole repo is switching to spaces,
since I'm getting tired of all the inconsistent formatting.
F in Chat for Tabs
2021-08-01 17:46:16 -07:00
committed by Justin C. Miller
parent d36b2d8057
commit 8f529046a9
161 changed files with 7958 additions and 7958 deletions
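
For readers curious what the mechanical change amounts to, here is a minimal standalone sketch of tab expansion at 4-column tab stops. The indent width and the tool actually used for this commit are assumptions, not recorded here.

// detab.cpp - illustrative only; expands tabs to spaces at 4-column tab stops.
#include <fstream>
#include <iostream>
#include <string>

int main(int argc, char *argv[])
{
    if (argc < 2) { std::cerr << "usage: detab <file>\n"; return 1; }

    std::ifstream in {argv[1]};
    std::string line;
    while (std::getline(in, line)) {
        std::string out;
        for (char c : line) {
            if (c == '\t')
                out.append(4 - out.size() % 4, ' ');  // advance to the next tab stop
            else
                out.push_back(c);
        }
        std::cout << out << '\n';
    }
    return 0;
}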

View File

@@ -9,82 +9,82 @@ extern vm_area_guarded g_kernel_buffers;
constexpr size_t buffer_bytes = memory::kernel_buffer_pages * memory::frame_size;

channel::channel() :
    m_len(0),
    m_data(g_kernel_buffers.get_section()),
    m_buffer(reinterpret_cast<uint8_t*>(m_data), buffer_bytes),
    kobject(kobject::type::channel, j6_signal_channel_can_send)
{
}

channel::~channel()
{
    if (!closed()) close();
}

j6_status_t
channel::enqueue(size_t *len, const void *data)
{
    // TODO: Make this thread safe!
    if (closed())
        return j6_status_closed;

    if (!len || !*len)
        return j6_err_invalid_arg;

    if (m_buffer.free_space() == 0)
        return j6_err_not_ready;

    void *buffer = nullptr;
    size_t avail = m_buffer.reserve(*len, &buffer);
    *len = *len > avail ? avail : *len;

    kutil::memcpy(buffer, data, *len);
    m_buffer.commit(*len);

    assert_signal(j6_signal_channel_can_recv);
    if (m_buffer.free_space() == 0)
        deassert_signal(j6_signal_channel_can_send);

    return j6_status_ok;
}

j6_status_t
channel::dequeue(size_t *len, void *data)
{
    // TODO: Make this thread safe!
    if (closed())
        return j6_status_closed;

    if (!len || !*len)
        return j6_err_invalid_arg;

    if (m_buffer.size() == 0)
        return j6_err_not_ready;

    void *buffer = nullptr;
    size_t avail = m_buffer.get_block(&buffer);
    *len = *len > avail ? avail : *len;

    kutil::memcpy(data, buffer, *len);
    m_buffer.consume(*len);

    assert_signal(j6_signal_channel_can_send);
    if (m_buffer.size() == 0)
        deassert_signal(j6_signal_channel_can_recv);

    return j6_status_ok;
}

void
channel::close()
{
    kobject::close();
    g_kernel_buffers.return_section(m_data);
}

void
channel::on_no_handles()
{
    kobject::on_no_handles();
    delete this;
}

View File

@@ -8,42 +8,42 @@
/// Channels are bi-directional means of sending messages
class channel :
    public kobject
{
public:
    channel();
    virtual ~channel();

    static constexpr kobject::type type = kobject::type::channel;

    /// Check if the channel has space for a message to be sent
    inline bool can_send() const { return check_signal(j6_signal_channel_can_send); }

    /// Check if the channel has a message waiting already
    inline bool can_receive() const { return check_signal(j6_signal_channel_can_recv); }

    /// Put a message into the channel
    /// \arg len [in] Bytes in data buffer [out] number of bytes written
    /// \arg data Pointer to the message data
    /// \returns j6_status_ok on success
    j6_status_t enqueue(size_t *len, const void *data);

    /// Get a message from the channel, copied into a provided buffer
    /// \arg len On input, the size of the provided buffer. On output,
    ///          the size of the message copied into the buffer.
    /// \arg data Pointer to the buffer
    /// \returns j6_status_ok on success
    j6_status_t dequeue(size_t *len, void *data);

    /// Mark this channel as closed. All future calls to enqueue or
    /// dequeue messages will fail with j6_status_closed.
    virtual void close() override;

protected:
    virtual void on_no_handles() override;

private:
    size_t m_len;
    uintptr_t m_data;
    kutil::bip_buffer m_buffer;
};
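
For orientation, a minimal sketch of the [in]/[out] use of *len described above. The caller and the include path are assumptions for illustration, not part of this commit.

// Hypothetical in-kernel caller; assumes this header is available as "objects/channel.h".
#include "objects/channel.h"

j6_status_t send_greeting(channel &ch)
{
    const char msg[] = "hello";
    size_t len = sizeof(msg);              // [in] how many bytes we want to enqueue
    j6_status_t s = ch.enqueue(&len, msg);
    // On success, len now holds the bytes actually written, which may be
    // fewer if the ring buffer had less free space available.
    return s;
}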

View File

@@ -5,128 +5,128 @@
#include "vm_space.h"
endpoint::endpoint() :
kobject {kobject::type::endpoint}
kobject {kobject::type::endpoint}
{}
endpoint::~endpoint()
{
if (!check_signal(j6_signal_closed))
close();
if (!check_signal(j6_signal_closed))
close();
}
void
endpoint::close()
{
kobject::close();
kobject::close();
for (auto &data : m_blocked) {
if (data.th)
data.th->wake_on_result(this, j6_status_closed);
}
for (auto &data : m_blocked) {
if (data.th)
data.th->wake_on_result(this, j6_status_closed);
}
device_manager::get().unbind_irqs(this);
device_manager::get().unbind_irqs(this);
}
j6_status_t
endpoint::send(j6_tag_t tag, size_t len, void *data)
{
thread_data sender = { &thread::current(), data };
sender.len = len;
sender.tag = tag;
thread_data sender = { &thread::current(), data };
sender.len = len;
sender.tag = tag;
if (!check_signal(j6_signal_endpoint_can_send)) {
assert_signal(j6_signal_endpoint_can_recv);
m_blocked.append(sender);
sender.th->wait_on_object(this);
if (!check_signal(j6_signal_endpoint_can_send)) {
assert_signal(j6_signal_endpoint_can_recv);
m_blocked.append(sender);
sender.th->wait_on_object(this);
// we woke up having already finished the send
// because it happened in the receiver
return sender.th->get_wait_result();
}
// we woke up having already finished the send
// because it happened in the receiver
return sender.th->get_wait_result();
}
thread_data receiver = m_blocked.pop_front();
if (m_blocked.count() == 0)
deassert_signal(j6_signal_endpoint_can_send);
thread_data receiver = m_blocked.pop_front();
if (m_blocked.count() == 0)
deassert_signal(j6_signal_endpoint_can_send);
j6_status_t status = do_message_copy(sender, receiver);
j6_status_t status = do_message_copy(sender, receiver);
receiver.th->wake_on_result(this, status);
return status;
receiver.th->wake_on_result(this, status);
return status;
}
j6_status_t
endpoint::receive(j6_tag_t *tag, size_t *len, void *data)
{
thread_data receiver = { &thread::current(), data };
receiver.tag_p = tag;
receiver.len_p = len;
thread_data receiver = { &thread::current(), data };
receiver.tag_p = tag;
receiver.len_p = len;
if (!check_signal(j6_signal_endpoint_can_recv)) {
assert_signal(j6_signal_endpoint_can_send);
m_blocked.append(receiver);
receiver.th->wait_on_object(this);
if (!check_signal(j6_signal_endpoint_can_recv)) {
assert_signal(j6_signal_endpoint_can_send);
m_blocked.append(receiver);
receiver.th->wait_on_object(this);
// we woke up having already finished the recv
// because it happened in the sender
return receiver.th->get_wait_result();
}
// we woke up having already finished the recv
// because it happened in the sender
return receiver.th->get_wait_result();
}
thread_data sender = m_blocked.pop_front();
if (m_blocked.count() == 0)
deassert_signal(j6_signal_endpoint_can_recv);
thread_data sender = m_blocked.pop_front();
if (m_blocked.count() == 0)
deassert_signal(j6_signal_endpoint_can_recv);
// TODO: don't pop sender on some errors
j6_status_t status = do_message_copy(sender, receiver);
// TODO: don't pop sender on some errors
j6_status_t status = do_message_copy(sender, receiver);
if (sender.th)
sender.th->wake_on_result(this, status);
if (sender.th)
sender.th->wake_on_result(this, status);
return status;
return status;
}
void
endpoint::signal_irq(unsigned irq)
{
j6_tag_t tag = j6_tag_from_irq(irq);
j6_tag_t tag = j6_tag_from_irq(irq);
if (!check_signal(j6_signal_endpoint_can_send)) {
assert_signal(j6_signal_endpoint_can_recv);
if (!check_signal(j6_signal_endpoint_can_send)) {
assert_signal(j6_signal_endpoint_can_recv);
for (auto &blocked : m_blocked)
if (blocked.tag == tag)
return;
for (auto &blocked : m_blocked)
if (blocked.tag == tag)
return;
thread_data sender = { nullptr, nullptr };
sender.tag = tag;
m_blocked.append(sender);
return;
}
thread_data sender = { nullptr, nullptr };
sender.tag = tag;
m_blocked.append(sender);
return;
}
thread_data receiver = m_blocked.pop_front();
if (m_blocked.count() == 0)
deassert_signal(j6_signal_endpoint_can_send);
thread_data receiver = m_blocked.pop_front();
if (m_blocked.count() == 0)
deassert_signal(j6_signal_endpoint_can_send);
*receiver.len_p = 0;
*receiver.tag_p = tag;
receiver.th->wake_on_result(this, j6_status_ok);
*receiver.len_p = 0;
*receiver.tag_p = tag;
receiver.th->wake_on_result(this, j6_status_ok);
}
j6_status_t
endpoint::do_message_copy(const endpoint::thread_data &sender, endpoint::thread_data &receiver)
{
if (sender.len > *receiver.len_p)
return j6_err_insufficient;
if (sender.len > *receiver.len_p)
return j6_err_insufficient;
if (sender.len) {
vm_space &source = sender.th->parent().space();
vm_space &dest = receiver.th->parent().space();
vm_space::copy(source, dest, sender.data, receiver.data, sender.len);
}
if (sender.len) {
vm_space &source = sender.th->parent().space();
vm_space &dest = receiver.th->parent().space();
vm_space::copy(source, dest, sender.data, receiver.data, sender.len);
}
*receiver.len_p = sender.len;
*receiver.tag_p = sender.tag;
*receiver.len_p = sender.len;
*receiver.tag_p = sender.tag;
// TODO: this will not work if non-contiguous pages are mapped!!
// TODO: this will not work if non-contiguous pages are mapped!!
return j6_status_ok;
return j6_status_ok;
}

View File

@@ -7,59 +7,59 @@
/// Endpoints are objects that enable synchronous message-passing IPC
class endpoint :
    public kobject
{
public:
    endpoint();
    virtual ~endpoint();

    static constexpr kobject::type type = kobject::type::endpoint;

    /// Close the endpoint, waking all waiting processes with an error
    virtual void close() override;

    /// Check if the endpoint has space for a message to be sent
    inline bool can_send() const { return check_signal(j6_signal_endpoint_can_send); }

    /// Check if the endpoint has a message waiting already
    inline bool can_receive() const { return check_signal(j6_signal_endpoint_can_recv); }

    /// Send a message to a thread waiting to receive on this endpoint. If no threads
    /// are currently trying to receive, block the current thread.
    /// \arg tag The application-specified message tag
    /// \arg len The size in bytes of the message
    /// \arg data The message data
    /// \returns j6_status_ok on success
    j6_status_t send(j6_tag_t tag, size_t len, void *data);

    /// Receive a message from a thread waiting to send on this endpoint. If no threads
    /// are currently trying to send, block the current thread.
    /// \arg tag [out] The sender-specified message tag
    /// \arg len [in] The size in bytes of the buffer [out] Number of bytes in the message
    /// \arg data Buffer for copying message data into
    /// \returns j6_status_ok on success
    j6_status_t receive(j6_tag_t *tag, size_t *len, void *data);

    /// Give the listener on the endpoint a message that a bound IRQ has been signalled
    /// \arg irq The IRQ that caused this signal
    void signal_irq(unsigned irq);

private:
    struct thread_data
    {
        thread *th;
        void *data;
        union {
            j6_tag_t *tag_p;
            j6_tag_t tag;
        };
        union {
            size_t *len_p;
            size_t len;
        };
    };

    j6_status_t do_message_copy(const thread_data &sender, thread_data &receiver);

    kutil::vector<thread_data> m_blocked;
};
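
As a reading aid, a small sketch of the rendezvous these comments describe, from two hypothetical kernel-side callers. The function names and include path are assumptions for illustration.

// Assumes an in-kernel context with this header available as "objects/endpoint.h".
#include "objects/endpoint.h"

// Sender: blocks in send() unless a receiver is already waiting.
void producer(endpoint &ep)
{
    char msg[] = "ping";
    ep.send(/*tag*/ 1, sizeof(msg), msg);
}

// Receiver: blocks in receive() unless a sender is already waiting,
// then wakes with the tag and the copied message.
void consumer(endpoint &ep)
{
    char buf[64];
    j6_tag_t tag = 0;
    size_t len = sizeof(buf);              // [in] buffer size, [out] message size
    j6_status_t s = ep.receive(&tag, &len, buf);
    (void)s;
}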

View File

@@ -5,11 +5,11 @@
#include "objects/kobject.h"
class event :
public kobject
public kobject
{
public:
event() :
kobject(type::event) {}
event() :
kobject(type::event) {}
static constexpr kobject::type type = kobject::type::event;
static constexpr kobject::type type = kobject::type::event;
};

View File

@@ -8,67 +8,67 @@
static j6_koid_t next_koids [static_cast<size_t>(kobject::type::max)] = { 0 };

kobject::kobject(type t, j6_signal_t signals) :
    m_koid(koid_generate(t)),
    m_signals(signals),
    m_handle_count(0)
{}

kobject::~kobject()
{
    for (auto *t : m_blocked_threads)
        t->wake_on_result(this, j6_status_destroyed);
}

j6_koid_t
kobject::koid_generate(type t)
{
    kassert(t < type::max, "Object type out of bounds");
    uint64_t type_int = static_cast<uint64_t>(t);
    return (type_int << 48) | next_koids[type_int]++;
}

kobject::type
kobject::koid_type(j6_koid_t koid)
{
    return static_cast<type>((koid >> 48) & 0xffffull);
}

void
kobject::assert_signal(j6_signal_t s)
{
    m_signals |= s;
    notify_signal_observers();
}

void
kobject::deassert_signal(j6_signal_t s)
{
    m_signals &= ~s;
}

void
kobject::notify_signal_observers()
{
    size_t i = 0;
    while (i < m_blocked_threads.count()) {
        thread *t = m_blocked_threads[i];

        if (t->wake_on_signals(this, m_signals)) {
            m_blocked_threads.remove_swap_at(i);
        } else {
            ++i;
        }
    }
}

void
kobject::close()
{
    assert_signal(j6_signal_closed);
}

void
kobject::on_no_handles()
{
    assert_signal(j6_signal_no_handles);
}

View File

@@ -13,88 +13,88 @@ class thread;
class kobject
{
public:
    /// Types of kernel objects.
    enum class type : uint16_t
    {
#define OBJECT_TYPE( name, val ) name = val,
#include "j6/tables/object_types.inc"
#undef OBJECT_TYPE
        max
    };

    kobject(type t, j6_signal_t signals = 0ull);
    virtual ~kobject();

    /// Generate a new koid for a given type
    /// \arg t The object type
    /// \returns A new unique koid
    static j6_koid_t koid_generate(type t);

    /// Get the kobject type from a given koid
    /// \arg koid An existing koid
    /// \returns The object type for the koid
    static type koid_type(j6_koid_t koid);

    /// Get this object's type
    inline type get_type() const { return koid_type(m_koid); }

    /// Get this object's koid
    inline j6_koid_t koid() const { return m_koid; }

    /// Set the given signals active on this object
    /// \arg s The set of signals to assert
    void assert_signal(j6_signal_t s);

    /// Clear the given signals on this object
    /// \arg s The set of signals to deassert
    void deassert_signal(j6_signal_t s);

    /// Check if the given signals are set on this object
    /// \arg s The set of signals to check
    inline bool check_signal(j6_signal_t s) const { return (m_signals & s) == s; }

    /// Get the current object signal state
    inline j6_signal_t signals() const { return m_signals; }

    /// Increment the handle refcount
    inline void handle_retain() { ++m_handle_count; }

    /// Decrement the handle refcount
    inline void handle_release() {
        if (--m_handle_count == 0) on_no_handles();
    }

    /// Add the given thread to the list of threads waiting on this object.
    inline void add_blocked_thread(thread *t) { m_blocked_threads.append(t); }

    /// Remove the given thread from the list of threads waiting on this object.
    inline void remove_blocked_thread(thread *t) { m_blocked_threads.remove_swap(t); }

    /// Perform any cleanup actions necessary to mark this object closed
    virtual void close();

    /// Check if this object has been closed
    inline bool closed() const { return check_signal(j6_signal_closed); }

protected:
    /// Interface for subclasses to handle when all handles are closed. Subclasses
    /// should either call the base version, or assert j6_signal_no_handles.
    virtual void on_no_handles();

private:
    kobject() = delete;
    kobject(const kobject &other) = delete;
    kobject(const kobject &&other) = delete;

    /// Notify observers of this object
    /// \arg result The result to pass to the observers
    void notify_signal_observers();

    j6_koid_t m_koid;
    j6_signal_t m_signals;
    uint16_t m_handle_count;

protected:
    kutil::vector<thread*> m_blocked_threads;
};
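
A small sketch of the handle refcount contract the comments above describe. The surrounding function and include path are hypothetical; only the retain/release pairing is taken from the header.

// Assumes an in-kernel context with this header available as "objects/kobject.h".
#include "objects/kobject.h"

void borrow(kobject *obj)
{
    obj->handle_retain();       // one more handle now refers to obj
    // ... hand the object out, use it ...
    obj->handle_release();      // when the count reaches zero, on_no_handles()
                                // runs, and subclasses may delete the object
}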

View File

@@ -14,27 +14,27 @@ process &g_kernel_process = __g_kernel_process_storage.value;
process::process() :
    kobject {kobject::type::process},
    m_next_handle {1},
    m_state {state::running}
{
    j6_handle_t self = add_handle(this);
    kassert(self == self_handle(), "Process self-handle is not 1");
}

// The "kernel process"-only constructor
process::process(page_table *kpml4) :
    kobject {kobject::type::process},
    m_space {kpml4},
    m_next_handle {self_handle()+1},
    m_state {state::running}
{
}

process::~process()
{
    for (auto &it : m_handles)
        if (it.val) it.val->handle_release();
}

process & process::current() { return *current_cpu().process; }
@@ -43,115 +43,115 @@ process & process::kernel_process() { return g_kernel_process; }
process *
process::create_kernel_process(page_table *pml4)
{
    return new (&g_kernel_process) process {pml4};
}

void
process::exit(int32_t code)
{
    // TODO: make this thread-safe
    m_state = state::exited;
    m_return_code = code;
    close();

    for (auto *thread : m_threads) {
        thread->exit(code);
    }

    if (this == current_cpu().process)
        scheduler::get().schedule();
}

void
process::update()
{
    kassert(m_threads.count() > 0, "process::update with zero threads!");

    size_t i = 0;
    uint32_t status = 0;
    while (i < m_threads.count()) {
        thread *th = m_threads[i];
        if (th->has_state(thread::state::exited)) {
            status = th->m_return_code;
            m_threads.remove_swap_at(i);
            continue;
        }
        i++;
    }

    if (m_threads.count() == 0) {
        // TODO: What really is the return code in this case?
        exit(status);
    }
}

thread *
process::create_thread(uint8_t priority, bool user)
{
    if (priority == default_priority)
        priority = scheduler::default_priority;

    thread *th = new thread(*this, priority);
    kassert(th, "Failed to create thread!");

    if (user) {
        uintptr_t stack_top = stacks_top - (m_threads.count() * stack_size);

        vm_flags flags = vm_flags::zero|vm_flags::write;
        vm_area *vma = new vm_area_open(stack_size, flags);
        m_space.add(stack_top - stack_size, vma);

        // Space for null frame - because the page gets zeroed on
        // allocation, just pointing rsp here does the trick
        th->tcb()->rsp3 = stack_top - 2 * sizeof(uint64_t);
    }

    m_threads.append(th);
    scheduler::get().add_thread(th->tcb());
    return th;
}

bool
process::thread_exited(thread *th)
{
    kassert(&th->m_parent == this, "Process got thread_exited for non-child!");

    uint32_t status = th->m_return_code;
    m_threads.remove_swap(th);
    remove_handle(th->self_handle());
    delete th;

    // TODO: delete the thread's stack VMA

    if (m_threads.count() == 0) {
        exit(status);
        return true;
    }

    return false;
}

j6_handle_t
process::add_handle(kobject *obj)
{
    if (!obj)
        return j6_handle_invalid;

    obj->handle_retain();
    j6_handle_t handle = m_next_handle++;
    m_handles.insert(handle, obj);
    return handle;
}

bool
process::remove_handle(j6_handle_t handle)
{
    kobject *obj = m_handles.find(handle);
    if (obj) obj->handle_release();
    return m_handles.erase(handle);
}

kobject *
process::lookup_handle(j6_handle_t handle)
{
    return m_handles.find(handle);
}

View File

@@ -9,89 +9,89 @@
#include "vm_space.h"
class process :
public kobject
public kobject
{
public:
/// Top of memory area where thread stacks are allocated
constexpr static uintptr_t stacks_top = 0x0000800000000000;
/// Top of memory area where thread stacks are allocated
constexpr static uintptr_t stacks_top = 0x0000800000000000;
/// Size of userspace thread stacks
constexpr static size_t stack_size = 0x4000000; // 64MiB
/// Size of userspace thread stacks
constexpr static size_t stack_size = 0x4000000; // 64MiB
/// Value that represents default priority
constexpr static uint8_t default_priority = 0xff;
/// Value that represents default priority
constexpr static uint8_t default_priority = 0xff;
/// Constructor.
process();
/// Constructor.
process();
/// Destructor.
virtual ~process();
/// Destructor.
virtual ~process();
static constexpr kobject::type type = kobject::type::process;
static constexpr kobject::type type = kobject::type::process;
/// Get the currently executing process.
static process & current();
/// Get the currently executing process.
static process & current();
/// Terminate this process.
/// \arg code The return code to exit with.
void exit(int32_t code);
/// Terminate this process.
/// \arg code The return code to exit with.
void exit(int32_t code);
/// Update internal bookkeeping about threads.
void update();
/// Update internal bookkeeping about threads.
void update();
/// Get the process' virtual memory space
vm_space & space() { return m_space; }
/// Get the process' virtual memory space
vm_space & space() { return m_space; }
/// Create a new thread in this process
/// \args priority The new thread's scheduling priority
/// \args user If true, create a userspace stack for this thread
/// \returns The newly created thread object
thread * create_thread(uint8_t priorty = default_priority, bool user = true);
/// Create a new thread in this process
/// \args priority The new thread's scheduling priority
/// \args user If true, create a userspace stack for this thread
/// \returns The newly created thread object
thread * create_thread(uint8_t priorty = default_priority, bool user = true);
/// Start tracking an object with a handle.
/// \args obj The object this handle refers to
/// \returns The new handle for this object
j6_handle_t add_handle(kobject *obj);
/// Start tracking an object with a handle.
/// \args obj The object this handle refers to
/// \returns The new handle for this object
j6_handle_t add_handle(kobject *obj);
/// Stop tracking an object with a handle.
/// \args handle The handle that refers to the object
/// \returns True if the handle was removed
bool remove_handle(j6_handle_t handle);
/// Stop tracking an object with a handle.
/// \args handle The handle that refers to the object
/// \returns True if the handle was removed
bool remove_handle(j6_handle_t handle);
/// Lookup an object for a handle
/// \args handle The handle to the object
/// \returns Pointer to the object, or null if not found
kobject * lookup_handle(j6_handle_t handle);
/// Lookup an object for a handle
/// \args handle The handle to the object
/// \returns Pointer to the object, or null if not found
kobject * lookup_handle(j6_handle_t handle);
/// Inform the process of an exited thread
/// \args th The thread which has exited
/// \returns True if this thread ending has ended the process
bool thread_exited(thread *th);
/// Inform the process of an exited thread
/// \args th The thread which has exited
/// \returns True if this thread ending has ended the process
bool thread_exited(thread *th);
/// Get the handle for this process to refer to itself
inline j6_handle_t self_handle() const { return 1; }
/// Get the handle for this process to refer to itself
inline j6_handle_t self_handle() const { return 1; }
/// Get the process object that owns kernel threads and the
/// kernel address space
static process & kernel_process();
/// Get the process object that owns kernel threads and the
/// kernel address space
static process & kernel_process();
/// Create the special kernel process that owns kernel tasks
/// \arg pml4 The kernel-only pml4
/// \returns The kernel process object
static process * create_kernel_process(page_table *pml4);
/// Create the special kernel process that owns kernel tasks
/// \arg pml4 The kernel-only pml4
/// \returns The kernel process object
static process * create_kernel_process(page_table *pml4);
private:
// This constructor is called by create_kernel_process
process(page_table *kpml4);
// This constructor is called by create_kernel_process
process(page_table *kpml4);
int32_t m_return_code;
int32_t m_return_code;
vm_space m_space;
vm_space m_space;
kutil::vector<thread*> m_threads;
kutil::map<j6_handle_t, kobject*> m_handles;
j6_handle_t m_next_handle;
kutil::vector<thread*> m_threads;
kutil::map<j6_handle_t, kobject*> m_handles;
j6_handle_t m_next_handle;
enum class state : uint8_t { running, exited };
state m_state;
enum class state : uint8_t { running, exited };
state m_state;
};
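
For orientation, a minimal sketch of the handle-table round trip declared above. The helper name is hypothetical; only the add/lookup/remove calls come from the header.

// Assumes an in-kernel context; "obj" is any existing kobject (e.g. an endpoint).
j6_handle_t publish(process &p, kobject *obj)
{
    j6_handle_t h = p.add_handle(obj);     // retains obj, returns a fresh handle
    kobject *back = p.lookup_handle(h);    // resolves back to the same object
    (void)back;
    return h;                              // p.remove_handle(h) would release it again
}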

View File

@@ -5,14 +5,14 @@
#include "objects/kobject.h"
class system :
public kobject
public kobject
{
public:
static constexpr kobject::type type = kobject::type::event;
static constexpr kobject::type type = kobject::type::event;
inline static system & get() { return s_instance; }
inline static system & get() { return s_instance; }
private:
static system s_instance;
system() : kobject(type::system) {}
static system s_instance;
system() : kobject(type::system) {}
};

View File

@@ -12,35 +12,35 @@ static constexpr j6_signal_t thread_default_signals = 0;
extern vm_area_guarded &g_kernel_stacks;

thread::thread(process &parent, uint8_t pri, uintptr_t rsp0) :
    kobject(kobject::type::thread, thread_default_signals),
    m_parent(parent),
    m_state(state::loading),
    m_wait_type(wait_type::none),
    m_wait_data(0),
    m_wait_obj(0)
{
    parent.space().initialize_tcb(m_tcb);
    m_tcb.priority = pri;

    if (!rsp0)
        setup_kernel_stack();
    else
        m_tcb.rsp0 = rsp0;

    m_self_handle = parent.add_handle(this);
}

thread::~thread()
{
    g_kernel_stacks.return_section(m_tcb.kernel_stack);
}

thread *
thread::from_tcb(TCB *tcb)
{
    static ptrdiff_t offset =
        -1 * static_cast<ptrdiff_t>(offsetof(thread, m_tcb));
    return reinterpret_cast<thread*>(kutil::offset_pointer(tcb, offset));
}

thread & thread::current() { return *current_cpu().thread; }
@@ -50,176 +50,176 @@ inline void schedule_if_current(thread *t) { if (t == current_cpu().thread) sche
void
thread::wait_on_signals(j6_signal_t signals)
{
    m_wait_type = wait_type::signal;
    m_wait_data = signals;
    clear_state(state::ready);

    schedule_if_current(this);
}

void
thread::wait_on_time(uint64_t t)
{
    m_wait_type = wait_type::time;
    m_wait_data = t;
    clear_state(state::ready);

    schedule_if_current(this);
}

void
thread::wait_on_object(kobject *o)
{
    m_wait_type = wait_type::object;
    m_wait_data = reinterpret_cast<uint64_t>(o);
    clear_state(state::ready);

    schedule_if_current(this);
}

bool
thread::wake_on_signals(kobject *obj, j6_signal_t signals)
{
    if (m_wait_type != wait_type::signal ||
        (signals & m_wait_data) == 0)
        return false;

    m_wait_type = wait_type::none;
    m_wait_result = j6_status_ok;
    m_wait_data = signals;
    m_wait_obj = obj->koid();
    set_state(state::ready);
    return true;
}

bool
thread::wake_on_time(uint64_t now)
{
    if (m_wait_type != wait_type::time ||
        now < m_wait_data)
        return false;

    m_wait_type = wait_type::none;
    m_wait_result = j6_status_ok;
    m_wait_data = now;
    m_wait_obj = 0;
    set_state(state::ready);
    return true;
}

bool
thread::wake_on_object(kobject *o)
{
    if (m_wait_type != wait_type::object ||
        reinterpret_cast<uint64_t>(o) != m_wait_data)
        return false;

    m_wait_type = wait_type::none;
    m_wait_result = j6_status_ok;
    m_wait_obj = o->koid();
    set_state(state::ready);
    return true;
}

void
thread::wake_on_result(kobject *obj, j6_status_t result)
{
    m_wait_type = wait_type::none;
    m_wait_result = result;
    m_wait_data = 0;
    m_wait_obj = obj->koid();
    set_state(state::ready);
}

void
thread::exit(int32_t code)
{
    m_return_code = code;
    set_state(state::exited);
    clear_state(state::ready);
    close();

    schedule_if_current(this);
}

void
thread::add_thunk_kernel(uintptr_t rip)
{
    // This adds just enough values to the top of the
    // kernel stack to come out of task_switch correctly
    // and start executing at rip (still in kernel mode)

    m_tcb.rsp -= sizeof(uintptr_t) * 7;
    uintptr_t *stack = reinterpret_cast<uintptr_t*>(m_tcb.rsp);

    stack[6] = rip;         // return rip
    stack[5] = m_tcb.rsp0;  // rbp
    stack[4] = 0xbbbbbbbb;  // rbx
    stack[3] = 0x12121212;  // r12
    stack[2] = 0x13131313;  // r13
    stack[1] = 0x14141414;  // r14
    stack[0] = 0x15151515;  // r15
}

void
thread::add_thunk_user(uintptr_t rip3, uintptr_t rip0, uint64_t flags)
{
    // This sets up the stack to:
    // a) come out of task_switch and return to rip0 (default is the
    //    kernel/user trampoline) (via add_thunk_kernel) - if this is
    //    changed, it needs to end up at the trampoline with the stack
    //    as it was
    // b) come out of the kernel/user trampoline and start executing
    //    in user mode at rip

    m_tcb.rsp -= sizeof(uintptr_t) * 8;
    uintptr_t *stack = reinterpret_cast<uintptr_t*>(m_tcb.rsp);
    flags |= 0x200;

    stack[7] = rip3;        // return rip in rcx
    stack[6] = m_tcb.rsp3;  // rbp
    stack[5] = 0xbbbbbbbb;  // rbx
    stack[4] = flags;       // r11 sets RFLAGS
    stack[3] = 0x12121212;  // r12
    stack[2] = 0x13131313;  // r13
    stack[1] = 0x14141414;  // r14
    stack[0] = 0x15151515;  // r15

    static const uintptr_t trampoline =
        reinterpret_cast<uintptr_t>(kernel_to_user_trampoline);
    add_thunk_kernel(rip0 ? rip0 : trampoline);
}

void
thread::setup_kernel_stack()
{
    using memory::frame_size;
    using memory::kernel_stack_pages;
    static constexpr size_t stack_bytes = kernel_stack_pages * frame_size;

    constexpr unsigned null_frame_entries = 2;
    constexpr size_t null_frame_size = null_frame_entries * sizeof(uint64_t);

    uintptr_t stack_addr = g_kernel_stacks.get_section();
    uintptr_t stack_end = stack_addr + stack_bytes;

    uint64_t *null_frame = reinterpret_cast<uint64_t*>(stack_end - null_frame_size);
    for (unsigned i = 0; i < null_frame_entries; ++i)
        null_frame[i] = 0;

    log::debug(logs::memory, "Created kernel stack at %016lx size 0x%lx",
        stack_addr, stack_bytes);

    m_tcb.kernel_stack = stack_addr;
    m_tcb.rsp0 = reinterpret_cast<uintptr_t>(null_frame);
    m_tcb.rsp = m_tcb.rsp0;
}

thread *
thread::create_idle_thread(process &kernel, uint8_t pri, uintptr_t rsp0)
{
    thread *idle = new thread(kernel, pri, rsp0);
    idle->set_state(state::constant);
    idle->set_state(state::ready);
    return idle;
}

View File

@@ -10,179 +10,179 @@ class process;
struct TCB
{
    // Data used by assembly task control routines. If you change any of these,
    // be sure to change the assembly definitions in 'tasking.inc'
    uintptr_t rsp;
    uintptr_t rsp0;
    uintptr_t rsp3;
    uintptr_t pml4;

    uint8_t priority;
    // note: 3 bytes padding

    // TODO: move state into TCB?

    uintptr_t kernel_stack;

    uint32_t time_left;
    uint64_t last_ran;
};

using tcb_list = kutil::linked_list<TCB>;
using tcb_node = tcb_list::item_type;

class thread :
    public kobject
{
public:
    enum class wait_type : uint8_t { none, signal, time, object };

    enum class state : uint8_t {
        ready    = 0x01,
        loading  = 0x02,
        exited   = 0x04,
        constant = 0x80,
        none     = 0x00
    };

    /// Get the pointer to the thread object containing this TCB
    static thread * from_tcb(TCB *tcb);

    /// Destructor
    virtual ~thread();

    static constexpr kobject::type type = kobject::type::thread;

    /// Get the currently executing thread.
    static thread & current();

    /// Get the `ready` state of the thread.
    /// \returns True if the thread is ready to execute.
    inline bool ready() const { return has_state(state::ready); }

    /// Get the `loading` state of the thread.
    /// \returns True if the thread has not finished loading.
    inline bool loading() const { return has_state(state::loading); }

    /// Get the `constant` state of the thread.
    /// \returns True if the thread has constant priority.
    inline bool constant() const { return has_state(state::constant); }

    /// Get the thread priority.
    inline uint8_t priority() const { return m_tcb.priority; }

    /// Set the thread priority.
    /// \arg p The new thread priority
    inline void set_priority(uint8_t p) { if (!constant()) m_tcb.priority = p; }

    /// Block the thread, waiting on an object's signals.
    /// \arg signals Mask of signals to wait for
    void wait_on_signals(j6_signal_t signals);

    /// Block the thread, waiting for a given clock value
    /// \arg t Clock value to wait for
    void wait_on_time(uint64_t t);

    /// Block the thread, waiting on the given object
    /// \arg o The object that should wake this thread
    void wait_on_object(kobject *o);

    /// Wake the thread if it is waiting on signals.
    /// \arg obj Object that changed signals
    /// \arg signals Signal state of the object
    /// \returns True if this action unblocked the thread
    bool wake_on_signals(kobject *obj, j6_signal_t signals);

    /// Wake the thread if it is waiting on the clock.
    /// \arg now Current clock value
    /// \returns True if this action unblocked the thread
    bool wake_on_time(uint64_t now);

    /// Wake the thread if it is waiting on the given object.
    /// \arg o Object trying to wake the thread
    /// \returns True if this action unblocked the thread
    bool wake_on_object(kobject *o);

    /// Wake the thread with a given result code.
    /// \arg obj Object that changed signals
    /// \arg result Result code to return to the thread
    void wake_on_result(kobject *obj, j6_status_t result);

    /// Get the result status code from the last blocking operation
    j6_status_t get_wait_result() const { return m_wait_result; }

    /// Get the current blocking operation's wait data
    uint64_t get_wait_data() const { return m_wait_data; }

    /// Get the current blocking operation's wait object (as a koid)
    j6_koid_t get_wait_object() const { return m_wait_obj; }

    inline bool has_state(state s) const {
        return static_cast<uint8_t>(m_state) & static_cast<uint8_t>(s);
    }

    inline void set_state(state s) {
        m_state = static_cast<state>(static_cast<uint8_t>(m_state) | static_cast<uint8_t>(s));
    }

    inline void clear_state(state s) {
        m_state = static_cast<state>(static_cast<uint8_t>(m_state) & ~static_cast<uint8_t>(s));
    }

    inline tcb_node * tcb() { return &m_tcb; }
    inline process & parent() { return m_parent; }

    /// Terminate this thread.
    /// \arg code The return code to exit with.
    void exit(int32_t code);

    /// Add a stack header that returns to the given address in kernel space.
    /// \arg rip The address to return to, must be kernel space
    void add_thunk_kernel(uintptr_t rip);

    /// Add a stack header that returns to the given address in user space
    /// via a function in kernel space.
    /// \arg rip3 The user space address to return to
    /// \arg rip0 The kernel function to pass through, optional
    /// \arg flags Extra RFLAGS values to set, optional
    void add_thunk_user(uintptr_t rip3, uintptr_t rip0 = 0, uint64_t flags = 0);

    /// Get the handle representing this thread to its process
    j6_handle_t self_handle() const { return m_self_handle; }

    /// Create the kernel idle thread
    /// \arg kernel The process object that owns kernel tasks
    /// \arg pri The idle thread priority value
    /// \arg rsp The existing stack for the idle thread
    static thread * create_idle_thread(process &kernel, uint8_t pri, uintptr_t rsp);

private:
    thread() = delete;
    thread(const thread &other) = delete;
    thread(const thread &&other) = delete;
    friend class process;

    /// Constructor. Used when a kernel stack already exists.
    /// \arg parent The process which owns this thread
    /// \arg pri Initial priority level of this thread
    /// \arg rsp0 The existing kernel stack rsp, 0 for none
    thread(process &parent, uint8_t pri, uintptr_t rsp0 = 0);

    /// Set up a new empty kernel stack for this thread.
    void setup_kernel_stack();

    tcb_node m_tcb;
    process &m_parent;

    state m_state;
    wait_type m_wait_type;
    // There should be 1 byte of padding here

    int32_t m_return_code;

    uint64_t m_wait_data;
    j6_status_t m_wait_result;
    j6_koid_t m_wait_obj;

    j6_handle_t m_self_handle;
};
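
Purely as an illustration of the bit-flag state accessors above (not the real scheduler logic), a hypothetical sketch:

// Assumes an in-kernel context; "t" is some existing thread.
void park_and_unpark(thread &t)
{
    t.clear_state(thread::state::ready);   // block: drop the ready bit
    // ... later, a wake_on_* call on the thread does the equivalent of ...
    t.set_state(thread::state::ready);     // unblock: set the ready bit again
    bool runnable = t.ready();             // same as has_state(state::ready)
    (void)runnable;
}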

View File

@@ -7,10 +7,10 @@
using memory::frame_size;

vm_area::vm_area(size_t size, vm_flags flags) :
    m_size {size},
    m_flags {flags},
    m_spaces {m_vector_static, 0, static_size},
    kobject {kobject::type::vma}
{
}
@@ -19,50 +19,50 @@ vm_area::~vm_area() {}
bool
vm_area::add_to(vm_space *space)
{
    for (auto *s : m_spaces) {
        if (s == space)
            return true;
    }
    m_spaces.append(space);
    return true;
}

bool
vm_area::remove_from(vm_space *space)
{
    m_spaces.remove_swap(space);
    return
        !m_spaces.count() &&
        !(m_flags && vm_flags::mmio);
}

void
vm_area::on_no_handles()
{
    kobject::on_no_handles();
    delete this;
}

size_t
vm_area::resize(size_t size)
{
    if (can_resize(size))
        m_size = size;
    return m_size;
}

bool
vm_area::can_resize(size_t size)
{
    for (auto *space : m_spaces)
        if (!space->can_resize(*this, size))
            return false;
    return true;
}

vm_area_fixed::vm_area_fixed(uintptr_t start, size_t size, vm_flags flags) :
    m_start {start},
    vm_area {size, flags}
{
}
@@ -70,22 +70,22 @@ vm_area_fixed::~vm_area_fixed() {}
size_t vm_area_fixed::resize(size_t size)
{
    // Not resizable
    return m_size;
}

bool vm_area_fixed::get_page(uintptr_t offset, uintptr_t &phys)
{
    if (offset > m_size)
        return false;

    phys = m_start + offset;
    return true;
}

vm_area_untracked::vm_area_untracked(size_t size, vm_flags flags) :
    vm_area {size, flags}
{
}
@@ -94,24 +94,24 @@ vm_area_untracked::~vm_area_untracked() {}
bool
vm_area_untracked::get_page(uintptr_t offset, uintptr_t &phys)
{
    if (offset > m_size)
        return false;

    return frame_allocator::get().allocate(1, &phys);
}

bool
vm_area_untracked::add_to(vm_space *space)
{
    if (!m_spaces.count())
        return vm_area::add_to(space);
    return m_spaces[0] == space;
}

vm_area_open::vm_area_open(size_t size, vm_flags flags) :
    m_mapped {nullptr},
    vm_area {size, flags}
{
}
@@ -120,15 +120,15 @@ vm_area_open::~vm_area_open() {}
bool
vm_area_open::get_page(uintptr_t offset, uintptr_t &phys)
{
    return page_tree::find_or_add(m_mapped, offset, phys);
}

vm_area_guarded::vm_area_guarded(uintptr_t start, size_t buf_pages, size_t size, vm_flags flags) :
    m_start {start},
    m_pages {buf_pages},
    m_next {memory::frame_size},
    vm_area_untracked {size, flags}
{
}
@@ -137,33 +137,33 @@ vm_area_guarded::~vm_area_guarded() {}
uintptr_t
vm_area_guarded::get_section()
{
    if (m_cache.count() > 0) {
        return m_cache.pop();
    }

    uintptr_t addr = m_next;
    m_next += (m_pages + 1) * memory::frame_size;
    return m_start + addr;
}

void
vm_area_guarded::return_section(uintptr_t addr)
{
    m_cache.append(addr);
}

bool
vm_area_guarded::get_page(uintptr_t offset, uintptr_t &phys)
{
    if (offset > m_next)
        return false;

    // make sure this isn't in a guard page. (sections are
    // m_pages big plus 1 leading guard page, so page 0 is
    // invalid)
    if ((offset >> 12) % (m_pages+1) == 0)
        return false;

    return vm_area_untracked::get_page(offset, phys);
}

View File

@@ -20,158 +20,158 @@ enum class vm_flags : uint32_t
#define VM_FLAG(name, v) name = v,
#include "j6/tables/vm_flags.inc"
#undef VM_FLAG

    driver_mask = 0x000fffff, ///< flags allowed via syscall for drivers
    user_mask   = 0x0000ffff, ///< flags allowed via syscall for non-drivers
};

/// Virtual memory areas allow control over memory allocation
class vm_area :
    public kobject
{
public:
    static constexpr kobject::type type = kobject::type::vma;

    /// Constructor.
    /// \arg size Initial virtual size of the memory area
    /// \arg flags Flags for this memory area
    vm_area(size_t size, vm_flags flags = vm_flags::none);
    virtual ~vm_area();

    /// Get the current virtual size of the memory area
    inline size_t size() const { return m_size; }

    /// Get the flags set for this area
    inline vm_flags flags() const { return m_flags; }

    /// Track that this area was added to a vm_space
    /// \arg space The space to add this area to
    /// \returns False if this area cannot be added
    virtual bool add_to(vm_space *space);

    /// Track that this area was removed from a vm_space
    /// \arg space The space that is removing this area
    /// \returns True if the removing space should free the pages
    ///          mapped for this area
    virtual bool remove_from(vm_space *space);

    /// Change the virtual size of the memory area. This may cause
    /// deallocation if the new size is smaller than the current size.
    /// Note that if resizing is unsuccessful, the previous size will
    /// be returned.
    /// \arg size The desired new virtual size
    /// \returns The new virtual size
    virtual size_t resize(size_t size);

    /// Get the physical page for the given offset
    /// \arg offset The offset into the VMA
    /// \arg phys [out] Receives the physical page address, if any
    /// \returns True if there should be a page at the given offset
    virtual bool get_page(uintptr_t offset, uintptr_t &phys) = 0;

protected:
    virtual void on_no_handles() override;
    bool can_resize(size_t size);

    size_t m_size;
    vm_flags m_flags;
    kutil::vector<vm_space*> m_spaces;

    // Initial static space for m_spaces - most areas will never grow
    // beyond this size, so avoid allocations
    static constexpr size_t static_size = 2;
    vm_space *m_vector_static[static_size];
};

/// A shareable but non-allocatable memory area of contiguous physical
/// addresses (like mmio)
class vm_area_fixed :
    public vm_area
{
public:
    /// Constructor.
    /// \arg start Starting physical address of this area
    /// \arg size Size of the physical memory area
    /// \arg flags Flags for this memory area
    vm_area_fixed(uintptr_t start, size_t size, vm_flags flags = vm_flags::none);
    virtual ~vm_area_fixed();

    virtual size_t resize(size_t size) override;
    virtual bool get_page(uintptr_t offset, uintptr_t &phys) override;

private:
    uintptr_t m_start;
};

/// Area that allows open allocation
class vm_area_open :
    public vm_area
{
public:
    /// Constructor.
    /// \arg size Initial virtual size of the memory area
    /// \arg flags Flags for this memory area
    vm_area_open(size_t size, vm_flags flags);
    virtual ~vm_area_open();

    virtual bool get_page(uintptr_t offset, uintptr_t &phys) override;

private:
    page_tree *m_mapped;
};

/// Area that does not track its allocations and thus cannot be shared
class vm_area_untracked :
    public vm_area
{
public:
    /// Constructor.
    /// \arg size Initial virtual size of the memory area
    /// \arg flags Flags for this memory area
    vm_area_untracked(size_t size, vm_flags flags);
    virtual ~vm_area_untracked();

    virtual bool add_to(vm_space *space) override;
    virtual bool get_page(uintptr_t offset, uintptr_t &phys) override;
};

/// Area split into standard-sized segments, separated by guard pages.
/// Based on vm_area_untracked, can not be shared.
class vm_area_guarded :
    public vm_area_untracked
{
public:
    /// Constructor.
    /// \arg start Initial address where this area is mapped
    /// \arg sec_pages Pages in an individual section
    /// \arg size Initial virtual size of the memory area
    /// \arg flags Flags for this memory area
    vm_area_guarded(
        uintptr_t start,
        size_t sec_pages,
        size_t size,
        vm_flags flags);
    virtual ~vm_area_guarded();

    /// Get an available section in this area
    uintptr_t get_section();

    /// Return a section address to the available pool
    void return_section(uintptr_t addr);

    virtual bool get_page(uintptr_t offset, uintptr_t &phys) override;

private:
    kutil::vector<uintptr_t> m_cache;
    uintptr_t m_start;
    size_t m_pages;
    uintptr_t m_next;
};
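
Finally, a small sketch of the guarded-section pattern this class implements (each section is preceded by a guard page). The caller is hypothetical; only get_section and return_section come from the header.

// Assumes an in-kernel context; "stacks" is an existing guarded area whose
// sections are some fixed number of pages long.
uintptr_t borrow_section(vm_area_guarded &stacks)
{
    uintptr_t s = stacks.get_section();    // next free section, just past its guard page
    // ... use the section, e.g. as a kernel stack or buffer ...
    stacks.return_section(s);              // recycle the address for a later caller
    return s;
}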