jsix/src/kernel/objects/vm_area.cpp
Justin C. Miller f7ae2e2220 [kernel] Re-design thread blocking
In preparation for the new mailbox IPC model, thread blocking needed an
overhaul. The `wait_on_*` and `wake_on_*` methods are gone, and the
`block()` and `wake()` calls on threads now pass a value between the
waker and the blocked thread.
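
A minimal sketch of the new handoff in terms of this commit's block()
and wake() (the surrounding names and the value type are illustrative,
not the actual kernel interfaces):

    // Blocked side: block() puts the current thread to sleep and
    // returns whatever value the eventual waker supplies.
    uint64_t value = current_thread->block();

    // Waker side: wake() readies the thread and hands it a value,
    // which becomes the return value of that thread's block() call.
    waiting_thread->wake(value);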

As part of this change, the concept of signals on the base kobject class
was removed, along with the queue of blocked threads waiting on any
given object. Signals are now exclusively the domain of the event object
type, and the new wait_queue utility class helps manage waiting threads
when an object actually needs this functionality. In some cases (e.g.,
the logger) an event object is used instead of the lower-level wait_queue.
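
Roughly, the wait_queue idea looks like this (a sketch only: locking is
elided, and thread::current(), the deque operations, and the exact
template parameters are assumptions, not the real interfaces):

    class wait_queue
    {
    public:
        // park the calling thread until a waker pops it
        uint64_t wait() {
            thread *self = thread::current();
            m_threads.push_back(self);
            return self->block();
        }

        // wake the oldest waiter, handing it `value`
        void pop(uint64_t value) {
            if (m_threads.empty())
                return;
            thread *t = m_threads.pop_front();
            t->wake(value);
        }

    private:
        util::deque<thread*> m_threads; // per this commit, deque-backed
    };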

Since this change has a lot of ramifications, this large commit includes
the following additional changes:

- The j6_object_wait, j6_object_wait_many, and j6_thread_pause syscalls
  have been removed.
- The j6_event_clear syscall has been removed - events are now "cleared"
  by reading them. A new j6_event_wait syscall has been added to read
  events (sketched below, after this list).
- The generic close() method on kobject has been removed.
- The on_no_handles() method on kobject now deletes the object by
  default, and must be overridden by classes that should not be deleted.
- The j6_system_bind_irq syscall now takes an event handle, as well as a
  signal that the IRQ should set on the event. IRQs will cause a waiting
  thread to be woken with the appropriate bit set.
- Waking threads on timeout has been simplified to a wake_timeout()
  accessor that returns a timestamp.
- The new wait_queue uses util::deque, which led to the discovery of
  two bugs in the deque implementation: deques could still have a single
  array allocated even when empty() returned true, and newly allocated
  arrays were not being zeroed first.
- Exposed a new erase() method on util::map that takes a node pointer
  instead of a key, skipping lookup.
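
To illustrate the clear-on-read event semantics from the list above
(the exact j6_event_wait signature is an assumption here, not taken
from the syscall definitions):

    j6_signal_t signals = 0;
    j6_status_t s = j6_event_wait(event, &signals);
    // On success, `signals` holds the bits that were set on the event,
    // and reading them has also cleared them - there is no separate
    // j6_event_clear step anymore.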
2022-02-22 00:00:15 -08:00


#include "assert.h"
#include "frame_allocator.h"
#include "memory.h"
#include "objects/vm_area.h"
#include "page_tree.h"
#include "vm_space.h"
namespace obj {
using mem::frame_size;

vm_area::vm_area(size_t size, vm_flags flags) :
    kobject {kobject::type::vma},
    m_size {size},
    m_flags {flags},
    m_spaces {m_vector_static, 0, static_size}
{
}
vm_area::~vm_area() {}
bool
vm_area::add_to(vm_space *space)
{
    // adding to a space this area is already in is a no-op
    for (auto *s : m_spaces) {
        if (s == space)
            return true;
    }

    m_spaces.append(space);
    return true;
}

void
vm_area::remove_from(vm_space *space)
{
    m_spaces.remove_swap(space);

    // this object lives as long as either a space maps it or a handle
    // refers to it; when the last of both is gone, delete it
    if (!m_spaces.count() && !handle_count())
        delete this;
}

void
vm_area::on_no_handles()
{
    if (!m_spaces.count())
        delete this;
}

size_t
vm_area::resize(size_t size)
{
    if (can_resize(size))
        m_size = size;
    return m_size;
}

bool
vm_area::can_resize(size_t size)
{
    // every space this area is mapped into must allow the resize
    for (auto *space : m_spaces)
        if (!space->can_resize(*this, size))
            return false;
    return true;
}

vm_area_fixed::vm_area_fixed(uintptr_t start, size_t size, vm_flags flags) :
    vm_area {size, flags},
    m_start {start}
{
}

vm_area_fixed::~vm_area_fixed()
{
    // MMIO ranges were never taken from the frame allocator, so there
    // is nothing to give back
    if (m_flags && vm_flags::mmio)
        return;

    size_t pages = mem::page_count(m_size);
    frame_allocator::get().free(m_start, pages);
}

size_t
vm_area_fixed::resize(size_t size)
{
    // fixed areas are not resizable
    return m_size;
}

bool
vm_area_fixed::get_page(uintptr_t offset, uintptr_t &phys)
{
    if (offset >= m_size)
        return false;

    phys = m_start + offset;
    return true;
}

vm_area_untracked::vm_area_untracked(size_t size, vm_flags flags) :
    vm_area {size, flags}
{
}

vm_area_untracked::~vm_area_untracked()
{
    kassert(false, "An untracked VMA's pages cannot be reclaimed, leaking memory");
}

bool
vm_area_untracked::get_page(uintptr_t offset, uintptr_t &phys)
{
    if (offset >= m_size)
        return false;

    // untracked areas hand out fresh frames without remembering them
    return frame_allocator::get().allocate(1, &phys);
}

bool
vm_area_untracked::add_to(vm_space *space)
{
    // an untracked area may only ever be mapped into a single space
    if (!m_spaces.count())
        return vm_area::add_to(space);
    return m_spaces[0] == space;
}

vm_area_open::vm_area_open(size_t size, vm_flags flags) :
    vm_area {size, flags},
    m_mapped {nullptr}
{
}

vm_area_open::~vm_area_open()
{
    // the page_tree will free its pages when deleted
    delete m_mapped;
}

bool
vm_area_open::get_page(uintptr_t offset, uintptr_t &phys)
{
    return page_tree::find_or_add(m_mapped, offset, phys);
}

vm_area_guarded::vm_area_guarded(uintptr_t start, size_t buf_pages, size_t size, vm_flags flags) :
    vm_area_open {size, flags},
    m_start {start},
    m_pages {buf_pages},
    m_next {mem::frame_size}
{
}

vm_area_guarded::~vm_area_guarded() {}

uintptr_t
vm_area_guarded::get_section()
{
    // reuse a previously-returned section if one is cached
    if (m_cache.count() > 0)
        return m_cache.pop();

    uintptr_t addr = m_next;
    m_next += (m_pages + 1) * mem::frame_size;
    return m_start + addr;
}

void
vm_area_guarded::return_section(uintptr_t addr)
{
    m_cache.append(addr);
}

bool
vm_area_guarded::get_page(uintptr_t offset, uintptr_t &phys)
{
    if (offset >= m_next)
        return false;

    // make sure this isn't in a guard page: sections are m_pages big
    // plus 1 leading guard page, so every (m_pages+1)th page index,
    // starting at 0, is invalid. (offset >> 12 is the page index for
    // 4 KiB frames.)
    if ((offset >> 12) % (m_pages + 1) == 0)
        return false;

    return vm_area_open::get_page(offset, phys);
}

} // namespace obj
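
For clarity, here is the guarded area's layout worked through with
small numbers (values chosen purely for illustration):

    // Suppose m_pages == 2. Each section occupies m_pages + 1 == 3
    // page slots: one leading guard page, then two usable pages.
    //
    //   slot:   0      1   2   3      4   5   6      ...
    //           guard  S0  S0  guard  S1  S1  guard
    //
    // get_section() returns m_start + 1 * frame_size for S0, then
    // m_start + 4 * frame_size for S1, and so on. get_page() rejects
    // any offset whose page index (offset >> 12) is a multiple of
    // m_pages + 1 - exactly the guard slots 0, 3, 6, ...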