jsix_import/src/kernel/objects/vm_area.cpp
Justin C. Miller f5208d1641 [all] Remove dependencies on non-freestanding libc
This is the first of two rather big changes to clean up includes
throughout the project. In this commit, the implicit semi-dependency on
libc that bonnibel adds to every module is removed. Previously, I was
sloppy with includes of libc headers and include directory order. Now,
the freestanding headers from libc are split out into a new libc_free
module, and every module gets an implicit, real dependency on it unless
`no_libc` is set to `True`. The full libc must be explicitly listed as a
dependency in order to be used.

Several things needed to change in order to do this:

- Many places that cannot depend on libc use `memset` or `memcpy`; the
  kernel has carried its own basic implementations for this reason. Those
  functions now move into the lower-level `j6/memutils.h`, and libc merely
  references them. Other modules are free to reference those functions
  from libj6 instead. (A sketch of such freestanding implementations
  appears after this list.)
- The kernel's `assert.h` was renamed `kassert.h` (matching its `kassert`
  function) so that the new `util/assert.h` can use `__has_include` to
  detect it and keep the `assert` macro usable in libutil code. (See the
  `__has_include` sketch below.)
- Several implementation header files under `__libj6/` also moved under
  the new libc_free.
- A new `include_phase` property has been added to modules for Bonnibel,
  which can be "normal" (the default) or "late", which uses `-idirafter`
  instead of `-I` for includes.
- Since `<utility>` and `<new>` are not freestanding, implementations of
  `remove_reference`, `forward`, `move`, and `swap` were added to the
  `util` namespace to replace those from `std`, and `util/new.h` was
  added to declare `operator new` and `operator delete` (sketched below).
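
To illustrate the first point, here is a minimal sketch of the kind of
byte-wise freestanding `memset`/`memcpy` a kernel can carry without
pulling in libc. This is only an assumed shape, not the actual contents
of `j6/memutils.h`, whose signatures and implementations may differ:

    // Hypothetical sketch -- not the actual j6/memutils.h.
    #include <stddef.h>
    #include <stdint.h>

    // Naive byte-at-a-time versions; real implementations would likely
    // copy word-sized chunks. The C-library signatures are kept so
    // existing call sites keep working.
    extern "C" void *
    memset(void *dest, int value, size_t count)
    {
        uint8_t *p = static_cast<uint8_t*>(dest);
        while (count--)
            *p++ = static_cast<uint8_t>(value);
        return dest;
    }

    extern "C" void *
    memcpy(void *dest, const void *src, size_t count)
    {
        uint8_t *d = static_cast<uint8_t*>(dest);
        const uint8_t *s = static_cast<const uint8_t*>(src);
        while (count--)
            *d++ = *s++;
        return dest;
    }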
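
The `__has_include` detection mentioned in the second point could look
roughly like the following. This is a hypothetical sketch of the pattern
only, not the real `util/assert.h`:

    // Hypothetical sketch of the detection pattern, not the real util/assert.h.
    #pragma once

    // If the kernel's kassert.h is reachable on the include path, route
    // the assert macro through kassert(); otherwise fall back to libc.
    #if __has_include("kassert.h")
        #include "kassert.h"
        #define assert(cond) kassert((cond), #cond)
    #else
        #include <assert.h>
    #endif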
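
The last point amounts to re-creating a few small pieces of the standard
library. A hedged sketch of what the `util`-namespace replacements and a
placement-new declaration might look like follows; the bodies are the
textbook definitions and may not match jsix's actual headers:

    // Hypothetical sketch; names follow the commit message, bodies are
    // the standard textbook definitions.
    #include <stddef.h>

    namespace util {

    template <typename T> struct remove_reference      { using type = T; };
    template <typename T> struct remove_reference<T&>  { using type = T; };
    template <typename T> struct remove_reference<T&&> { using type = T; };

    // move: unconditionally cast to an rvalue reference
    template <typename T>
    constexpr typename remove_reference<T>::type && move(T &&t) noexcept {
        return static_cast<typename remove_reference<T>::type&&>(t);
    }

    // forward: preserve the value category of a forwarded argument
    template <typename T>
    constexpr T && forward(typename remove_reference<T>::type &t) noexcept {
        return static_cast<T&&>(t);
    }

    template <typename T>
    constexpr T && forward(typename remove_reference<T>::type &&t) noexcept {
        return static_cast<T&&>(t);
    }

    template <typename T>
    void swap(T &a, T &b) {
        T tmp = move(a);
        a = move(b);
        b = move(tmp);
    }

    } // namespace util

    // What a util/new.h needs at minimum: placement forms, so code can
    // construct objects into pre-allocated storage without <new>.
    inline void * operator new (size_t, void *where) noexcept { return where; }
    inline void operator delete (void *, void *) noexcept {}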
2023-07-12 19:38:31 -07:00

211 lines
4.0 KiB
C++

#include "kassert.h"
#include "frame_allocator.h"
#include "memory.h"
#include "objects/vm_area.h"
#include "vm_space.h"
namespace obj {
using mem::frame_size;
vm_area::vm_area(size_t size, vm_flags flags) :
m_size {size},
m_flags {flags},
m_spaces {m_vector_static, 0, static_size},
kobject {kobject::type::vma}
{
}
vm_area::~vm_area() {}
bool
vm_area::add_to(vm_space *space)
{
for (auto *s : m_spaces) {
if (s == space)
return true;
}
m_spaces.append(space);
return true;
}
void
vm_area::remove_from(vm_space *space)
{
m_spaces.remove_swap(space);
if (!m_spaces.count() && !handle_count())
delete this;
}
void
vm_area::on_no_handles()
{
if (!m_spaces.count())
delete this;
}
size_t
vm_area::resize(size_t size)
{
if (can_resize(size))
m_size = size;
return m_size;
}
bool
vm_area::can_resize(size_t size)
{
for (auto *space : m_spaces)
if (!space->can_resize(*this, size))
return false;
return true;
}
vm_area_fixed::vm_area_fixed(uintptr_t start, size_t size, vm_flags flags) :
m_start {start},
vm_area {size, flags}
{
}
vm_area_fixed::~vm_area_fixed()
{
if (m_flags && vm_flags::mmio)
return;
size_t pages = mem::page_count(m_size);
frame_allocator::get().free(m_start, pages);
}
size_t
vm_area_fixed::resize(size_t size)
{
// Not resizable
return m_size;
}
bool
vm_area_fixed::get_page(uintptr_t offset, uintptr_t &phys, bool alloc)
{
if (offset > m_size)
return false;
phys = m_start + offset;
return true;
}
vm_area_untracked::vm_area_untracked(size_t size, vm_flags flags) :
vm_area {size, flags}
{
}
vm_area_untracked::~vm_area_untracked()
{
kassert(false, "An untracked VMA's pages cannot be reclaimed, leaking memory");
}
bool
vm_area_untracked::get_page(uintptr_t offset, uintptr_t &phys, bool alloc)
{
if (offset > m_size)
return false;
if (!alloc) {
phys = 0;
return true;
}
return frame_allocator::get().allocate(1, &phys);
}
bool
vm_area_untracked::add_to(vm_space *space)
{
if (!m_spaces.count())
return vm_area::add_to(space);
return m_spaces[0] == space;
}
vm_area_open::vm_area_open(size_t size, vm_flags flags) :
m_mapped {nullptr},
vm_area {size, flags}
{
}
vm_area_open::~vm_area_open()
{
// the page_tree will free its pages when deleted
delete m_mapped;
}
bool
vm_area_open::get_page(uintptr_t offset, uintptr_t &phys, bool alloc)
{
if (alloc)
return page_tree::find_or_add(m_mapped, offset, phys);
else
return page_tree::find(m_mapped, offset, &phys);
}
void
vm_area_open::add_existing(uintptr_t offset, uintptr_t phys)
{
}
vm_area_guarded::vm_area_guarded(uintptr_t start, size_t buf_pages, size_t size, vm_flags flags) :
m_pages {buf_pages + 1}, // Sections are N+1 pages for the leading guard page
m_stacks {start, m_pages*mem::frame_size},
vm_area_open {size, flags}
{
}
vm_area_guarded::~vm_area_guarded() {}
uintptr_t
vm_area_guarded::get_section()
{
// Account for the leading guard page
return m_stacks.allocate() + mem::frame_size;
}
void
vm_area_guarded::return_section(uintptr_t addr)
{
// Account for the leading guard page
return m_stacks.free(addr - mem::frame_size);
}
bool
vm_area_guarded::get_page(uintptr_t offset, uintptr_t &phys, bool alloc)
{
if (offset >= m_stacks.end())
return false;
// make sure this isn't in a guard page. (sections have 1 leading
// guard page, so page 0 is invalid)
if ((offset >> 12) % m_pages == 0)
return false;
return vm_area_open::get_page(offset, phys, alloc);
}
vm_area_ring::vm_area_ring(size_t size, vm_flags flags) :
vm_area_open {size * 2, flags},
m_bufsize {size}
{
}
vm_area_ring::~vm_area_ring() {}
bool
vm_area_ring::get_page(uintptr_t offset, uintptr_t &phys, bool alloc)
{
if (offset > m_bufsize)
offset -= m_bufsize;
return vm_area_open::get_page(offset, phys, alloc);
}
} // namespace obj