Files
jsix/src/kernel/objects/vm_area.cpp
Justin C. Miller c1d9b35e7c [bootproto] Create new bootproto lib
This is a rather large commit that is broadly focused on cleaning things
out of the 'junk drawer' that is src/include. Most notably, several
things that were put in there because they needed somewhere that the
kernel, boot, and init could all read have been moved to a new lib,
'bootproto'.

- Moved kernel_args.h and init_args.h to bootproto as kernel.h and
  init.h, respectively.

- Moved counted.h and pointer_manipulation.h into util, renaming the
  latter to util/pointers.h.

- Created a new src/include/arch for very arch-dependent definitions,
  and moved some kernel_memory.h constants like frame size, page table
  entry count, etc to arch/amd64/memory.h. Also created arch/memory.h
  which detects platform and includes the former.

- Got rid of kernel_memory.h entirely in favor of a new, cog-based
  approach. The new definitions/memory_layout.csv lists memory regions
  in descending order from the top of memory, their sizes, and whether
  they are shared outside the kernel (ie, boot needs to know them). The
  new header bootproto/memory.h exposes the addresses of the shared
  regions, while the kernel's memory.h gains the start and size of all
  the regions. Also renamed the badly-named page-offset area the linear
  area.

- The python build scripts got a few new features: the ability to parse
  the csv mentioned above in a new memory.py module; the ability to add
  dependencies to existing source files (The list of files that I had to
  pull out of the main list just to add them with the dependency on
  memory.h was getting too large. So I put them back into the sources
  list, and added the dependency post-hoc.); and the ability to
  reference 'source_root', 'build_root', and 'module_root' variables in
  .module files.

- Some utility functions that were in the kernel's memory.h got moved to
  util/pointers.h and util/misc.h, and misc.h's byteswap was renamed
  byteswap32 to be more specific.
2022-01-03 17:44:13 -08:00

187 lines
3.4 KiB
C++

#include "assert.h"
#include "frame_allocator.h"
#include "memory.h"
#include "objects/vm_area.h"
#include "page_tree.h"
#include "vm_space.h"
using mem::frame_size;
// Construct an anonymous VMA of `size` bytes with the given behavior flags.
// The base class is always initialized before members regardless of the
// mem-init-list order; it is listed first here to reflect the actual order.
vm_area::vm_area(size_t size, vm_flags flags) :
    kobject {kobject::type::vma},
    m_size {size},
    m_flags {flags},
    m_spaces {m_vector_static, 0, static_size}
{
}
vm_area::~vm_area() = default;
// Register this area with an address space. Adding an area to a space it
// already belongs to is a no-op. Always succeeds for the base class.
bool
vm_area::add_to(vm_space *space)
{
    bool already_added = false;
    for (auto *existing : m_spaces) {
        if (existing == space) {
            already_added = true;
            break;
        }
    }

    if (!already_added)
        m_spaces.append(space);

    return true;
}
// Detach this area from an address space. If that was the last space and
// no handles remain (checked only when the space list is empty, via the
// short-circuit), the area destroys itself.
void
vm_area::remove_from(vm_space *space)
{
    m_spaces.remove_swap(space);

    const bool unreferenced = m_spaces.count() == 0;
    if (unreferenced && check_signal(j6_signal_no_handles))
        delete this;
}
// Called when the last handle to this object goes away. The area lives on
// while any address space still maps it; otherwise it destroys itself.
void
vm_area::on_no_handles()
{
    kobject::on_no_handles();

    const bool mapped_anywhere = m_spaces.count() != 0;
    if (!mapped_anywhere)
        delete this;
}
// Attempt to change the area's size. The new size takes effect only if
// every containing space allows it; the resulting size is returned either
// way, so callers can detect a refused resize.
size_t
vm_area::resize(size_t size)
{
    const bool allowed = can_resize(size);
    if (allowed)
        m_size = size;
    return m_size;
}
bool
vm_area::can_resize(size_t size)
{
for (auto *space : m_spaces)
if (!space->can_resize(*this, size))
return false;
return true;
}
// A fixed area maps a pre-existing physical range starting at `start`.
// The base class is initialized first regardless of mem-init-list order;
// listed first here to reflect the actual order.
vm_area_fixed::vm_area_fixed(uintptr_t start, size_t size, vm_flags flags) :
    vm_area {size, flags},
    m_start {start}
{
}
vm_area_fixed::~vm_area_fixed()
{
    // MMIO ranges are not owned by the frame allocator, so they must not
    // be handed back to it. NOTE(review): `&&` here is presumably the
    // project's overloaded bit-test on vm_flags — confirm in its header.
    if (m_flags && vm_flags::mmio)
        return;

    const size_t pages = mem::page_count(m_size);
    frame_allocator::get().free(m_start, pages);
}
size_t
vm_area_fixed::resize(size_t size)
{
// Not resizable
return m_size;
}
// Translate a byte offset within the area to a physical address.
// \arg offset  Byte offset into the area; valid range is [0, m_size)
// \arg phys    [out] Receives the physical address backing `offset`
// \returns     true if the offset was within the area
bool
vm_area_fixed::get_page(uintptr_t offset, uintptr_t &phys)
{
    // Was `offset > m_size`: an off-by-one that accepted offset == m_size,
    // which lies one byte past the end and would map a page beyond the area.
    if (offset >= m_size)
        return false;
    phys = m_start + offset;
    return true;
}
// An untracked area hands out freshly-allocated frames on demand without
// recording them, so its pages can never be reclaimed (see the destructor).
vm_area_untracked::vm_area_untracked(size_t size, vm_flags flags) :
vm_area {size, flags}
{
}
// Destroying an untracked area would leak every frame it ever handed out,
// since nothing records which frames back it. Treat destruction as a bug.
vm_area_untracked::~vm_area_untracked()
{
kassert(false, "An untracked VMA's pages cannot be reclaimed, leaking memory");
}
// Back a page fault at `offset` with a newly allocated frame.
// \arg offset  Byte offset into the area; valid range is [0, m_size)
// \arg phys    [out] Receives the physical address of the new frame
// \returns     true if the offset was valid and a frame was allocated
bool
vm_area_untracked::get_page(uintptr_t offset, uintptr_t &phys)
{
    // Was `offset > m_size`: an off-by-one that accepted offset == m_size,
    // one page past the end of the area.
    if (offset >= m_size)
        return false;

    // The frame is never recorded anywhere, so it can never be reclaimed
    // (see ~vm_area_untracked).
    return frame_allocator::get().allocate(1, &phys);
}
// An untracked area may belong to at most one address space: the first
// add succeeds, and subsequent adds succeed only for that same space.
bool
vm_area_untracked::add_to(vm_space *space)
{
    if (m_spaces.count() > 0)
        return m_spaces[0] == space;

    return vm_area::add_to(space);
}
// An open area lazily allocates frames and tracks them in a page_tree,
// which starts out empty. The base class is initialized first regardless
// of mem-init-list order; listed first here to reflect the actual order.
vm_area_open::vm_area_open(size_t size, vm_flags flags) :
    vm_area {size, flags},
    m_mapped {nullptr}
{
}
vm_area_open::~vm_area_open()
{
// the page_tree will free its pages when deleted; deleting a null
// m_mapped (nothing ever faulted in) is a safe no-op
delete m_mapped;
}
// Look up the frame backing `offset`, allocating and recording a new one
// in the page_tree if none exists yet.
// NOTE(review): unlike the other subclasses, `offset` is not checked
// against m_size here — presumably callers (and vm_area_guarded, which
// bounds-checks before delegating) guarantee it; confirm before adding one.
bool
vm_area_open::get_page(uintptr_t offset, uintptr_t &phys)
{
return page_tree::find_or_add(m_mapped, offset, phys);
}
// A guarded area parcels out fixed-size buffer sections of `buf_pages`
// pages, each preceded by one unmapped guard page. m_next starts at
// frame_size so that page 0 is always a guard page. The base class is
// initialized first regardless of mem-init-list order; listed first here
// to reflect the actual order.
vm_area_guarded::vm_area_guarded(uintptr_t start, size_t buf_pages, size_t size, vm_flags flags) :
    vm_area_open {size, flags},
    m_start {start},
    m_pages {buf_pages},
    m_next {mem::frame_size}
{
}
vm_area_guarded::~vm_area_guarded() = default;
// Hand out the address of an available buffer section, reusing a
// previously-returned one from the cache when possible.
uintptr_t
vm_area_guarded::get_section()
{
    if (m_cache.count() > 0)
        return m_cache.pop();

    // Carve out a new section: advance past its pages plus the guard
    // page that precedes the following section.
    const uintptr_t offset = m_next;
    m_next += (m_pages + 1) * mem::frame_size;
    return m_start + offset;
}
// Return a section address obtained from get_section() to the free cache
// for later reuse. No pages are released here.
void
vm_area_guarded::return_section(uintptr_t addr)
{
m_cache.append(addr);
}
bool
vm_area_guarded::get_page(uintptr_t offset, uintptr_t &phys)
{
if (offset > m_next)
return false;
// make sure this isn't in a guard page. (sections are
// m_pages big plus 1 leading guard page, so page 0 is
// invalid)
if ((offset >> 12) % (m_pages+1) == 0)
return false;
return vm_area_open::get_page(offset, phys);
}