[kernel] Implement VMA page tracking

The previous method of VMA page tracking relied on the VMA always
being mapped into at least one space, and simply tracked pages via the
spaces' page tables. This had a number of drawbacks, and the mapper
system it required was too complex for the benefit it provided.

Now make VMAs themselves keep track of the spaces they are a part of,
and make them responsible for knowing which page goes where. This
greatly simplifies most types of VMA. The new vm_area_open (née
vm_area_shared; there is no longer a reason for most VMAs to be
explicitly shareable) adds a 64-ary radix tree for tracking allocated
pages.

The page_tree cannot yet handle taking pages away, but this isn't
something jsix can do yet anyway.
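
To make the structure concrete, here is a minimal sketch of a 64-ary
radix tree with a find_or_add operation in the spirit of the one this
commit introduces. The names, the fixed depth, and the allocate_frame
stub are illustrative assumptions, not jsix's actual page_tree
internals:

    #include <cstdint>

    // Stub frame allocator so the sketch stands alone; a real kernel
    // would hand out frames from its physical memory allocator.
    static uintptr_t next_frame = 0x100000;
    static bool allocate_frame(uintptr_t &phys)
    {
        phys = next_frame;
        next_frame += 0x1000;
        return true;
    }

    constexpr unsigned radix_bits = 6;            // 64-ary fanout
    constexpr unsigned fanout = 1u << radix_bits;
    constexpr unsigned levels = 4;                // 24 bits of page index

    struct page_node
    {
        // A real implementation would overlay these (only leaves use
        // `pages`), but keeping both makes the sketch simpler.
        page_node *children[fanout] = {};
        uintptr_t pages[fanout] = {};             // 0 means "no page yet"
    };

    // Find the physical page backing byte `offset`, allocating and
    // recording a new frame if none is present. Returns false only if
    // frame allocation fails. Assumes offset's page index fits in the
    // fixed depth above.
    static bool find_or_add(page_node *&root, uintptr_t offset, uintptr_t &phys)
    {
        if (!root)
            root = new page_node;

        uint64_t index = offset >> 12;            // page index within the VMA
        page_node *node = root;

        // Walk interior levels, consuming 6 bits of index per level and
        // creating missing nodes on the way down.
        for (unsigned level = levels - 1; level > 0; --level) {
            unsigned slot = (index >> (level * radix_bits)) & (fanout - 1);
            if (!node->children[slot])
                node->children[slot] = new page_node;
            node = node->children[slot];
        }

        uintptr_t &entry = node->pages[index & (fanout - 1)];
        if (!entry && !allocate_frame(entry))
            return false;

        phys = entry;
        return true;
    }

With 6 bits of page index consumed per level, four fixed levels cover
2^24 pages (64 GiB of 4 KiB pages); a real tree would likely grow its
depth on demand instead.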
commit 634a1c5f6a (parent c364e30240)
Author: Justin C. Miller
Date:   2021-01-31 22:18:44 -08:00

14 changed files with 412 additions and 411 deletions


@@ -1,5 +1,7 @@
+#include "frame_allocator.h"
 #include "kernel_memory.h"
 #include "objects/vm_area.h"
+#include "page_tree.h"
 #include "vm_space.h"
 
 using memory::frame_size;
@@ -7,30 +9,31 @@ using memory::frame_size;
 vm_area::vm_area(size_t size, vm_flags flags) :
     m_size {size},
     m_flags {flags},
+    m_spaces {m_vector_static, 0, static_size},
     kobject {kobject::type::vma}
 {
 }
 
 vm_area::~vm_area() {}
 
-size_t
-vm_area::resize(size_t size)
+bool
+vm_area::add_to(vm_space *space)
 {
-    if (mapper().can_resize(size))
-        m_size = size;
-    return m_size;
+    for (auto *s : m_spaces) {
+        if (s == space)
+            return true;
+    }
+    m_spaces.append(space);
+    return true;
 }
 
-void
-vm_area::commit(uintptr_t phys, uintptr_t offset, size_t count)
+bool
+vm_area::remove_from(vm_space *space)
 {
-    mapper().map(offset, count, phys);
-}
-
-void
-vm_area::uncommit(uintptr_t offset, size_t count)
-{
-    mapper().unmap(offset, count);
+    m_spaces.remove_swap(space);
+    return
+        !m_spaces.count() &&
+        !(m_flags && vm_flags::mmio);
 }
 
 void
@@ -40,21 +43,25 @@ vm_area::on_no_handles()
     delete this;
 }
 
-vm_area_shared::vm_area_shared(size_t size, vm_flags flags) :
-    m_mapper {*this},
-    vm_area {size, flags}
+size_t
+vm_area::resize(size_t size)
 {
+    if (can_resize(size))
+        m_size = size;
+    return m_size;
 }
 
-vm_area_shared::~vm_area_shared()
+bool
+vm_area::can_resize(size_t size)
 {
+    for (auto *space : m_spaces)
+        if (!space->can_resize(*this, size))
+            return false;
+    return true;
 }
 
-vm_area_fixed::vm_area_fixed(size_t size, vm_flags flags) :
-    m_mapper {*this},
+vm_area_fixed::vm_area_fixed(uintptr_t start, size_t size, vm_flags flags) :
+    m_start {start},
     vm_area {size, flags}
 {
 }
@@ -63,36 +70,72 @@ vm_area_fixed::~vm_area_fixed()
 {
 }
 
-vm_area_open::vm_area_open(size_t size, vm_space &space, vm_flags flags) :
-    m_mapper(*this, space),
-    vm_area(size, flags)
+size_t vm_area_fixed::resize(size_t size)
 {
+    // Not resizable
+    return m_size;
 }
 
-void
-vm_area_open::commit(uintptr_t phys, uintptr_t offset, size_t count)
+bool vm_area_fixed::get_page(uintptr_t offset, uintptr_t &phys)
 {
-    m_mapper.map(offset, count, phys);
-}
+    if (offset > m_size)
+        return false;
 
-void
-vm_area_open::uncommit(uintptr_t offset, size_t count)
-{
-    m_mapper.unmap(offset, count);
+    phys = m_start + offset;
+    return true;
 }
 
-vm_area_buffers::vm_area_buffers(size_t size, vm_space &space, vm_flags flags, size_t buf_pages) :
-    m_mapper {*this, space},
-    m_pages {buf_pages},
-    m_next {memory::frame_size},
+vm_area_untracked::vm_area_untracked(size_t size, vm_flags flags) :
     vm_area {size, flags}
 {
 }
 
+vm_area_untracked::~vm_area_untracked()
+{
+}
+
+bool
+vm_area_untracked::get_page(uintptr_t offset, uintptr_t &phys)
+{
+    if (offset > m_size)
+        return false;
+
+    return frame_allocator::get().allocate(1, &phys);
+}
+
+bool
+vm_area_untracked::add_to(vm_space *space)
+{
+    if (!m_spaces.count())
+        return vm_area::add_to(space);
+    return m_spaces[0] == space;
+}
+
+vm_area_open::vm_area_open(size_t size, vm_flags flags) :
+    m_mapped {nullptr},
+    vm_area {size, flags}
+{
+}
+
+bool
+vm_area_open::get_page(uintptr_t offset, uintptr_t &phys)
+{
+    return page_tree::find_or_add(m_mapped, offset, phys);
+}
+
+vm_area_guarded::vm_area_guarded(uintptr_t start, size_t buf_pages, size_t size, vm_flags flags) :
+    m_start {start},
+    m_pages {buf_pages},
+    m_next {memory::frame_size},
+    vm_area_untracked {size, flags}
+{
+}
+
 uintptr_t
-vm_area_buffers::get_buffer()
+vm_area_guarded::get_section()
 {
     if (m_cache.count() > 0) {
         return m_cache.pop();
@@ -100,33 +143,27 @@ vm_area_buffers::get_buffer()
     uintptr_t addr = m_next;
     m_next += (m_pages + 1) * memory::frame_size;
-    return m_mapper.space().lookup(*this, addr);
+    return m_start + addr;
 }
 
 void
-vm_area_buffers::return_buffer(uintptr_t addr)
+vm_area_guarded::return_section(uintptr_t addr)
 {
     m_cache.append(addr);
 }
 
 bool
-vm_area_buffers::allowed(uintptr_t offset) const
+vm_area_guarded::get_page(uintptr_t offset, uintptr_t &phys)
 {
-    if (offset >= m_next) return false;
+    if (offset > m_next)
+        return false;
 
-    // Buffers are m_pages big plus 1 leading guard page
-    return memory::page_align_down(offset) % (m_pages+1);
-}
-
-void
-vm_area_buffers::commit(uintptr_t phys, uintptr_t offset, size_t count)
-{
-    m_mapper.map(offset, count, phys);
-}
-
-void
-vm_area_buffers::uncommit(uintptr_t offset, size_t count)
-{
-    m_mapper.unmap(offset, count);
+    // make sure this isn't in a guard page. (sections are
+    // m_pages big plus 1 leading guard page, so page 0 is
+    // invalid)
+    if ((offset >> 12) % (m_pages+1) == 0)
+        return false;
+
+    return vm_area_untracked::get_page(offset, phys);
 }
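
As an aside on that guard-page check: offsets are page-granular (the
offset >> 12 assumes 4 KiB pages), each section occupies m_pages + 1
page slots, and slot 0 of every stride is the unmapped guard. A tiny
self-contained illustration of that arithmetic, using hypothetical
values (m_pages = 2):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        const uintptr_t frame_size = 0x1000;     // 4 KiB pages
        const uintptr_t m_pages = 2;             // usable pages per section

        // Mirrors the check in vm_area_guarded::get_page: page index 0
        // of every (m_pages + 1)-page stride is a guard page.
        auto is_guard = [&](uintptr_t offset) {
            return (offset >> 12) % (m_pages + 1) == 0;
        };

        assert(is_guard(0 * frame_size));        // guard before section 1
        assert(!is_guard(1 * frame_size));       // section 1, usable page
        assert(!is_guard(2 * frame_size));       // section 1, usable page
        assert(is_guard(3 * frame_size));        // guard before section 2
        return 0;
    }

This also explains why m_next starts at memory::frame_size in the
vm_area_guarded constructor: offset 0 is itself a guard page, so the
first section begins one page in.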