[kernel] Improve VMA lifecycle

The vm_area objects had a number of issues I have been running into when
working on srv.init:

- It was impossible to map a VMA, fill it, unmap it, and hand it to
  another process. Unmapping the VMA in this process would cause all the
  pages to be freed, since it was removed from its last mapping.
- If a VMA was marked with vm_flag::zero, it would be zeroed out _every
  time_ it was mapped into a vm_space.
- The vm_area_open class was leaking its page_tree nodes.

In order to fix these issues, the different VMA types all work slightly
differently now:

- Physical pages allocated for a VMA are now freed when the VMA is
  deleted, not when it is unmapped.
- A knock-on effect from the first point is that vm_area_guarded is now
  based on vm_area_open, instead of vm_area_untracked. An untracked area
  cannot free its pages, since it does not track them.
- The vm_area_open type now deletes its root page_tree node. And
  page_tree nodes will delete child nodes or free physical pages in
  their dtors.
- vm_flag::zero has been removed; pages will need to be zeroed out at a
  higher level.
- vm_area also no longer deletes itself only on losing its last handle -
  it will only self-delete when all handles _and_ mappings are gone.
This commit is contained in:
Justin C. Miller
2021-09-12 21:55:02 -07:00
parent 6317e3ad00
commit d60f8ed8d5
9 changed files with 69 additions and 38 deletions

View File

@@ -27,20 +27,21 @@ vm_area::add_to(vm_space *space)
return true;
}
bool
void
vm_area::remove_from(vm_space *space)
{
m_spaces.remove_swap(space);
return
!m_spaces.count() &&
!(m_flags && vm_flags::mmio);
if (!m_spaces.count() &&
check_signal(j6_signal_no_handles))
delete this;
}
void
vm_area::on_no_handles()
{
kobject::on_no_handles();
delete this;
if (!m_spaces.count())
delete this;
}
size_t
@@ -66,15 +67,24 @@ vm_area_fixed::vm_area_fixed(uintptr_t start, size_t size, vm_flags flags) :
{
}
vm_area_fixed::~vm_area_fixed() {}
vm_area_fixed::~vm_area_fixed()
{
if (m_flags && vm_flags::mmio)
return;
size_t vm_area_fixed::resize(size_t size)
size_t pages = memory::page_count(m_size);
frame_allocator::get().free(m_start, pages);
}
size_t
vm_area_fixed::resize(size_t size)
{
// Not resizable
return m_size;
}
bool vm_area_fixed::get_page(uintptr_t offset, uintptr_t &phys)
bool
vm_area_fixed::get_page(uintptr_t offset, uintptr_t &phys)
{
if (offset > m_size)
return false;
@@ -83,13 +93,15 @@ bool vm_area_fixed::get_page(uintptr_t offset, uintptr_t &phys)
return true;
}
vm_area_untracked::vm_area_untracked(size_t size, vm_flags flags) :
vm_area {size, flags}
{
}
vm_area_untracked::~vm_area_untracked() {}
vm_area_untracked::~vm_area_untracked()
{
kassert(false, "An untracked VMA's pages cannot be reclaimed, leaking memory");
}
bool
vm_area_untracked::get_page(uintptr_t offset, uintptr_t &phys)
@@ -115,7 +127,11 @@ vm_area_open::vm_area_open(size_t size, vm_flags flags) :
{
}
vm_area_open::~vm_area_open() {}
vm_area_open::~vm_area_open()
{
// the page_tree will free its pages when deleted
delete m_mapped;
}
bool
vm_area_open::get_page(uintptr_t offset, uintptr_t &phys)
@@ -128,7 +144,7 @@ vm_area_guarded::vm_area_guarded(uintptr_t start, size_t buf_pages, size_t size,
m_start {start},
m_pages {buf_pages},
m_next {memory::frame_size},
vm_area_untracked {size, flags}
vm_area_open {size, flags}
{
}
@@ -164,6 +180,5 @@ vm_area_guarded::get_page(uintptr_t offset, uintptr_t &phys)
if ((offset >> 12) % (m_pages+1) == 0)
return false;
return vm_area_untracked::get_page(offset, phys);
return vm_area_open::get_page(offset, phys);
}

View File

@@ -52,9 +52,7 @@ public:
/// Track that this area was removed from a vm_space
/// \arg space The space that is removing this area
/// \returns True if the removing space should free the pages
/// mapped for this area
virtual bool remove_from(vm_space *space);
virtual void remove_from(vm_space *space);
/// Change the virtual size of the memory area. This may cause
/// deallocation if the new size is smaller than the current size.
@@ -141,9 +139,8 @@ public:
/// Area split into standard-sized segments, separated by guard pages.
/// Based on vm_area_open; cannot be shared.
class vm_area_guarded :
public vm_area_untracked
public vm_area_open
{
public:
/// Constructor.

View File

@@ -4,10 +4,10 @@
#include "kernel_memory.h"
#include "page_tree.h"
// Page tree levels map the following parts of a pagewise offset. Note the xxx
// are not part of the offset but represent the bits added for the actual virtual
// address. (Also note that level 0's entries are physical page addrs, the rest
// map other page_tree nodes)
// Page tree levels map the following parts of an offset. Note the xxx are not
// part of the offset but represent the bits of the actual sub-page virtual
// address. (Also note that level 0's entries are physical page addrs, the rest
// map other page_tree nodes)
//
// Level 0: 0000 0000 0003 fxxx 64 pages / 256 KiB
// Level 1: 0000 0000 00fc 0xxx 4K pages / 16 MiB -- 24-bit addressing
@@ -36,6 +36,20 @@ page_tree::page_tree(uint64_t base, uint8_t level) :
kutil::memset(m_entries, 0, sizeof(m_entries));
}
page_tree::~page_tree()
{
if (m_level) {
for (auto &e : m_entries)
delete e.child;
} else {
auto &fa = frame_allocator::get();
for (auto &e : m_entries) {
if (e.entry & 1)
fa.free(e.entry & ~0xfffull, 1);
}
}
}
bool
page_tree::contains(uint64_t offset, uint8_t &index) const
{

View File

@@ -23,6 +23,8 @@ public:
/// \returns True if a page was found
static bool find_or_add(page_tree * &root, uint64_t offset, uintptr_t &page);
~page_tree();
private:
page_tree(uint64_t base, uint8_t level);

View File

@@ -45,11 +45,8 @@ vm_space::vm_space() :
vm_space::~vm_space()
{
for (auto &a : m_areas) {
bool free = a.area->remove_from(this);
clear(*a.area, 0, memory::page_count(a.area->size()), free);
a.area->handle_release();
}
for (auto &a : m_areas)
remove_area(a.area);
kassert(!is_kernel(), "Kernel vm_space destructor!");
if (active())
@@ -76,15 +73,21 @@ vm_space::add(uintptr_t base, vm_area *area)
return true;
}
void
vm_space::remove_area(vm_area *area)
{
area->remove_from(this);
clear(*area, 0, memory::page_count(area->size()));
area->handle_release();
}
bool
vm_space::remove(vm_area *area)
{
for (auto &a : m_areas) {
if (a.area == area) {
bool free = area->remove_from(this);
clear(*area, 0, memory::page_count(area->size()), free);
remove_area(area);
m_areas.remove(a);
area->handle_release();
return true;
}
}
@@ -277,10 +280,6 @@ vm_space::handle_fault(uintptr_t addr, fault_type fault)
if (!area->get_page(offset, phys_page))
return false;
void *mem = memory::to_virtual<void>(phys_page);
if (area->flags() && vm_flags::zero)
kutil::memset(mem, 0, memory::frame_size);
page_in(*area, offset, phys_page, 1);
return true;
}

View File

@@ -107,7 +107,6 @@ public:
private:
friend class vm_area;
friend class vm_mapper_multi;
/// Find a given VMA in this address space
bool find_vma(const vm_area &vma, uintptr_t &base) const;
@@ -118,6 +117,9 @@ private:
/// Copy a range of mappings from the given address space
void copy_from(const vm_space &source, const vm_area &vma);
/// Remove an area's mappings from this space
void remove_area(vm_area *area);
bool m_kernel;
page_table *m_pml4;