[kernel] Implement VMA page tracking

The previous method of VMA page tracking relied on the VMA always being
mapped into at least one space, and simply kept track of pages in the
spaces' page tables. This had a number of drawbacks, and the mapper
system was too complex for the little benefit it provided.

Now make VMAs themselves keep track of the spaces they are a part of,
and make them responsible for knowing what page goes where. This
greatly simplifies most types of VMA. The new vm_area_open (née
vm_area_shared, but there is now no reason for most VMAs to be
explicitly shareable) adds a 64-ary radix tree, page_tree, for
tracking allocated pages.

The page_tree cannot yet handle taking pages away, but this isn't
something jsix can do yet anyway.
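
In sketch form, the new fault path looks like this (condensed from
the vm_space::handle_fault change below; zero-fill and error paths
elided):

    uintptr_t base = 0;
    vm_area *area = get(addr, &base);           // VMA covering addr
    uintptr_t offset = (addr & ~0xfffull) - base;
    uintptr_t phys = 0;
    if (area && area->get_page(offset, phys))   // may allocate a frame
        page_in(*area, offset, phys, 1);        // map into this space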
Justin C. Miller
2021-01-31 22:18:44 -08:00
parent c364e30240
commit 634a1c5f6a
14 changed files with 412 additions and 411 deletions


@@ -41,6 +41,7 @@ modules:
- src/kernel/objects/system.cpp
- src/kernel/objects/vm_area.cpp
- src/kernel/page_table.cpp
- src/kernel/page_tree.cpp
- src/kernel/pci.cpp
- src/kernel/scheduler.cpp
- src/kernel/serial.cpp
@@ -55,7 +56,6 @@ modules:
- src/kernel/syscalls/thread.cpp
- src/kernel/syscalls/vm_area.cpp
- src/kernel/task.s
- src/kernel/vm_mapper.cpp
- src/kernel/vm_space.cpp
boot:


@@ -35,20 +35,20 @@ kutil::heap_allocator &g_kernel_heap = __g_kernel_heap_storage.value;
static kutil::no_construct<frame_allocator> __g_frame_allocator_storage;
frame_allocator &g_frame_allocator = __g_frame_allocator_storage.value;
static kutil::no_construct<vm_area_open> __g_kernel_heap_area_storage;
vm_area_open &g_kernel_heap_area = __g_kernel_heap_area_storage.value;
static kutil::no_construct<vm_area_untracked> __g_kernel_heap_area_storage;
vm_area_untracked &g_kernel_heap_area = __g_kernel_heap_area_storage.value;
vm_area_buffers g_kernel_stacks {
vm_area_guarded g_kernel_stacks {
memory::stacks_start,
memory::kernel_stack_pages,
memory::kernel_max_stacks,
vm_space::kernel_space(),
vm_flags::write,
memory::kernel_stack_pages};
vm_flags::write};
vm_area_buffers g_kernel_buffers {
vm_area_guarded g_kernel_buffers {
memory::buffers_start,
memory::kernel_buffer_pages,
memory::kernel_max_buffers,
vm_space::kernel_space(),
vm_flags::write,
memory::kernel_buffer_pages};
vm_flags::write};
void * operator new(size_t size) { return g_kernel_heap.allocate(size); }
void * operator new [] (size_t size) { return g_kernel_heap.allocate(size); }
@@ -101,7 +101,7 @@ memory_initialize_pre_ctors(args::header &kargs)
vm_space &vm = kp->space();
vm_area *heap = new (&g_kernel_heap_area)
vm_area_open(kernel_max_heap, vm, vm_flags::write);
vm_area_untracked(kernel_max_heap, vm_flags::write);
vm.add(heap_start, heap);
}
@@ -197,9 +197,8 @@ load_simple_process(args::program &program)
(bitfield_has(sect.type, section_flags::execute) ? vm_flags::exec : vm_flags::none) |
(bitfield_has(sect.type, section_flags::write) ? vm_flags::write : vm_flags::none);
vm_area *vma = new vm_area_fixed(sect.size, flags);
vm_area *vma = new vm_area_fixed(sect.phys_addr, sect.size, flags);
space.add(sect.virt_addr, vma);
vma->commit(sect.phys_addr, 0, memory::page_count(sect.size));
}
uint64_t iopl = (3ull << 12);


@@ -4,13 +4,13 @@
#include "objects/channel.h"
#include "objects/vm_area.h"
extern vm_area_buffers g_kernel_buffers;
extern vm_area_guarded g_kernel_buffers;
constexpr size_t buffer_bytes = memory::kernel_buffer_pages * memory::frame_size;
channel::channel() :
m_len(0),
m_data(g_kernel_buffers.get_buffer()),
m_data(g_kernel_buffers.get_section()),
m_buffer(reinterpret_cast<uint8_t*>(m_data), buffer_bytes),
kobject(kobject::type::channel, j6_signal_channel_can_send)
{
@@ -79,7 +79,7 @@ void
channel::close()
{
kobject::close();
g_kernel_buffers.return_buffer(m_data);
g_kernel_buffers.return_section(m_data);
}
void


@@ -102,8 +102,8 @@ process::create_thread(uint8_t priority, bool user)
if (user) {
uintptr_t stack_top = stacks_top - (m_threads.count() * stack_size);
vm_area *vma = new vm_area_open(stack_size, m_space,
vm_flags::zero|vm_flags::write);
vm_flags flags = vm_flags::zero|vm_flags::write;
vm_area *vma = new vm_area_open(stack_size, flags);
m_space.add(stack_top - stack_size, vma);
// Space for null frame - because the page gets zeroed on


@@ -9,7 +9,7 @@
extern "C" void kernel_to_user_trampoline();
static constexpr j6_signal_t thread_default_signals = 0;
extern vm_area_buffers g_kernel_stacks;
extern vm_area_guarded g_kernel_stacks;
thread::thread(process &parent, uint8_t pri, uintptr_t rsp0) :
kobject(kobject::type::thread, thread_default_signals),
@@ -32,7 +32,7 @@ thread::thread(process &parent, uint8_t pri, uintptr_t rsp0) :
thread::~thread()
{
g_kernel_stacks.return_buffer(m_tcb.kernel_stack);
g_kernel_stacks.return_section(m_tcb.kernel_stack);
}
thread *
@@ -204,7 +204,7 @@ thread::setup_kernel_stack()
constexpr unsigned null_frame_entries = 2;
constexpr size_t null_frame_size = null_frame_entries * sizeof(uint64_t);
uintptr_t stack_addr = g_kernel_stacks.get_buffer();
uintptr_t stack_addr = g_kernel_stacks.get_section();
uintptr_t stack_end = stack_addr + stack_bytes;
uint64_t *null_frame = reinterpret_cast<uint64_t*>(stack_end - null_frame_size);


@@ -1,5 +1,7 @@
#include "frame_allocator.h"
#include "kernel_memory.h"
#include "objects/vm_area.h"
#include "page_tree.h"
#include "vm_space.h"
using memory::frame_size;
@@ -7,30 +9,31 @@ using memory::frame_size;
vm_area::vm_area(size_t size, vm_flags flags) :
m_size {size},
m_flags {flags},
m_spaces {m_vector_static, 0, static_size},
kobject {kobject::type::vma}
{
}
vm_area::~vm_area() {}
size_t
vm_area::resize(size_t size)
bool
vm_area::add_to(vm_space *space)
{
if (mapper().can_resize(size))
m_size = size;
return m_size;
for (auto *s : m_spaces) {
if (s == space)
return true;
}
m_spaces.append(space);
return true;
}
void
vm_area::commit(uintptr_t phys, uintptr_t offset, size_t count)
bool
vm_area::remove_from(vm_space *space)
{
mapper().map(offset, count, phys);
}
void
vm_area::uncommit(uintptr_t offset, size_t count)
{
mapper().unmap(offset, count);
m_spaces.remove_swap(space);
return
!m_spaces.count() &&
!(m_flags && vm_flags::mmio);
}
void
@@ -40,21 +43,25 @@ vm_area::on_no_handles()
delete this;
}
vm_area_shared::vm_area_shared(size_t size, vm_flags flags) :
m_mapper {*this},
vm_area {size, flags}
size_t
vm_area::resize(size_t size)
{
if (can_resize(size))
m_size = size;
return m_size;
}
vm_area_shared::~vm_area_shared()
bool
vm_area::can_resize(size_t size)
{
for (auto *space : m_spaces)
if (!space->can_resize(*this, size))
return false;
return true;
}
vm_area_fixed::vm_area_fixed(size_t size, vm_flags flags) :
m_mapper {*this},
vm_area_fixed::vm_area_fixed(uintptr_t start, size_t size, vm_flags flags) :
m_start {start},
vm_area {size, flags}
{
}
@@ -63,36 +70,72 @@ vm_area_fixed::~vm_area_fixed()
{
}
vm_area_open::vm_area_open(size_t size, vm_space &space, vm_flags flags) :
m_mapper(*this, space),
vm_area(size, flags)
size_t vm_area_fixed::resize(size_t size)
{
// Not resizable
return m_size;
}
void
vm_area_open::commit(uintptr_t phys, uintptr_t offset, size_t count)
bool vm_area_fixed::get_page(uintptr_t offset, uintptr_t &phys)
{
m_mapper.map(offset, count, phys);
}
if (offset > m_size)
return false;
void
vm_area_open::uncommit(uintptr_t offset, size_t count)
{
m_mapper.unmap(offset, count);
phys = m_start + offset;
return true;
}
vm_area_buffers::vm_area_buffers(size_t size, vm_space &space, vm_flags flags, size_t buf_pages) :
m_mapper {*this, space},
m_pages {buf_pages},
m_next {memory::frame_size},
vm_area_untracked::vm_area_untracked(size_t size, vm_flags flags) :
vm_area {size, flags}
{
}
vm_area_untracked::~vm_area_untracked()
{
}
bool
vm_area_untracked::get_page(uintptr_t offset, uintptr_t &phys)
{
if (offset > m_size)
return false;
return frame_allocator::get().allocate(1, &phys);
}
bool
vm_area_untracked::add_to(vm_space *space)
{
if (!m_spaces.count())
return vm_area::add_to(space);
return m_spaces[0] == space;
}
vm_area_open::vm_area_open(size_t size, vm_flags flags) :
m_mapped {nullptr},
vm_area {size, flags}
{
}
bool
vm_area_open::get_page(uintptr_t offset, uintptr_t &phys)
{
return page_tree::find_or_add(m_mapped, offset, phys);
}
vm_area_guarded::vm_area_guarded(uintptr_t start, size_t buf_pages, size_t size, vm_flags flags) :
m_start {start},
m_pages {buf_pages},
m_next {memory::frame_size},
vm_area_untracked {size, flags}
{
}
uintptr_t
vm_area_buffers::get_buffer()
vm_area_guarded::get_section()
{
if (m_cache.count() > 0) {
return m_cache.pop();
@@ -100,33 +143,27 @@ vm_area_buffers::get_buffer()
uintptr_t addr = m_next;
m_next += (m_pages + 1) * memory::frame_size;
return m_mapper.space().lookup(*this, addr);
return m_start + addr;
}
void
vm_area_buffers::return_buffer(uintptr_t addr)
vm_area_guarded::return_section(uintptr_t addr)
{
m_cache.append(addr);
}
bool
vm_area_buffers::allowed(uintptr_t offset) const
vm_area_guarded::get_page(uintptr_t offset, uintptr_t &phys)
{
if (offset >= m_next) return false;
if (offset > m_next)
return false;
// Buffers are m_pages big plus 1 leading guard page
return memory::page_align_down(offset) % (m_pages+1);
}
void
vm_area_buffers::commit(uintptr_t phys, uintptr_t offset, size_t count)
{
m_mapper.map(offset, count, phys);
}
void
vm_area_buffers::uncommit(uintptr_t offset, size_t count)
{
m_mapper.unmap(offset, count);
// make sure this isn't in a guard page. (sections are
// m_pages big plus 1 leading guard page, so page 0 is
// invalid)
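// e.g. with m_pages == 4 the section stride is 5 pages, so
// pages 0, 5, 10, ... are guard pages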
if ((offset >> 12) % (m_pages+1) == 0)
return false;
return vm_area_untracked::get_page(offset, phys);
}


@@ -11,8 +11,8 @@
#include "kernel_memory.h"
#include "objects/kobject.h"
#include "vm_mapper.h"
class page_tree;
class vm_space;
enum class vm_flags : uint32_t
@@ -55,156 +55,131 @@ public:
/// Get the flags set for this area
inline vm_flags flags() const { return m_flags; }
/// Track that this area was added to a vm_space
/// \arg space The space to add this area to
/// \returns False if this area cannot be added
virtual bool add_to(vm_space *space);
/// Track that this area was removed from a vm_space
/// \arg space The space that is removing this area
/// \returns True if the removing space should free the pages
/// mapped for this area
virtual bool remove_from(vm_space *space);
/// Change the virtual size of the memory area. This may cause
/// deallocation if the new size is smaller than the current size.
/// Note that if resizing is unsuccessful, the previous size will
/// be returned.
/// \arg size The desired new virtual size
/// \returns The new virtual size
size_t resize(size_t size);
virtual size_t resize(size_t size);
/// Get the mapper object that maps this area to address spaces
virtual vm_mapper & mapper() = 0;
virtual const vm_mapper & mapper() const = 0;
/// Check whether allocation at the given offset is allowed
virtual bool allowed(uintptr_t offset) const { return true; }
/// Commit contiguous physical pages to this area
/// \arg phys The physical address of the first page
/// \arg offset The offset from the start of this area these pages represent
/// \arg count The number of pages
virtual void commit(uintptr_t phys, uintptr_t offset, size_t count);
/// Uncommit physical pages from this area
/// \arg offset The offset from the start of this area these pages represent
/// \arg count The number of pages
virtual void uncommit(uintptr_t offset, size_t count);
/// Get the physical page for the given offset
/// \arg offset The offset into the VMA
/// \arg phys [out] Receives the physical page address, if any
/// \returns True if there should be a page at the given offset
virtual bool get_page(uintptr_t offset, uintptr_t &phys) = 0;
protected:
virtual void on_no_handles() override;
bool can_resize(size_t size);
size_t m_size;
vm_flags m_flags;
kutil::vector<vm_space*> m_spaces;
// Initial static space for m_spaces - most areas will never grow
// beyond this size, so avoid allocations
static constexpr size_t static_size = 2;
vm_space *m_vector_static[static_size];
};
/// The standard, sharable, user-controllable VMA type
class vm_area_shared :
public vm_area
{
public:
/// Constructor.
/// \arg size Initial virtual size of the memory area
/// \arg flags Flags for this memory area
vm_area_shared(size_t size, vm_flags flags = vm_flags::none);
virtual ~vm_area_shared();
virtual vm_mapper & mapper() override { return m_mapper; }
virtual const vm_mapper & mapper() const override { return m_mapper; }
private:
vm_mapper_multi m_mapper;
};
/// A shareable but non-allocatable memory area (like mmio)
/// A shareable but non-allocatable memory area of contiguous physical
/// addresses (like mmio)
class vm_area_fixed :
public vm_area
{
public:
/// Constructor.
/// \arg size Initial virtual size of the memory area
/// \arg start Starting physical address of this area
/// \arg size Size of the physical memory area
/// \arg flags Flags for this memory area
vm_area_fixed(size_t size, vm_flags flags = vm_flags::none);
vm_area_fixed(uintptr_t start, size_t size, vm_flags flags = vm_flags::none);
virtual ~vm_area_fixed();
virtual bool allowed(uintptr_t offset) const override { return false; }
virtual vm_mapper & mapper() override { return m_mapper; }
virtual const vm_mapper & mapper() const override { return m_mapper; }
virtual size_t resize(size_t size) override;
virtual bool get_page(uintptr_t offset, uintptr_t &phys) override;
private:
vm_mapper_multi m_mapper;
uintptr_t m_start;
};
/// Area that allows open allocation (eg, kernel heap)
/// Area that allows open allocation
class vm_area_open :
public vm_area
{
public:
/// Constructor.
/// \arg size Initial virtual size of the memory area
/// \arg space The address space this area belongs to
/// \arg flags Flags for this memory area
vm_area_open(size_t size, vm_space &space, vm_flags flags);
vm_area_open(size_t size, vm_flags flags);
virtual vm_mapper & mapper() override { return m_mapper; }
virtual const vm_mapper & mapper() const override { return m_mapper; }
virtual void commit(uintptr_t phys, uintptr_t offset, size_t count) override;
virtual void uncommit(uintptr_t offset, size_t count) override;
virtual bool get_page(uintptr_t offset, uintptr_t &phys) override;
private:
vm_mapper_single m_mapper;
page_tree *m_mapped;
};
/// Area split into standard-sized segments
class vm_area_buffers :
/// Area that does not track its allocations and thus cannot be shared
class vm_area_untracked :
public vm_area
{
public:
/// Constructor.
/// \arg size Initial virtual size of the memory area
/// \arg space The address space this area belongs to
/// \arg flags Flags for this memory area
/// \arg buf_pages Pages in an individual buffer
vm_area_buffers(
vm_area_untracked(size_t size, vm_flags flags);
virtual ~vm_area_untracked();
virtual bool add_to(vm_space *space) override;
virtual bool get_page(uintptr_t offset, uintptr_t &phys) override;
};
/// Area split into standard-sized segments, separated by guard pages.
/// Based on vm_area_untracked; cannot be shared.
class vm_area_guarded :
public vm_area_untracked
{
public:
/// Constructor.
/// \arg start Initial address where this area is mapped
/// \arg sec_pages Pages in an individual section
/// \arg size Initial virtual size of the memory area
/// \arg flags Flags for this memory area
vm_area_guarded(
uintptr_t start,
size_t sec_pages,
size_t size,
vm_space &space,
vm_flags flags,
size_t buf_pages);
vm_flags flags);
/// Get an available stack address
uintptr_t get_buffer();
/// Get an available section in this area
uintptr_t get_section();
/// Return a buffer address to the available pool
void return_buffer(uintptr_t addr);
/// Return a section address to the available pool
void return_section(uintptr_t addr);
virtual vm_mapper & mapper() override { return m_mapper; }
virtual const vm_mapper & mapper() const override { return m_mapper; }
virtual bool allowed(uintptr_t offset) const override;
virtual void commit(uintptr_t phys, uintptr_t offset, size_t count) override;
virtual void uncommit(uintptr_t offset, size_t count) override;
virtual bool get_page(uintptr_t offset, uintptr_t &phys) override;
private:
vm_mapper_single m_mapper;
kutil::vector<uintptr_t> m_cache;
uintptr_t m_start;
size_t m_pages;
uintptr_t m_next;
};
/// Area backed by an external source (like a loaded program)
class vm_area_backed :
public vm_area
{
public:
/// Constructor.
/// \arg size Initial virtual size of the memory area
/// \arg flags Flags for this memory area
vm_area_backed(size_t size, vm_flags flags);
virtual vm_mapper & mapper() override { return m_mapper; }
virtual const vm_mapper & mapper() const override { return m_mapper; }
virtual void commit(uintptr_t phys, uintptr_t offset, size_t count) override;
virtual void uncommit(uintptr_t offset, size_t count) override;
private:
vm_mapper_multi m_mapper;
};
IS_BITFIELD(vm_flags);
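
To illustrate the reworked interface, a minimal hypothetical subclass
(not part of this commit) that backs every in-range offset with one
shared physical frame might look like:

    class vm_area_mirror :
        public vm_area
    {
    public:
        vm_area_mirror(size_t size, uintptr_t frame) :
            vm_area {size, vm_flags::none}, m_frame {frame} {}

        // Every in-range offset resolves to the same physical frame
        virtual bool get_page(uintptr_t offset, uintptr_t &phys) override {
            if (offset > m_size)
                return false;
            phys = m_frame;
            return true;
        }

    private:
        uintptr_t m_frame;
    };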

src/kernel/page_tree.cpp (new file, 152 lines)

@@ -0,0 +1,152 @@
#include "kutil/assert.h"
#include "kutil/memory.h"
#include "frame_allocator.h"
#include "page_tree.h"
// Page tree levels map the following parts of a pagewise offset:
// (Note that a level-0 node's entries are physical page addresses;
// the rest point to other page_tree nodes)
//
// Level 0: 0000000003f 64 pages / 256 KiB
// Level 1: 00000000fc0 4K pages / 16 MiB
// Level 2: 0000003f000 256K pages / 1 GiB
// Level 3: 00000fc0000 16M pages / 64 GiB
// Level 4: 0003f000000 1G pages / 4 TiB
// Level 5: 00fc0000000 64G pages / 256 TiB
// Level 6: 3f000000000 4T pages / 16 PiB -- Not supported until 5-level paging
static constexpr unsigned max_level = 5;
static constexpr unsigned bits_per_level = 6;
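// For example, byte offset 0x12345678 has pagewise offset
// 0x12345678 >> 12 == 0x12345, giving 6-bit indices of
// 0x05 at level 0, 0x0d at level 1, and 0x12 at level 2.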
inline uint64_t to_word(uint64_t base, uint64_t level, uint64_t flags = 0) {
// Clear out the non-appropriate bits for this level
base &= (~0x3full << (level*bits_per_level));
return
(base & 0x3ffffffffff) |
((level & 0x7) << 42) |
((flags & 0x7ffff) << 45);
}
inline uint64_t to_base(uint64_t word) {
return word & 0x3ffffffffff;
}
inline uint64_t to_level(uint64_t word) {
return (word >> 42) & 0x7;
}
inline uint64_t to_flags(uint64_t word) {
return (word >> 45);
}
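// Does the node described by 'word' cover page_off? Also sets index
// to page_off's slot within that node.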
inline bool contains(uint64_t page_off, uint64_t word, uint8_t &index) {
uint64_t base = to_base(word);
uint64_t bits = to_level(word) * bits_per_level;
index = (page_off >> bits) & 0x3f;
return (page_off & (~0x3full << bits)) == base;
}
inline uint64_t index_for(uint64_t page_off, uint8_t level) {
return (page_off >> (level*bits_per_level)) & 0x3f;
}
page_tree::page_tree(uint64_t base, uint8_t level) :
m_base {to_word(base, level)}
{
kutil::memset(m_entries, 0, sizeof(m_entries));
}
bool
page_tree::find(const page_tree *root, uint64_t offset, uintptr_t &page)
{
uint64_t page_off = offset >> 12; // change to pagewise offset
page_tree const *node = root;
while (node) {
uint8_t level = to_level(node->m_base);
uint8_t index = 0;
if (!contains(page_off, node->m_base, index))
return false;
if (!level) {
uintptr_t entry = node->m_entries[index].entry;
page = entry & ~1ull; // bit 0 marks 'present'
return (entry & 1);
}
node = node->m_entries[index].child;
}
return false;
}
bool
page_tree::find_or_add(page_tree * &root, uint64_t offset, uintptr_t &page)
{
uint64_t page_off = offset >> 12; // change to pagewise offset
page_tree *level0 = nullptr;
if (!root) {
// There's no root yet, just make a level0 and make it
// the root.
level0 = new page_tree(page_off, 0);
root = level0;
} else {
// Find or insert an existing level0
page_tree **parent = &root;
page_tree *node = root;
uint8_t parent_level = max_level + 1;
while (node) {
uint8_t level = to_level(node->m_base);
uint8_t index = 0;
if (!contains(page_off, node->m_base, index)) {
// We found a valid parent but the slot where this node should
// go contains another node. Insert an intermediate parent of
// this node and a new level0 into the parent.
uint64_t other = to_base(node->m_base);
uint8_t lcl = parent_level;
while (index_for(page_off, lcl) == index_for(other, lcl))
--lcl;
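// lcl is now the highest level at which the two offsets' indices
// differ; an intermediate node at that level can hold both subtrees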
page_tree *inter = new page_tree(page_off, lcl);
inter->m_entries[index_for(other, lcl)].child = node;
*parent = inter;
level0 = new page_tree(page_off, 0);
inter->m_entries[index_for(page_off, lcl)].child = level0;
break;
}
if (!level) {
level0 = node;
break;
}
parent = &node->m_entries[index].child;
node = *parent;
}
kassert( node || parent, "Both node and parent were null in find_or_add");
if (!node) {
// We found a parent with an empty spot where this node should
// be. Insert a new level0 there.
level0 = new page_tree(page_off, 0);
*parent = level0;
}
}
kassert(level0, "Got through find_or_add without a level0");
uint8_t index = index_for(page_off, 0);
uint64_t &ent = level0->m_entries[index].entry;
if (!(ent & 1)) {
// No entry for this page exists, so make one
if (!frame_allocator::get().allocate(1, &ent))
return false;
ent |= 1;
}
page = ent & ~0xfffull;
return true;
}

src/kernel/page_tree.h (new file, 39 lines)

@@ -0,0 +1,39 @@
#pragma once
/// \file page_tree.h
/// Definition of mapped page tracking structure and related definitions
#include <stdint.h>
/// A radix tree node that tracks mapped pages
class page_tree
{
public:
/// Get the physical address of the page at the given offset.
/// \arg root The root node of the tree
/// \arg offset Offset into the VMA, in bytes
/// \arg page [out] Receives the page physical address, if found
/// \returns True if a page was found
static bool find(const page_tree *root, uint64_t offset, uintptr_t &page);
/// Get the physical address of the page at the given offset. If one does
/// not exist yet, allocate a page, insert it, and return that.
/// \arg root [inout] The root node of the tree. This pointer may be updated.
/// \arg offset Offset into the VMA, in bytes
/// \arg page [out] Receives the page physical address, if found
/// \returns True if a page was found or successfully added
static bool find_or_add(page_tree * &root, uint64_t offset, uintptr_t &page);
private:
page_tree(uint64_t base, uint8_t level);
/// Stores the page offset of the start of this node's pages in bits 0:41
/// and the depth of tree this node represents in bits 42:44 (0-7)
uint64_t m_base;
/// For a level 0 node, the entries are all physical page addresses.
/// Other nodes contain pointers to child tree nodes.
union {
uintptr_t entry;
page_tree *child;
} m_entries[64];
};
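
As a usage sketch (mirroring vm_area_open::get_page in this commit;
the local names here are illustrative):

    page_tree *mapped = nullptr;   // an empty tree is just a null root
    uintptr_t phys = 0;
    // First call allocates a frame for offset 0x1000 and inserts it;
    // later calls return the same frame:
    if (page_tree::find_or_add(mapped, 0x1000, phys)) { /* map phys */ }
    // Read-only lookup that never allocates:
    bool present = page_tree::find(mapped, 0x1000, phys);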


@@ -14,7 +14,7 @@ j6_status_t
vma_create(j6_handle_t *handle, size_t size, uint32_t flags)
{
vm_flags f = vm_flags::user_mask & flags;
construct_handle<vm_area_shared>(handle, size, f);
construct_handle<vm_area_open>(handle, size, f);
return j6_status_ok;
}
@@ -22,7 +22,7 @@ j6_status_t
vma_create_map(j6_handle_t *handle, size_t size, uintptr_t base, uint32_t flags)
{
vm_flags f = vm_flags::user_mask & flags;
vm_area *a = construct_handle<vm_area_shared>(handle, size, f);
vm_area *a = construct_handle<vm_area_open>(handle, size, f);
process::current().space().add(base, a);
return j6_status_ok;
}


@@ -1,108 +0,0 @@
#include "objects/vm_area.h"
#include "vm_mapper.h"
#include "vm_space.h"
vm_mapper_single::vm_mapper_single(vm_area &area, vm_space &space) :
m_area(area), m_space(space)
{}
vm_mapper_single::~vm_mapper_single()
{
m_space.clear(m_area, 0, memory::page_count(m_area.size()), true);
}
bool
vm_mapper_single::can_resize(size_t size) const
{
return m_space.can_resize(m_area, size);
}
void
vm_mapper_single::map(uintptr_t offset, size_t count, uintptr_t phys)
{
m_space.page_in(m_area, offset, phys, count);
}
void
vm_mapper_single::unmap(uintptr_t offset, size_t count)
{
m_space.clear(m_area, offset, count, true);
}
void
vm_mapper_single::remove(vm_space *space)
{
size_t count = memory::page_count(m_area.size());
bool keep = m_area.flags() && vm_flags::mmio;
m_space.clear(m_area, 0, count, !keep);
}
vm_mapper_multi::vm_mapper_multi(vm_area &area) :
m_area(area)
{
}
vm_mapper_multi::~vm_mapper_multi()
{
if (!m_spaces.count())
return;
size_t count = memory::page_count(m_area.size());
for (int i = 1; i < m_spaces.count(); ++i)
m_spaces[i]->clear(m_area, 0, count);
m_spaces[0]->clear(m_area, 0, count, true);
}
bool
vm_mapper_multi::can_resize(size_t size) const
{
for (auto &it : m_spaces)
if (!it->can_resize(m_area, size))
return false;
return true;
}
void
vm_mapper_multi::map(uintptr_t offset, size_t count, uintptr_t phys)
{
for (auto &it : m_spaces)
it->page_in(m_area, offset, phys, count);
}
void
vm_mapper_multi::unmap(uintptr_t offset, size_t count)
{
for (auto &it : m_spaces)
it->clear(m_area, offset, count);
}
void
vm_mapper_multi::add(vm_space *space)
{
if (m_spaces.count()) {
vm_space *source = m_spaces[0];
space->copy_from(*source, m_area);
}
m_spaces.append(space);
}
void
vm_mapper_multi::remove(vm_space *space)
{
size_t count = memory::page_count(m_area.size());
bool keep = m_area.flags() && vm_flags::mmio;
for (int i = 0; i < m_spaces.count(); ++i) {
if (m_spaces[i] == space) {
m_spaces.remove_swap_at(i);
keep &= m_spaces.count() > 0;
space->clear(m_area, 0, count, !keep);
}
}
}


@@ -1,83 +0,0 @@
#pragma once
/// \file vm_mapper.h
/// VMA to address space mapping interface and implementing objects
#include <stdint.h>
#include "kutil/vector.h"
class vm_area;
class vm_space;
/// An interface to map vm_areas to one or more vm_spaces
class vm_mapper
{
public:
virtual ~vm_mapper() {}
/// Check whether the owning VMA can be resized to the given size.
/// \arg size The desired size
/// \returns True if resize is possible
virtual bool can_resize(size_t size) const = 0;
/// Map the given physical pages into the owning VMA at the given offset
/// \arg offset Offset into the VMA of the requested virtual address
/// \arg count Number of contiguous physical pages to map
/// \arg phys The starting physical address of the pages
virtual void map(uintptr_t offset, size_t count, uintptr_t phys) = 0;
/// Unmap the pages corresponding to the given offset from the owning VMA
/// \arg offset Offset into the VMA of the requested virtual address
/// \arg count Number of pages to unmap
virtual void unmap(uintptr_t offset, size_t count) = 0;
/// Add the given address space to the list of spaces the owning VMA is
/// mapped to, if applicable.
virtual void add(vm_space *space) {}
/// Remove the given address space from the list of spaces the owning VMA
/// is mapped to, if applicable.
virtual void remove(vm_space *space) {}
};
/// A vm_mapper that maps a VMA to a single vm_space
class vm_mapper_single :
public vm_mapper
{
public:
vm_mapper_single(vm_area &area, vm_space &space);
virtual ~vm_mapper_single();
virtual bool can_resize(size_t size) const override;
virtual void map(uintptr_t offset, size_t count, uintptr_t phys) override;
virtual void unmap(uintptr_t offset, size_t count) override;
vm_space & space() { return m_space; }
virtual void remove(vm_space *space) override;
private:
vm_area &m_area;
vm_space &m_space;
};
/// A vm_mapper that maps a VMA to multiple vm_spaces
class vm_mapper_multi :
public vm_mapper
{
public:
vm_mapper_multi(vm_area &area);
virtual ~vm_mapper_multi();
virtual bool can_resize(size_t size) const override;
virtual void map(uintptr_t offset, size_t count, uintptr_t phys) override;
virtual void unmap(uintptr_t offset, size_t count) override;
virtual void add(vm_space *space) override;
virtual void remove(vm_space *space) override;
private:
vm_area &m_area;
kutil::vector<vm_space*> m_spaces;
};


@@ -45,15 +45,15 @@ vm_space::vm_space() :
vm_space::~vm_space()
{
for (auto &a : m_areas)
a.area->mapper().remove(this);
for (auto &a : m_areas) {
bool free = a.area->remove_from(this);
clear(*a.area, 0, memory::page_count(a.area->size()), free);
a.area->handle_release();
}
kassert(!is_kernel(), "Kernel vm_space destructor!");
vm_space &kernel = kernel_space();
if (active())
kernel.activate();
kernel_space().activate();
// All VMAs have been removed by now, so just
// free all remaining pages and tables
@@ -71,7 +71,7 @@ vm_space::add(uintptr_t base, vm_area *area)
{
//TODO: check for collisions
m_areas.sorted_insert({base, area});
area->mapper().add(this);
area->add_to(this);
area->handle_retain();
return true;
}
@@ -81,8 +81,9 @@ vm_space::remove(vm_area *area)
{
for (auto &a : m_areas) {
if (a.area == area) {
bool free = area->remove_from(this);
clear(*area, 0, memory::page_count(area->size()), free);
m_areas.remove(a);
area->mapper().remove(this);
area->handle_release();
return true;
}
@@ -257,30 +258,6 @@ vm_space::initialize_tcb(TCB &tcb)
~memory::page_offset;
}
size_t
vm_space::allocate(uintptr_t virt, size_t count, uintptr_t *phys)
{
uintptr_t base = 0;
vm_area *area = get(virt, &base);
uintptr_t offset = (virt & ~0xfffull) - base;
if (!area || !area->allowed(offset))
return 0;
uintptr_t addr = 0;
size_t n = frame_allocator::get().allocate(count, &addr);
void *mem = memory::to_virtual<void>(addr);
if (area->flags() && vm_flags::zero)
kutil::memset(mem, 0, count * memory::frame_size);
area->commit(addr, offset, 1);
if (phys)
*phys = addr;
return n;
}
bool
vm_space::handle_fault(uintptr_t addr, fault_type fault)
{
@@ -288,9 +265,22 @@ vm_space::handle_fault(uintptr_t addr, fault_type fault)
if (fault && fault_type::present)
return false;
size_t n = allocate(addr, 1, nullptr);
kassert(n, "Failed to allocate a new page during page fault");
return n;
uintptr_t base = 0;
vm_area *area = get(addr, &base);
if (!area)
return false;
uintptr_t offset = (addr & ~0xfffull) - base;
uintptr_t phys_page = 0;
if (!area->get_page(offset, phys_page))
return false;
void *mem = memory::to_virtual<void>(phys_page);
if (area->flags() && vm_flags::zero)
kutil::memset(mem, 0, memory::frame_size);
page_in(*area, offset, phys_page, 1);
return true;
}
size_t


@@ -105,7 +105,7 @@ public:
static size_t copy(vm_space &source, vm_space &dest, void *from, void *to, size_t length);
private:
friend class vm_mapper_single;
friend class vm_area;
friend class vm_mapper_multi;
/// Find a given VMA in this address space