[kernel] Implement VMA page tracking
The previous method of VMA page tracking relied on the VMA always being mapped into at least one space, and simply kept track of pages via the spaces' page tables. This had a number of drawbacks, and the mapper system it required was too complex for the benefit it provided. Now VMAs themselves keep track of the spaces they are part of, and are responsible for knowing which page goes where. This greatly simplifies most kinds of VMA. The new vm_area_open (née vm_area_shared; there is now no reason for most VMAs to be explicitly shareable) adds a 64-ary radix tree for tracking allocated pages. The page_tree cannot yet handle taking pages away, but that is not something jsix can do yet anyway.
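To make the shape of the change concrete, here is a minimal sketch (not code from this commit; the fault-handler and mapping helpers named here are hypothetical) of how a space can now resolve a page for one of its areas: it asks the area via get_page and only touches its own page tables, with no shared mapper object in between.

    #include "objects/vm_area.h"
    #include "vm_space.h"

    // Hypothetical helper: maps virt -> phys in this space's page tables.
    // vm_space's real interface is not shown in this diff.
    bool space_map(vm_space &space, uintptr_t virt, uintptr_t phys);

    // Hypothetical sketch of a fault path under the new model.
    bool handle_area_fault(vm_space &space, vm_area &area,
                           uintptr_t area_base, uintptr_t fault_addr)
    {
        uintptr_t offset = fault_addr - area_base;

        uintptr_t phys = 0;
        if (!area.get_page(offset, phys))
            return false; // the area says no page belongs at this offset

        // The space only updates its own tables; the area has already
        // recorded (or allocated) the backing frame.
        return space_map(space, area_base + offset, phys);
    }

In this model a vm_area_open answers get_page through page_tree::find_or_add, presumably allocating a frame on first use and returning the recorded frame thereafter, so every space sharing the area sees the same page for a given offset. vm_area_untracked simply allocates a fresh frame each time, which is why it refuses to be added to more than one space.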
@@ -4,13 +4,13 @@
 #include "objects/channel.h"
 #include "objects/vm_area.h"
 
-extern vm_area_buffers g_kernel_buffers;
+extern vm_area_guarded g_kernel_buffers;
 
 constexpr size_t buffer_bytes = memory::kernel_buffer_pages * memory::frame_size;
 
 channel::channel() :
     m_len(0),
-    m_data(g_kernel_buffers.get_buffer()),
+    m_data(g_kernel_buffers.get_section()),
     m_buffer(reinterpret_cast<uint8_t*>(m_data), buffer_bytes),
     kobject(kobject::type::channel, j6_signal_channel_can_send)
 {
@@ -79,7 +79,7 @@ void
 channel::close()
 {
     kobject::close();
-    g_kernel_buffers.return_buffer(m_data);
+    g_kernel_buffers.return_section(m_data);
 }
 
 void
@@ -102,8 +102,8 @@ process::create_thread(uint8_t priority, bool user)
     if (user) {
         uintptr_t stack_top = stacks_top - (m_threads.count() * stack_size);
 
-        vm_area *vma = new vm_area_open(stack_size, m_space,
-                vm_flags::zero|vm_flags::write);
+        vm_flags flags = vm_flags::zero|vm_flags::write;
+        vm_area *vma = new vm_area_open(stack_size, flags);
         m_space.add(stack_top - stack_size, vma);
 
         // Space for null frame - because the page gets zeroed on
@@ -9,7 +9,7 @@
 extern "C" void kernel_to_user_trampoline();
 static constexpr j6_signal_t thread_default_signals = 0;
 
-extern vm_area_buffers g_kernel_stacks;
+extern vm_area_guarded g_kernel_stacks;
 
 thread::thread(process &parent, uint8_t pri, uintptr_t rsp0) :
     kobject(kobject::type::thread, thread_default_signals),
@@ -32,7 +32,7 @@ thread::thread(process &parent, uint8_t pri, uintptr_t rsp0) :
 
 thread::~thread()
 {
-    g_kernel_stacks.return_buffer(m_tcb.kernel_stack);
+    g_kernel_stacks.return_section(m_tcb.kernel_stack);
 }
 
 thread *
@@ -204,7 +204,7 @@ thread::setup_kernel_stack()
     constexpr unsigned null_frame_entries = 2;
     constexpr size_t null_frame_size = null_frame_entries * sizeof(uint64_t);
 
-    uintptr_t stack_addr = g_kernel_stacks.get_buffer();
+    uintptr_t stack_addr = g_kernel_stacks.get_section();
     uintptr_t stack_end = stack_addr + stack_bytes;
 
     uint64_t *null_frame = reinterpret_cast<uint64_t*>(stack_end - null_frame_size);
@@ -1,5 +1,7 @@
+#include "frame_allocator.h"
 #include "kernel_memory.h"
 #include "objects/vm_area.h"
+#include "page_tree.h"
 #include "vm_space.h"
 
 using memory::frame_size;
@@ -7,30 +9,31 @@ using memory::frame_size;
 vm_area::vm_area(size_t size, vm_flags flags) :
     m_size {size},
     m_flags {flags},
+    m_spaces {m_vector_static, 0, static_size},
     kobject {kobject::type::vma}
 {
 }
 
 vm_area::~vm_area() {}
 
-size_t
-vm_area::resize(size_t size)
+bool
+vm_area::add_to(vm_space *space)
 {
-    if (mapper().can_resize(size))
-        m_size = size;
-    return m_size;
+    for (auto *s : m_spaces) {
+        if (s == space)
+            return true;
+    }
+    m_spaces.append(space);
+    return true;
 }
 
-void
-vm_area::commit(uintptr_t phys, uintptr_t offset, size_t count)
+bool
+vm_area::remove_from(vm_space *space)
 {
-    mapper().map(offset, count, phys);
-}
-
-void
-vm_area::uncommit(uintptr_t offset, size_t count)
-{
-    mapper().unmap(offset, count);
+    m_spaces.remove_swap(space);
+    return
+        !m_spaces.count() &&
+        !(m_flags && vm_flags::mmio);
 }
 
 void
@@ -40,21 +43,25 @@ vm_area::on_no_handles()
     delete this;
 }
 
 
-vm_area_shared::vm_area_shared(size_t size, vm_flags flags) :
-    m_mapper {*this},
-    vm_area {size, flags}
+size_t
+vm_area::resize(size_t size)
 {
+    if (can_resize(size))
+        m_size = size;
+    return m_size;
 }
 
-vm_area_shared::~vm_area_shared()
+bool
+vm_area::can_resize(size_t size)
 {
+    for (auto *space : m_spaces)
+        if (!space->can_resize(*this, size))
+            return false;
+    return true;
 }
 
 
-vm_area_fixed::vm_area_fixed(size_t size, vm_flags flags) :
-    m_mapper {*this},
+vm_area_fixed::vm_area_fixed(uintptr_t start, size_t size, vm_flags flags) :
+    m_start {start},
     vm_area {size, flags}
 {
 }
@@ -63,36 +70,72 @@ vm_area_fixed::~vm_area_fixed()
 {
 }
 
 
-vm_area_open::vm_area_open(size_t size, vm_space &space, vm_flags flags) :
-    m_mapper(*this, space),
-    vm_area(size, flags)
+size_t vm_area_fixed::resize(size_t size)
 {
+    // Not resizable
+    return m_size;
 }
 
-void
-vm_area_open::commit(uintptr_t phys, uintptr_t offset, size_t count)
+bool vm_area_fixed::get_page(uintptr_t offset, uintptr_t &phys)
 {
-    m_mapper.map(offset, count, phys);
-}
+    if (offset > m_size)
+        return false;
 
-void
-vm_area_open::uncommit(uintptr_t offset, size_t count)
-{
-    m_mapper.unmap(offset, count);
+    phys = m_start + offset;
+    return true;
 }
 
 
-vm_area_buffers::vm_area_buffers(size_t size, vm_space &space, vm_flags flags, size_t buf_pages) :
-    m_mapper {*this, space},
-    m_pages {buf_pages},
-    m_next {memory::frame_size},
+vm_area_untracked::vm_area_untracked(size_t size, vm_flags flags) :
     vm_area {size, flags}
 {
 }
 
+vm_area_untracked::~vm_area_untracked()
+{
+}
+
+bool
+vm_area_untracked::get_page(uintptr_t offset, uintptr_t &phys)
+{
+    if (offset > m_size)
+        return false;
+
+    return frame_allocator::get().allocate(1, &phys);
+}
+
+bool
+vm_area_untracked::add_to(vm_space *space)
+{
+    if (!m_spaces.count())
+        return vm_area::add_to(space);
+    return m_spaces[0] == space;
+}
+
+
+vm_area_open::vm_area_open(size_t size, vm_flags flags) :
+    m_mapped {nullptr},
+    vm_area {size, flags}
+{
+}
+
+bool
+vm_area_open::get_page(uintptr_t offset, uintptr_t &phys)
+{
+    return page_tree::find_or_add(m_mapped, offset, phys);
+}
+
+
+vm_area_guarded::vm_area_guarded(uintptr_t start, size_t buf_pages, size_t size, vm_flags flags) :
+    m_start {start},
+    m_pages {buf_pages},
+    m_next {memory::frame_size},
+    vm_area_untracked {size, flags}
+{
+}
 
 uintptr_t
-vm_area_buffers::get_buffer()
+vm_area_guarded::get_section()
 {
     if (m_cache.count() > 0) {
         return m_cache.pop();
@@ -100,33 +143,27 @@ vm_area_buffers::get_buffer()
 
     uintptr_t addr = m_next;
     m_next += (m_pages + 1) * memory::frame_size;
-    return m_mapper.space().lookup(*this, addr);
+    return m_start + addr;
 }
 
 void
-vm_area_buffers::return_buffer(uintptr_t addr)
+vm_area_guarded::return_section(uintptr_t addr)
 {
     m_cache.append(addr);
 }
 
 bool
-vm_area_buffers::allowed(uintptr_t offset) const
+vm_area_guarded::get_page(uintptr_t offset, uintptr_t &phys)
 {
-    if (offset >= m_next) return false;
+    if (offset > m_next)
+        return false;
 
-    // Buffers are m_pages big plus 1 leading guard page
-    return memory::page_align_down(offset) % (m_pages+1);
-}
-
-void
-vm_area_buffers::commit(uintptr_t phys, uintptr_t offset, size_t count)
-{
-    m_mapper.map(offset, count, phys);
-}
-
-void
-vm_area_buffers::uncommit(uintptr_t offset, size_t count)
-{
-    m_mapper.unmap(offset, count);
+    // make sure this isn't in a guard page. (sections are
+    // m_pages big plus 1 leading guard page, so page 0 is
+    // invalid)
+    if ((offset >> 12) % (m_pages+1) == 0)
+        return false;
+
+    return vm_area_untracked::get_page(offset, phys);
 }
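To make the guard-page arithmetic above concrete (a standalone illustration, not part of this commit; the value of m_pages is chosen arbitrarily): get_section() hands out m_start plus 1 page, then m_start plus (m_pages + 2) pages, and so on, so each section is preceded by one unmapped guard page, and get_page() refuses any page index that falls on a guard.

    #include <cstddef>
    #include <cstdint>

    // Illustration of the section/guard layout used by vm_area_guarded,
    // with m_pages = 4 picked only for the example (4 KiB pages).
    constexpr size_t page_size = 0x1000;
    constexpr size_t m_pages = 4;

    // Same check as vm_area_guarded::get_page: every (m_pages+1)-th page
    // of the area, starting at page 0, is a guard and gets no frame.
    constexpr bool is_guard_page(uintptr_t offset) {
        return ((offset >> 12) % (m_pages + 1)) == 0;
    }

    static_assert(is_guard_page(0x0000));   // page 0: guard before section 0
    static_assert(!is_guard_page(0x1000));  // page 1: first page of section 0
    static_assert(!is_guard_page(0x4000));  // page 4: last page of section 0
    static_assert(is_guard_page(0x5000));   // page 5: guard before section 1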
@@ -11,8 +11,8 @@
 
 #include "kernel_memory.h"
 #include "objects/kobject.h"
-#include "vm_mapper.h"
 
+class page_tree;
 class vm_space;
 
 enum class vm_flags : uint32_t
@@ -55,156 +55,131 @@ public:
     /// Get the flags set for this area
     inline vm_flags flags() const { return m_flags; }
 
+    /// Track that this area was added to a vm_space
+    /// \arg space The space to add this area to
+    /// \returns False if this area cannot be added
+    virtual bool add_to(vm_space *space);
+
+    /// Track that this area was removed from a vm_space
+    /// \arg space The space that is removing this area
+    /// \returns True if the removing space should free the pages
+    ///     mapped for this area
+    virtual bool remove_from(vm_space *space);
+
     /// Change the virtual size of the memory area. This may cause
     /// deallocation if the new size is smaller than the current size.
     /// Note that if resizing is unsuccessful, the previous size will
     /// be returned.
     /// \arg size The desired new virtual size
    /// \returns The new virtual size
-    size_t resize(size_t size);
+    virtual size_t resize(size_t size);
 
-    /// Get the mapper object that maps this area to address spaces
-    virtual vm_mapper & mapper() = 0;
-    virtual const vm_mapper & mapper() const = 0;
-
-    /// Check whether allocation at the given offset is allowed
-    virtual bool allowed(uintptr_t offset) const { return true; }
-
-    /// Commit contiguous physical pages to this area
-    /// \arg phys The physical address of the first page
-    /// \arg offset The offset from the start of this area these pages represent
-    /// \arg count The number of pages
-    virtual void commit(uintptr_t phys, uintptr_t offset, size_t count);
-
-    /// Uncommit physical pages from this area
-    /// \arg offset The offset from the start of this area these pages represent
-    /// \arg count The number of pages
-    virtual void uncommit(uintptr_t offset, size_t count);
+    /// Get the physical page for the given offset
+    /// \arg offset The offset into the VMA
+    /// \arg phys [out] Receives the physical page address, if any
+    /// \returns True if there should be a page at the given offset
+    virtual bool get_page(uintptr_t offset, uintptr_t &phys) = 0;
 
 protected:
     virtual void on_no_handles() override;
+    bool can_resize(size_t size);
 
     size_t m_size;
     vm_flags m_flags;
+    kutil::vector<vm_space*> m_spaces;
+
+    // Initial static space for m_spaces - most areas will never grow
+    // beyond this size, so avoid allocations
+    static constexpr size_t static_size = 2;
+    vm_space *m_vector_static[static_size];
 };
 
 
-/// The standard, sharable, user-controllable VMA type
-class vm_area_shared :
-    public vm_area
-{
-public:
-    /// Constructor.
-    /// \arg size Initial virtual size of the memory area
-    /// \arg flags Flags for this memory area
-    vm_area_shared(size_t size, vm_flags flags = vm_flags::none);
-    virtual ~vm_area_shared();
-
-    virtual vm_mapper & mapper() override { return m_mapper; }
-    virtual const vm_mapper & mapper() const override { return m_mapper; }
-
-private:
-    vm_mapper_multi m_mapper;
-};
-
-
-/// A shareable but non-allocatable memory area (like mmio)
+/// A shareable but non-allocatable memory area of contiguous physical
+/// addresses (like mmio)
 class vm_area_fixed :
     public vm_area
 {
 public:
     /// Constructor.
-    /// \arg size Initial virtual size of the memory area
+    /// \arg start Starting physical address of this area
+    /// \arg size Size of the physical memory area
     /// \arg flags Flags for this memory area
-    vm_area_fixed(size_t size, vm_flags flags = vm_flags::none);
+    vm_area_fixed(uintptr_t start, size_t size, vm_flags flags = vm_flags::none);
     virtual ~vm_area_fixed();
 
-    virtual bool allowed(uintptr_t offset) const override { return false; }
-    virtual vm_mapper & mapper() override { return m_mapper; }
-    virtual const vm_mapper & mapper() const override { return m_mapper; }
+    virtual size_t resize(size_t size) override;
+    virtual bool get_page(uintptr_t offset, uintptr_t &phys) override;
 
 private:
-    vm_mapper_multi m_mapper;
+    uintptr_t m_start;
 };
 
 
-/// Area that allows open allocation (eg, kernel heap)
+/// Area that allows open allocation
 class vm_area_open :
     public vm_area
 {
 public:
     /// Constructor.
     /// \arg size Initial virtual size of the memory area
-    /// \arg space The address space this area belongs to
     /// \arg flags Flags for this memory area
-    vm_area_open(size_t size, vm_space &space, vm_flags flags);
+    vm_area_open(size_t size, vm_flags flags);
 
-    virtual vm_mapper & mapper() override { return m_mapper; }
-    virtual const vm_mapper & mapper() const override { return m_mapper; }
-
-    virtual void commit(uintptr_t phys, uintptr_t offset, size_t count) override;
-    virtual void uncommit(uintptr_t offset, size_t count) override;
+    virtual bool get_page(uintptr_t offset, uintptr_t &phys) override;
 
 private:
-    vm_mapper_single m_mapper;
+    page_tree *m_mapped;
 };
 
 
-/// Area split into standard-sized segments
-class vm_area_buffers :
-    public vm_area
-{
-public:
-    /// Constructor.
-    /// \arg size Initial virtual size of the memory area
-    /// \arg space The address space this area belongs to
-    /// \arg flags Flags for this memory area
-    /// \arg buf_pages Pages in an individual buffer
-    vm_area_buffers(
-        size_t size,
-        vm_space &space,
-        vm_flags flags,
-        size_t buf_pages);
-
-    /// Get an available stack address
-    uintptr_t get_buffer();
-
-    /// Return a buffer address to the available pool
-    void return_buffer(uintptr_t addr);
-
-    virtual vm_mapper & mapper() override { return m_mapper; }
-    virtual const vm_mapper & mapper() const override { return m_mapper; }
-
-    virtual bool allowed(uintptr_t offset) const override;
-    virtual void commit(uintptr_t phys, uintptr_t offset, size_t count) override;
-    virtual void uncommit(uintptr_t offset, size_t count) override;
-
-private:
-    vm_mapper_single m_mapper;
-    kutil::vector<uintptr_t> m_cache;
-    size_t m_pages;
-    uintptr_t m_next;
-};
-
-
-/// Area backed by an external source (like a loaded program)
-class vm_area_backed :
+/// Area that does not track its allocations and thus cannot be shared
+class vm_area_untracked :
     public vm_area
 {
 public:
     /// Constructor.
     /// \arg size Initial virtual size of the memory area
     /// \arg flags Flags for this memory area
-    vm_area_backed(size_t size, vm_flags flags);
+    vm_area_untracked(size_t size, vm_flags flags);
+    virtual ~vm_area_untracked();
 
-    virtual vm_mapper & mapper() override { return m_mapper; }
-    virtual const vm_mapper & mapper() const override { return m_mapper; }
-
-    virtual void commit(uintptr_t phys, uintptr_t offset, size_t count) override;
-    virtual void uncommit(uintptr_t offset, size_t count) override;
-
-private:
-    vm_mapper_multi m_mapper;
+    virtual bool add_to(vm_space *space) override;
+    virtual bool get_page(uintptr_t offset, uintptr_t &phys) override;
 };
 
 
+/// Area split into standard-sized segments, separated by guard pages.
+/// Based on vm_area_untracked, can not be shared.
+class vm_area_guarded :
+    public vm_area_untracked
+{
+public:
+    /// Constructor.
+    /// \arg start Initial address where this area is mapped
+    /// \arg sec_pages Pages in an individual section
+    /// \arg size Initial virtual size of the memory area
+    /// \arg flags Flags for this memory area
+    vm_area_guarded(
+        uintptr_t start,
+        size_t sec_pages,
+        size_t size,
+        vm_flags flags);
+
+    /// Get an available section in this area
+    uintptr_t get_section();
+
+    /// Return a section address to the available pool
+    void return_section(uintptr_t addr);
+
+    virtual bool get_page(uintptr_t offset, uintptr_t &phys) override;
+
+private:
+    kutil::vector<uintptr_t> m_cache;
+    uintptr_t m_start;
+    size_t m_pages;
+    uintptr_t m_next;
+};
+
+
 IS_BITFIELD(vm_flags);