[kernel] Move the page table cache into page_table

Further chipping away at page_manager: the cache of pages to be used as
page tables gets moved to a static in page_table.
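
In short: the free list of page-table pages that page_manager kept in m_page_cache becomes a pair of statics on page_table (s_page_cache and s_cache_count), fronted by get_table_page and free_table_page. A minimal sketch of the resulting call pattern, using only the names visible in the diff below (the call site itself is hypothetical):

    // Hypothetical call site: callers now ask page_table directly for a
    // cached table page instead of going through page_manager.
    page_table *t = page_table::get_table_page();  // pops a page, refilling the
                                                   // static cache if it is empty
    // ... use t as a page table ...
    page_table::free_table_page(t);                // pushes the page back
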
2020-09-17 21:30:05 -07:00
parent 09575370ce
commit ac67111b83
7 changed files with 86 additions and 71 deletions


@@ -111,7 +111,7 @@ memory_initialize_pre_ctors(args::header *kargs)
     // Create the page manager
     new (&g_page_manager) page_manager {g_frame_allocator, kpml4};
-    vm_space &vm = *new (&g_kernel_space) vm_space {kpml4, true};
+    vm_space &vm = *new (&g_kernel_space) vm_space {kpml4};
 
     vm.allow(memory::heap_start, memory::kernel_max_heap, true);
 }


@@ -37,16 +37,10 @@ pt_from_phys(uintptr_t p)
 }
 
-struct free_page_header
-{
-    free_page_header *next;
-    size_t count;
-};
-
 page_manager::page_manager(frame_allocator &frames, page_table *pml4) :
     m_kernel_pml4(pml4),
-    m_page_cache(nullptr),
     m_frames(frames)
 {
 }
@@ -54,7 +48,7 @@ page_manager::page_manager(frame_allocator &frames, page_table *pml4) :
 page_table *
 page_manager::create_process_map()
 {
-    page_table *table = get_table_page();
+    page_table *table = page_table::get_table_page();
 
     kutil::memset(table, 0, frame_size/2);
     for (unsigned i = pml4e_kernel; i < table_entries; ++i)
@@ -107,47 +101,6 @@ page_manager::dump_pml4(page_table *pml4, bool recurse)
     pml4->dump(page_table::level::pml4, recurse);
 }
 
-page_table *
-page_manager::get_table_page()
-{
-    if (!m_page_cache) {
-        uintptr_t phys = 0;
-        size_t n = m_frames.allocate(32, &phys); // TODO: indicate frames must be offset-mappable
-        uintptr_t virt = phys + page_offset;
-        m_page_cache = reinterpret_cast<free_page_header *>(virt);
-
-        // The last one needs to be null, so do n-1
-        uintptr_t end = virt + (n-1) * frame_size;
-        while (virt < end) {
-            reinterpret_cast<free_page_header *>(virt)->next =
-                reinterpret_cast<free_page_header *>(virt + frame_size);
-            virt += frame_size;
-        }
-        reinterpret_cast<free_page_header *>(virt)->next = nullptr;
-
-        log::info(logs::paging, "Mappd %d new page table pages at %lx", n, phys);
-    }
-
-    free_page_header *page = m_page_cache;
-    m_page_cache = page->next;
-    return reinterpret_cast<page_table *>(page);
-}
-
-void
-page_manager::free_table_pages(void *pages, size_t count)
-{
-    uintptr_t start = reinterpret_cast<uintptr_t>(pages);
-    for (size_t i = 0; i < count; ++i) {
-        uintptr_t addr = start + (i * frame_size);
-        free_page_header *header = reinterpret_cast<free_page_header *>(addr);
-        header->count = 1;
-        header->next = m_page_cache;
-        m_page_cache = header;
-    }
-}
-
 void *
 page_manager::map_pages(uintptr_t address, size_t count, bool user, page_table *pml4)
 {
@@ -233,7 +186,7 @@ page_manager::unmap_table(page_table *table, page_table::level lvl, bool free, p
         m_frames.free(free_start, (free_count * size) / frame_size);
     }
 
-    free_table_pages(table, 1);
+    page_table::free_table_page(table);
 
     log::debug(logs::paging, "Unmapped%s lv %d table at %016lx",
             free ? " (and freed)" : "", lvl, table);
@@ -255,7 +208,7 @@ page_manager::check_needs_page(page_table *table, unsigned index, bool user)
 {
     if ((table->entries[index] & 0x1) == 1) return;
 
-    page_table *new_table = get_table_page();
+    page_table *new_table = page_table::get_table_page();
     for (int i=0; i<table_entries; ++i) new_table->entries[i] = 0;
 
     table->entries[index] = pt_to_phys(new_table) | (user ? user_table_flags : sys_table_flags);
 }


@@ -90,15 +90,6 @@ public:
     inline page_table * get_kernel_pml4() { return m_kernel_pml4; }
 
 private:
-    /// Allocate a page for a page table, or pull one from the cache
-    /// \returns An empty page mapped in page space
-    page_table * get_table_page();
-
-    /// Return a set of mapped contiguous pages to the page cache.
-    /// \arg pages Pointer to the first page to be returned
-    /// \arg count Number of pages in the range
-    void free_table_pages(void *pages, size_t count);
-
     /// Helper function to allocate a new page table. If table entry `i` in
     /// table `base` is empty, allocate a new page table and point `base[i]` at
     /// it.
@@ -138,7 +129,6 @@ private:
             page_table_indices index = {});
 
     page_table *m_kernel_pml4; ///< The PML4 of just kernel pages
-    free_page_header *m_page_cache; ///< Cache of free pages to use for tables
 
     frame_allocator &m_frames;


@@ -8,6 +8,11 @@
 using memory::page_offset;
 using level = page_table::level;
 
+extern frame_allocator &g_frame_allocator;
+
+free_page_header * page_table::s_page_cache = nullptr;
+size_t page_table::s_cache_count = 0;
+
 // Flags: 0 0 0 0 0 0 0 0 0 0 1 1 = 0x0003
 // IGNORED  | | | | | | | +- Present
 //          | | | | | | +--- Writeable
@@ -20,6 +25,7 @@ using level = page_table::level;
 /// Page table entry flags for entries pointing at another table
 constexpr uint16_t table_flags = 0x003;
 
+
 page_table::iterator::iterator(uintptr_t virt, page_table *pml4) :
     m_table {pml4, 0, 0, 0}
 {
@@ -157,9 +163,6 @@ page_table::iterator::ensure_table(level l)
     if (l == level::pml4 || l > level::pt) return;
     if (check_table(l)) return;
 
-    // TODO: a better way to get at the frame allocator
-    extern frame_allocator g_frame_allocator;
-
     uintptr_t phys = 0;
     size_t n = g_frame_allocator.allocate(1, &phys);
     kassert(n, "Failed to allocate a page table");
@@ -195,6 +198,55 @@ page_table::set(int i, page_table *p, uint16_t flags)
         (flags & 0xfff);
 }
 
+struct free_page_header { free_page_header *next; };
+
+page_table *
+page_table::get_table_page()
+{
+    if (!s_cache_count)
+        fill_table_page_cache();
+
+    free_page_header *page = s_page_cache;
+    s_page_cache = s_page_cache->next;
+    --s_cache_count;
+    return reinterpret_cast<page_table*>(page);
+}
+
+void
+page_table::free_table_page(page_table *pt)
+{
+    free_page_header *page =
+        reinterpret_cast<free_page_header*>(pt);
+    page->next = s_page_cache;
+    s_page_cache = page;
+    ++s_cache_count;
+}
+
+void
+page_table::fill_table_page_cache()
+{
+    constexpr size_t min_pages = 16;
+    while (s_cache_count < min_pages) {
+        uintptr_t phys = 0;
+        size_t n = g_frame_allocator.allocate(min_pages - s_cache_count, &phys);
+
+        free_page_header *start =
+            memory::to_virtual<free_page_header>(phys);
+        for (size_t i = 0; i < n - 1; ++i)
+            kutil::offset_pointer(start, i * memory::frame_size)
+                ->next = kutil::offset_pointer(start, (i+1) * memory::frame_size);
+
+        free_page_header *end =
+            kutil::offset_pointer(start, (n-1) * memory::frame_size);
+        end->next = s_page_cache;
+        s_page_cache = start;
+        s_cache_count += n;
+    }
+}
+
 void
 page_table::dump(page_table::level lvl, bool recurse)
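
The refill path above threads a next pointer through the first word of each freshly allocated page: a classic intrusive free list, so the cache costs no memory beyond the pages it holds. A self-contained sketch of the same linking step, with hypothetical names (link_pages is not in the kernel) and plain casts standing in for kutil::offset_pointer:

    #include <cstddef>
    #include <cstdint>

    struct free_page_header { free_page_header *next; };

    constexpr size_t frame_size = 4096;

    // Thread a free list through `n` contiguous pages starting at `base`
    // (n must be >= 1), pointing the tail at `old_head`; returns the new
    // head. This mirrors the loop in fill_table_page_cache above.
    free_page_header *link_pages(void *base, size_t n, free_page_header *old_head)
    {
        uintptr_t start = reinterpret_cast<uintptr_t>(base);
        for (size_t i = 0; i + 1 < n; ++i) {
            auto *page = reinterpret_cast<free_page_header *>(start + i * frame_size);
            page->next = reinterpret_cast<free_page_header *>(start + (i + 1) * frame_size);
        }
        auto *tail = reinterpret_cast<free_page_header *>(start + (n - 1) * frame_size);
        tail->next = old_head;
        return reinterpret_cast<free_page_header *>(start);
    }
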


@@ -5,6 +5,7 @@
 #include <stdint.h>
 #include "kernel_memory.h"
 
+struct free_page_header;
 class page_manager;
 
 /// Struct to allow easy accessing of a memory page being used as a page table.
@@ -111,6 +112,21 @@ struct page_table
         uint16_t m_index[D];
     };
 
+    /// Allocate a page for a page table, or pull one from the cache
+    /// \returns An empty page, mapped in the linear offset area
+    static page_table * get_table_page();
+
+    /// Return a page table's page to the page cache.
+    /// \arg pt The page to be returned
+    static void free_table_page(page_table *pt);
+
+    /// Ensure the page table page cache has a minimum number of pages
+    /// in it.
+    static void fill_table_page_cache();
+
+    static free_page_header *s_page_cache; ///< Cache of free pages to use for tables
+    static size_t s_cache_count;           ///< Number of pages in s_page_cache
+
     /// Get an entry in the page table as a page_table pointer
     /// \arg i Index of the entry in this page table
     /// \arg flags [out] If set, this will receive the entry's flags


@@ -19,9 +19,10 @@ vm_space::area::operator==(const vm_space::area &o) const
 }
 
-vm_space::vm_space(page_table *p, bool kernel) :
-    m_kernel(kernel),
-    m_pml4(p)
+vm_space::vm_space(page_table *p) : m_kernel(true), m_pml4(p) {}
+
+vm_space::vm_space() :
+    m_kernel(false)
 {
 }
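
With the constructor split above, the bool flag disappears and the choice of constructor encodes whether this is the kernel's address space. A hedged sketch of the two call shapes; the first matches the updated memory_initialize_pre_ctors hunk at the top, the second is a hypothetical user-space caller:

    vm_space kernel_space {kpml4};  // adopt the existing kernel PML4; m_kernel = true
    vm_space user_space;            // build a fresh address space; m_kernel = false
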


@@ -14,10 +14,12 @@ class vm_area;
 class vm_space
 {
 public:
-    /// Constructor.
-    /// \arg pml4 The pml4 for this address space
-    /// \arg kernel True if this is the kernel address space
-    vm_space(page_table *pml4, bool kernel = false);
+    /// Constructor for the kernel address space
+    /// \arg pml4 The existing kernel PML4
+    vm_space(page_table *pml4);
+
+    /// Constructor. Creates a new address space.
+    vm_space();
 
     ~vm_space();
@@ -81,6 +83,7 @@ private:
     bool m_kernel;
     page_table *m_pml4;
 
+
     struct area {
         uintptr_t base;
         vm_area *area;