Enable paging WIP

This commit is contained in:
Justin C. Miller
2018-04-25 10:48:14 -07:00
parent fd9e0944cb
commit bed882f41c
5 changed files with 310 additions and 144 deletions

View File

@@ -7,6 +7,7 @@
#include "interrupts.h" #include "interrupts.h"
#include "kernel_data.h" #include "kernel_data.h"
#include "memory.h" #include "memory.h"
#include "memory_pages.h"
#include "screen.h" #include "screen.h"
extern "C" { extern "C" {
@@ -46,6 +47,9 @@ kernel_main(popcorn_data *header)
{ {
console *cons = new (&g_console) console(); console *cons = new (&g_console) console();
page_manager *pager = new (&g_page_manager) page_manager;
pager->mark_offset_pointer(&header->frame_buffer, header->frame_buffer_length);
memory_initialize_managers( memory_initialize_managers(
header->memory_map, header->memory_map,
header->memory_map_length, header->memory_map_length,

View File

@@ -14,8 +14,6 @@ public:
private: private:
friend class page_manager; friend class page_manager;
}; };
extern memory_manager g_memory_manager; extern memory_manager g_memory_manager;

View File

@@ -273,7 +273,8 @@ gather_block_lists(
} }
page_block * page_block *
fill_page_with_blocks(uint64_t start) { fill_page_with_blocks(uint64_t start)
{
uint64_t end = page_align(start); uint64_t end = page_align(start);
uint64_t count = (end - start) / sizeof(page_block); uint64_t count = (end - start) / sizeof(page_block);
if (count == 0) return nullptr; if (count == 0) return nullptr;
@@ -294,7 +295,8 @@ copy_new_table(page_table *base, unsigned index, page_table *new_table)
if(entry & 0x80) return; if(entry & 0x80) return;
if (entry & 0x1) { if (entry & 0x1) {
page_table *old_next = base->next(index); page_table *old_next = reinterpret_cast<page_table *>(
base->entries[index] & ~0xffful);
for (int i = 0; i < 512; ++i) new_table->entries[i] = old_next->entries[i]; for (int i = 0; i < 512; ++i) new_table->entries[i] = old_next->entries[i];
} else { } else {
for (int i = 0; i < 512; ++i) new_table->entries[i] = 0; for (int i = 0; i < 512; ++i) new_table->entries[i] = 0;
@@ -348,6 +350,58 @@ find_efi_free_aligned_pages(const void *memory_map, size_t map_length, size_t de
return start_phys; return start_phys;
} }
/// If entry `index` of `table` is not present, pull a fresh page from
/// `free_pages`, zero it, and install it as the next-level table.
/// \arg table       Page table being indexed into
/// \arg index       Entry within `table` to check
/// \arg free_pages  In/out pointer into a bump-allocated run of free, mapped pages
/// \returns         Number of pages consumed from `*free_pages` (0 or 1)
static unsigned
check_needs_page_ident(page_table *table, unsigned index, page_table **free_pages)
{
	// Precedence fix: `e & 0x1 == 1` parses as `e & (0x1 == 1)`, which only
	// happened to be equivalent. Mask and compare the present bit explicitly.
	if ((table->entries[index] & 0x1) == 1) return 0;

	kassert(*free_pages, "check_needs_page_ident needed to allocate but had no free pages");

	page_table *new_table = (*free_pages)++;
	for (int i=0; i<512; ++i) new_table->entries[i] = 0;

	// 0xb = present | writable | write-through
	table->entries[index] = reinterpret_cast<uint64_t>(new_table) | 0xb;
	return 1;
}
/// Bootstrap-phase page mapper: map `count` pages starting at `phys_addr`
/// to `virt_addr` under `pml4`, bump-allocating intermediate tables from
/// the identity-accessible `free_pages` run.
/// \arg pml4       The root page table to map into
/// \arg phys_addr  The starting physical address of the pages to be mapped
/// \arg virt_addr  The starting virtual address of the memory to be mapped
/// \arg count      The number of pages to map
/// \arg free_pages A run of free, mapped pages to use for new page tables
/// \returns        The number of pages consumed from `free_pages`
static unsigned
page_in_ident(
	page_table *pml4,
	uint64_t phys_addr,
	uint64_t virt_addr,
	uint64_t count,
	page_table *free_pages)
{
	page_table_indices idx{virt_addr};
	page_table *tables[4] = {pml4, nullptr, nullptr, nullptr};
	unsigned pages_consumed = 0;

	// When a higher-level index advances, all lower-level indices must
	// restart at zero; otherwise a range crossing a table boundary would
	// resume with stale (possibly == 512) lower indices and skip the
	// remainder of the next table.
	for (; idx[0] < 512; idx[0] += 1, idx[1] = 0, idx[2] = 0, idx[3] = 0) {
		pages_consumed += check_needs_page_ident(tables[0], idx[0], &free_pages);
		tables[1] = reinterpret_cast<page_table *>(
				tables[0]->entries[idx[0]] & ~0xfffull);

		for (; idx[1] < 512; idx[1] += 1, idx[2] = 0, idx[3] = 0) {
			pages_consumed += check_needs_page_ident(tables[1], idx[1], &free_pages);
			tables[2] = reinterpret_cast<page_table *>(
					tables[1]->entries[idx[1]] & ~0xfffull);

			for (; idx[2] < 512; idx[2] += 1, idx[3] = 0) {
				pages_consumed += check_needs_page_ident(tables[2], idx[2], &free_pages);
				tables[3] = reinterpret_cast<page_table *>(
						tables[2]->entries[idx[2]] & ~0xfffull);

				for (; idx[3] < 512; idx[3] += 1) {
					// 0xb = present | writable | write-through
					tables[3]->entries[idx[3]] = phys_addr | 0xb;
					phys_addr += page_manager::page_size;
					if (--count == 0) return pages_consumed;
				}
			}
		}
	}

	kassert(0, "Ran to end of page_in_ident");
	// Unreachable when the assert fires, but prevents falling off the end
	// of a value-returning function (UB) if kassert is compiled out.
	return pages_consumed;
}
void void
memory_initialize_managers(const void *memory_map, size_t map_length, size_t desc_length) memory_initialize_managers(const void *memory_map, size_t map_length, size_t desc_length)
{ {
@@ -355,7 +409,9 @@ memory_initialize_managers(const void *memory_map, size_t map_length, size_t des
// The bootloader reserved 16 pages for page tables, which we'll use to bootstrap. // The bootloader reserved 16 pages for page tables, which we'll use to bootstrap.
// The first one is the already-installed PML4, so grab it from CR3. // The first one is the already-installed PML4, so grab it from CR3.
page_table *tables = page_manager::get_pml4(); uint64_t cr3;
__asm__ __volatile__ ( "mov %%cr3, %0" : "=r" (cr3) );
page_table *tables = reinterpret_cast<page_table *>(cr3 & ~0xfffull);
// Now go through EFI's memory map and find a region of scratch space. // Now go through EFI's memory map and find a region of scratch space.
const unsigned want_pages = 32; const unsigned want_pages = 32;
@@ -375,7 +431,7 @@ memory_initialize_managers(const void *memory_map, size_t map_length, size_t des
copy_new_table(&tables[0], fr_idx[0], &tables[1]); copy_new_table(&tables[0], fr_idx[0], &tables[1]);
copy_new_table(&tables[1], fr_idx[1], &tables[2]); copy_new_table(&tables[1], fr_idx[1], &tables[2]);
copy_new_table(&tables[2], fr_idx[2], &tables[3]); copy_new_table(&tables[2], fr_idx[2], &tables[3]);
page_in(&tables[0], free_region_start_phys, free_region_start_virt, want_pages, nullptr); page_in_ident(&tables[0], free_region_start_phys, free_region_start_virt, want_pages, nullptr);
// We now have pages starting at "free_next" to bootstrap ourselves. Start by // We now have pages starting at "free_next" to bootstrap ourselves. Start by
// taking inventory of free pages. // taking inventory of free pages.
@@ -385,6 +441,9 @@ memory_initialize_managers(const void *memory_map, size_t map_length, size_t des
free_next, memory_map, map_length, desc_length, free_next, memory_map, map_length, desc_length,
&free_head, &used_head); &free_head, &used_head);
page_block::dump(used_head, "original used", true);
page_block::dump(free_head, "original free", true);
// Unused page_block structs go here - finish out the current page with them // Unused page_block structs go here - finish out the current page with them
page_block *cache_head = fill_page_with_blocks(free_next); page_block *cache_head = fill_page_with_blocks(free_next);
free_next = page_align(free_next); free_next = page_align(free_next);
@@ -414,7 +473,7 @@ memory_initialize_managers(const void *memory_map, size_t map_length, size_t des
// Add it to the used list // Add it to the used list
removed->virtual_address = free_region_start_virt; removed->virtual_address = free_region_start_virt;
removed->flags = page_block_flags::used; removed->flags = page_block_flags::used | page_block_flags::mapped;
used_head = page_block::insert(used_head, removed); used_head = page_block::insert(used_head, removed);
// Pull out the block that represents the rest // Pull out the block that represents the rest
@@ -435,43 +494,45 @@ memory_initialize_managers(const void *memory_map, size_t map_length, size_t des
// Record that we're about to remap it into the page table address space // Record that we're about to remap it into the page table address space
removed->virtual_address = pt_start_virt; removed->virtual_address = pt_start_virt;
removed->flags = page_block_flags::used; removed->flags = page_block_flags::used | page_block_flags::mapped;
used_head = page_block::insert(used_head, removed); used_head = page_block::insert(used_head, removed);
page_manager *pm = &g_page_manager;
// Actually remap them into page table space // Actually remap them into page table space
page_out(&tables[0], free_next, remaining_pages); pm->page_out(&tables[0], free_next, remaining_pages);
page_table_indices pg_idx{pt_start_virt}; page_table_indices pg_idx{pt_start_virt};
copy_new_table(&tables[0], pg_idx[0], &tables[4]); copy_new_table(&tables[0], pg_idx[0], &tables[4]);
copy_new_table(&tables[4], pg_idx[1], &tables[5]); copy_new_table(&tables[4], pg_idx[1], &tables[5]);
copy_new_table(&tables[5], pg_idx[2], &tables[6]); copy_new_table(&tables[5], pg_idx[2], &tables[6]);
page_in(&tables[0], pt_start_phys, pt_start_virt, remaining_pages, tables + 4); page_in_ident(&tables[0], pt_start_phys, pt_start_virt, remaining_pages, tables + 4);
// Ok, now build an actual set of kernel page tables that just contains // Finally, build an actual set of kernel page tables that just contains
// what the kernel actually has mapped. // what the kernel actually has mapped, but making everything writable
page_table *pages = reinterpret_cast<page_table *>(pt_start_virt);
unsigned consumed_pages = 1; // We're about to make a PML4, start with 1
// Finally, remap the existing mappings, but making everything writable
// (especially the page tables themselves) // (especially the page tables themselves)
page_table *pml4 = pages++; page_table *pml4 = reinterpret_cast<page_table *>(pt_start_virt);
for (int i=0; i<512; ++i) pml4->entries[i] = 0; for (int i=0; i<512; ++i) pml4->entries[i] = 0;
// Give the rest to the page_manager's cache for use in page_in
pm->free_table_pages(pml4 + 1, remaining_pages - 1);
for (page_block *cur = used_head; cur; cur = cur->next) { for (page_block *cur = used_head; cur; cur = cur->next) {
if (!cur->has_flag(page_block_flags::mapped)) continue; if (!cur->has_flag(page_block_flags::mapped)) continue;
consumed_pages += page_in(pml4, cur->physical_address, cur->virtual_address, pm->page_in(pml4, cur->physical_address, cur->virtual_address, cur->count);
cur->count, pages + consumed_pages);
} }
free_next += (consumed_pages * page_manager::page_size);
page_block::dump(used_head, "used", true);
page_block::dump(free_head, "free", true);
cons->printf("free_region_start: %lx [%3d]\n", free_region_start_virt, used_pages);
pml4->dump();
// Put our new PML4 into CR3 to start using it // Put our new PML4 into CR3 to start using it
// page_manager::set_pml4(pml4); page_manager::set_pml4(pml4);
// We now have all used memory mapped ourselves. Let the page_manager take // We now have all used memory mapped ourselves. Let the page_manager take
// over from here. // over from here.
g_page_manager.init( g_page_manager.init(free_head, used_head, cache_head);
free_head, used_head, cache_head,
free_region_start_virt, used_pages,
free_next, remaining_pages - consumed_pages);
} }

View File

@@ -5,6 +5,19 @@
page_manager g_page_manager; page_manager g_page_manager;
/// Translate a page-table pointer in the page-table linear mapping back
/// to the physical address it was mapped from.
static uint64_t
pt_to_phys(page_table *pt)
{
	const uint64_t virt = reinterpret_cast<uint64_t>(pt);
	return virt - page_manager::page_offset;
}
/// Translate a physical page-table address into a usable pointer inside
/// the page-table linear mapping, dropping any low flag bits.
static page_table *
pt_from_phys(uint64_t p)
{
	const uint64_t mapped = (p + page_manager::page_offset) & ~0xfffull;
	return reinterpret_cast<page_table *>(mapped);
}
struct free_page_header struct free_page_header
{ {
free_page_header *next; free_page_header *next;
@@ -102,12 +115,7 @@ void
page_block::dump(page_block *list, const char *name, bool show_unmapped) page_block::dump(page_block *list, const char *name, bool show_unmapped)
{ {
console *cons = console::get(); console *cons = console::get();
cons->puts("Block list"); cons->printf("Block list %s:\n", name);
if (name) {
cons->puts(" ");
cons->puts(name);
}
cons->puts(":\n");
int count = 0; int count = 0;
for (page_block *cur = list; cur; cur = cur->next) { for (page_block *cur = list; cur; cur = cur->next) {
@@ -115,25 +123,25 @@ page_block::dump(page_block *list, const char *name, bool show_unmapped)
if (!(show_unmapped || cur->has_flag(page_block_flags::mapped))) if (!(show_unmapped || cur->has_flag(page_block_flags::mapped)))
continue; continue;
cons->puts(" ["); cons->printf(" %lx %x [%6d]",
cons->put_hex((uint64_t)cur); cur->physical_address,
cons->puts("] "); cur->flags,
cons->put_hex(cur->physical_address); cur->count);
cons->puts(" ");
cons->put_hex((uint32_t)cur->flags);
if (cur->virtual_address) { if (cur->virtual_address) {
cons->puts(" "); page_table_indices start{cur->virtual_address};
cons->put_hex(cur->virtual_address); page_table_indices end{cur->virtual_address + cur->count * page_manager::page_size - 1};
cons->printf(" %lx (%d,%d,%d,%d)-(%d,%d,%d,%d)",
cur->virtual_address,
start[0], start[1], start[2], start[3],
end[0], end[1], end[2], end[3]);
} }
cons->puts(" [");
cons->put_dec(cur->count); cons->printf("\n");
cons->puts("]\n");
} }
cons->puts(" Total: "); cons->printf(" Total: %d\n");
cons->put_dec(count);
cons->puts("\n");
} }
void void
@@ -170,11 +178,7 @@ void
page_manager::init( page_manager::init(
page_block *free, page_block *free,
page_block *used, page_block *used,
page_block *block_cache, page_block *block_cache)
uint64_t scratch_start,
uint64_t scratch_pages,
uint64_t page_table_start,
uint64_t page_table_pages)
{ {
m_free = free; m_free = free;
m_used = used; m_used = used;
@@ -184,24 +188,56 @@ page_manager::init(
// allocated, full of page_block structs. Eventually hand // allocated, full of page_block structs. Eventually hand
// control of that to a slab allocator. // control of that to a slab allocator.
m_page_cache = nullptr; page_table *pml4 = get_pml4();
for (unsigned i = 0; i < page_table_pages; ++i) {
uint64_t addr = page_table_start + (i * page_size); // Fix up the offset-marked pointers
free_page_header *header = reinterpret_cast<free_page_header *>(addr); for (unsigned i = 0; i < m_marked_pointer_count; ++i) {
header->count = 1; uint64_t p = reinterpret_cast<uint64_t>(m_marked_pointers[i]);
header->next = m_page_cache; uint64_t v = p + page_offset;
m_page_cache = header; uint64_t c = (m_marked_pointer_lengths[i] / page_size) + 1;
// TODO: cleanly search/split this as a block out of used/free if possible
page_block *block = get_block();
// TODO: page-align
block->physical_address = p;
block->virtual_address = v;
block->count = c;
block->flags =
page_block_flags::used |
page_block_flags::mapped |
page_block_flags::mmio;
m_used = page_block::insert(m_used, block);
page_in(pml4, p, v, c);
} }
console *cons = console::get();
consolidate_blocks(); consolidate_blocks();
page_block::dump(m_used, "used before map", true);
//map_pages(0xf0000000 + high_offset, 120); //map_pages(0xf0000000 + high_offset, 120);
} }
void
page_manager::mark_offset_pointer(void **pointer, size_t length)
{
m_marked_pointers[m_marked_pointer_count] = pointer;
m_marked_pointer_lengths[m_marked_pointer_count++] = length;
}
/// Pop an unused page_block struct from the block cache.
/// \returns A detached page_block ready for reuse
page_block *
page_manager::get_block()
{
	page_block *block = m_block_cache;

	// Growing the block cache on exhaustion is not yet implemented.
	kassert(block, "NYI: page_manager::get_block() needed to allocate.");
	if (!block)
		return nullptr; // unreachable past kassert; avoids falling off the end (UB)

	m_block_cache = block->next;
	block->next = 0;
	return block;
}
void void
page_manager::free_blocks(page_block *block) page_manager::free_blocks(page_block *block)
{ {
@@ -217,19 +253,38 @@ page_manager::free_blocks(page_block *block)
m_block_cache = block; m_block_cache = block;
} }
page_block * page_table *
page_manager::get_block() page_manager::get_table_page()
{ {
page_block *block = m_block_cache; free_page_header *page = m_page_cache;
if (block) { if (page) {
m_block_cache = block->next; m_page_cache = page->next;
block->next = 0; return reinterpret_cast<page_table *>(page);
return block;
} else { } else {
kassert(0, "NYI: page_manager::get_block() needed to allocate."); kassert(0, "NYI: page_manager::get_table_page() needed to allocate.");
} }
} }
/// Return a set of mapped contiguous pages to the page-table page cache.
/// \arg pages Pointer to the first page to be returned
/// \arg count Number of pages in the range
void
page_manager::free_table_pages(void *pages, size_t count)
{
	uint64_t addr = reinterpret_cast<uint64_t>(pages);
	const uint64_t end = addr + count * page_size;

	// Push each page onto the cache list as its own single-page header.
	while (addr < end) {
		free_page_header *header = reinterpret_cast<free_page_header *>(addr);
		header->count = 1;
		header->next = m_page_cache;
		m_page_cache = header;
		addr += page_size;
	}
}
void
page_manager::consolidate_blocks()
{
m_block_cache = page_block::append(m_block_cache, page_block::consolidate(m_free));
m_block_cache = page_block::append(m_block_cache, page_block::consolidate(m_used));
}
void * void *
page_manager::map_pages(uint64_t address, unsigned count) page_manager::map_pages(uint64_t address, unsigned count)
{ {
@@ -302,63 +357,37 @@ page_manager::unmap_pages(uint64_t address, unsigned count)
} }
void void
page_manager::consolidate_blocks() page_manager::check_needs_page(page_table *table, unsigned index)
{ {
m_block_cache = page_block::append(m_block_cache, page_block::consolidate(m_free)); if (table->entries[index] & 0x1 == 1) return;
m_block_cache = page_block::append(m_block_cache, page_block::consolidate(m_used));
}
static unsigned page_table *new_table = get_table_page();
check_needs_page(page_table *table, unsigned index, page_table **free_pages)
{
if (table->entries[index] & 0x1 == 1) return 0;
kassert(*free_pages, "check_needs_page needed to allocate but had no free pages");
page_table *new_table = (*free_pages)++;
for (int i=0; i<512; ++i) new_table->entries[i] = 0; for (int i=0; i<512; ++i) new_table->entries[i] = 0;
table->entries[index] = reinterpret_cast<uint64_t>(new_table) | 0xb; table->entries[index] = pt_to_phys(new_table) | 0xb;
return 1;
} }
static uint64_t void
pt_to_phys(page_table *pt) page_manager::page_in(page_table *pml4, uint64_t phys_addr, uint64_t virt_addr, uint64_t count)
{
return reinterpret_cast<uint64_t>(pt) - page_manager::page_offset;
}
static page_table *
pt_from_phys(uint64_t p)
{
return reinterpret_cast<page_table *>((p + page_manager::page_offset) & ~0xfffull);
}
unsigned
page_in(page_table *pml4, uint64_t phys_addr, uint64_t virt_addr, uint64_t count, page_table *free_pages)
{ {
page_table_indices idx{virt_addr}; page_table_indices idx{virt_addr};
page_table *tables[4] = {pml4, nullptr, nullptr, nullptr}; page_table *tables[4] = {pml4, nullptr, nullptr, nullptr};
unsigned pages_consumed = 0;
for (; idx[0] < 512; idx[0] += 1) { for (; idx[0] < 512; idx[0] += 1) {
pages_consumed += check_needs_page(tables[0], idx[0], &free_pages); check_needs_page(tables[0], idx[0]);
tables[1] = reinterpret_cast<page_table *>( tables[1] = tables[0]->get(idx[0]);
tables[0]->entries[idx[0]] & ~0xfffull);
for (; idx[1] < 512; idx[1] += 1, idx[2] = 0, idx[3] = 0) { for (; idx[1] < 512; idx[1] += 1, idx[2] = 0, idx[3] = 0) {
pages_consumed += check_needs_page(tables[1], idx[1], &free_pages); check_needs_page(tables[1], idx[1]);
tables[2] = reinterpret_cast<page_table *>( tables[2] = tables[1]->get(idx[1]);
tables[1]->entries[idx[1]] & ~0xfffull);
for (; idx[2] < 512; idx[2] += 1, idx[3] = 0) { for (; idx[2] < 512; idx[2] += 1, idx[3] = 0) {
pages_consumed += check_needs_page(tables[2], idx[2], &free_pages); check_needs_page(tables[2], idx[2]);
tables[3] = reinterpret_cast<page_table *>( tables[3] = tables[2]->get(idx[2]);
tables[2]->entries[idx[2]] & ~0xfffull);
for (; idx[3] < 512; idx[3] += 1) { for (; idx[3] < 512; idx[3] += 1) {
tables[3]->entries[idx[3]] = phys_addr | 0xb; tables[3]->entries[idx[3]] = phys_addr | 0xb;
phys_addr += page_manager::page_size; phys_addr += page_manager::page_size;
if (--count == 0) return pages_consumed; if (--count == 0) return;
} }
} }
} }
@@ -368,7 +397,7 @@ page_in(page_table *pml4, uint64_t phys_addr, uint64_t virt_addr, uint64_t count
} }
void void
page_out(page_table *pml4, uint64_t virt_addr, uint64_t count) page_manager::page_out(page_table *pml4, uint64_t virt_addr, uint64_t count)
{ {
page_table_indices idx{virt_addr}; page_table_indices idx{virt_addr};
page_table *tables[4] = {pml4, nullptr, nullptr, nullptr}; page_table *tables[4] = {pml4, nullptr, nullptr, nullptr};
@@ -396,3 +425,44 @@ page_out(page_table *pml4, uint64_t virt_addr, uint64_t count)
kassert(0, "Ran to end of page_out"); kassert(0, "Ran to end of page_out");
} }
/// Recursively print this page table and its children to the console.
/// \arg level  Which paging level this table represents (4 = PML4 .. 1 = PT)
/// \arg offset Offset added to stored physical addresses to reach the
///             mapped page-table virtual addresses
void
page_table::dump(int level, uint64_t offset)
{
	console *cons = console::get();
	cons->printf("Level %d page table @ %lx (off %lx):\n", level, this, offset);

	for (int i=0; i<512; ++i) {
		uint64_t ent = entries[i];
		if (ent == 0) continue;

		cons->printf(" %3d: %lx ", i, ent);

		// Precedence fix: `ent & 0x1 == 0` parses as `ent & (0x1 == 0)`,
		// which is always zero, so the not-present case never printed.
		if ((ent & 0x1) == 0) {
			cons->printf(" NOT PRESENT\n");
			continue;
		}

		if ((level == 2 || level == 3) && (ent & 0x80) == 0x80) {
			cons->printf(" -> Large page at %lx\n", ent & ~0xfffull);
			continue;
		} else if (level == 1) {
			cons->printf(" -> Page at %lx\n", (ent & ~0xfffull));
		} else {
			cons->printf(" -> Level %d table at %lx\n", level - 1, (ent & ~0xfffull) + offset);
			continue;
		}
	}
	cons->printf("\n");

	if (--level > 0) {
		for (int i=0; i<512; ++i) {
			uint64_t ent = entries[i];
			if ((ent & 0x1) == 0) continue;
			if ((ent & 0x80)) continue;

			// Consistency fix: mask with ~0xfffull (was ~0xffful) to match
			// the 64-bit masks used everywhere else.
			page_table *next = reinterpret_cast<page_table *>((ent & ~0xfffull) + offset);
			next->dump(level, offset);
		}
	}
}

View File

@@ -38,6 +38,12 @@ public:
/// \arg count The number of pages to unmap /// \arg count The number of pages to unmap
void unmap_pages(uint64_t address, unsigned count); void unmap_pages(uint64_t address, unsigned count);
/// Mark a pointer and range to be offset-mapped. This pointer will
/// automatically get updated once page_manager::init() is called.
/// \arg pointer Pointer to a pointer to the memory area to be mapped
/// \arg length Length of the memory area to be mapped
void mark_offset_pointer(void **pointer, size_t length);
private: private:
friend void memory_initialize_managers(const void *, size_t, size_t); friend void memory_initialize_managers(const void *, size_t, size_t);
@@ -45,11 +51,7 @@ private:
void init( void init(
page_block *free, page_block *free,
page_block *used, page_block *used,
page_block *block_cache, page_block *block_cache);
uint64_t scratch_start,
uint64_t scratch_pages,
uint64_t page_table_start,
uint64_t page_table_pages);
/// Initialize the virtual memory manager based on this object's state /// Initialize the virtual memory manager based on this object's state
void init_memory_manager(); void init_memory_manager();
@@ -62,6 +64,15 @@ private:
/// \arg block A list of `page_block` structs /// \arg block A list of `page_block` structs
void free_blocks(page_block *block); void free_blocks(page_block *block);
/// Allocate a page for a page table, or pull one from the cache
/// \returns An empty page mapped in page space
page_table * get_table_page();
/// Return a set of mapped contiguous pages to the page cache.
/// \arg pages Pointer to the first page to be returned
/// \arg count Number of pages in the range
void free_table_pages(void *pages, size_t count);
/// Consolidate the free and used block lists. Return freed blocks /// Consolidate the free and used block lists. Return freed blocks
/// to the cache. /// to the cache.
void consolidate_blocks(); void consolidate_blocks();
@@ -72,24 +83,55 @@ private:
{ {
uint64_t pml4 = 0; uint64_t pml4 = 0;
__asm__ __volatile__ ( "mov %%cr3, %0" : "=r" (pml4) ); __asm__ __volatile__ ( "mov %%cr3, %0" : "=r" (pml4) );
pml4 &= ~0xfffull; return reinterpret_cast<page_table *>((pml4 & ~0xfffull) + page_offset);
return reinterpret_cast<page_table *>(pml4);
} }
/// Helper to set the PML4 table pointer in CR3. /// Helper to set the PML4 table pointer in CR3.
/// \arg pml4 A pointer to the PML4 table to install. /// \arg pml4 A pointer to the PML4 table to install.
static inline void set_pml4(page_table *pml4) static inline void set_pml4(page_table *pml4)
{ {
__asm__ __volatile__ ( "mov %0, %%cr3" :: uint64_t p = reinterpret_cast<uint64_t>(pml4) - page_offset;
"r" (reinterpret_cast<uint64_t>(pml4) & ~0xfffull) ); __asm__ __volatile__ ( "mov %0, %%cr3" :: "r" (p & ~0xfffull) );
} }
/// Helper function to allocate a new page table. If table entry `i` in
/// table `base` is empty, allocate a new page table and point `base[i]` at
/// it.
/// \arg base Existing page table being indexed into
/// \arg i Index into the existing table to check
void check_needs_page(page_table *base, unsigned i);
/// Low-level routine for mapping a number of pages into the given page table.
/// \arg pml4 The root page table to map into
/// \arg phys_addr The starting physical address of the pages to be mapped
/// \arg virt_addr The starting virtual address of the memory to be mapped
/// \arg count The number of pages to map
void page_in(
page_table *pml4,
uint64_t phys_addr,
uint64_t virt_addr,
uint64_t count);
/// Low-level routine for unmapping a number of pages from the given page table.
/// \arg pml4 The root page table for this mapping
/// \arg virt_addr The starting virtual address of the memory to be unmapped
/// \arg count The number of pages to unmap
void page_out(
page_table *pml4,
uint64_t virt_addr,
uint64_t count);
page_block *m_free; ///< Free pages list page_block *m_free; ///< Free pages list
page_block *m_used; ///< In-use pages list page_block *m_used; ///< In-use pages list
page_block *m_block_cache; ///< Cache of unused page_block structs page_block *m_block_cache; ///< Cache of unused page_block structs
free_page_header *m_page_cache; ///< Cache of free pages to use for tables free_page_header *m_page_cache; ///< Cache of free pages to use for tables
static const unsigned marked_pointer_max = 16;
unsigned m_marked_pointer_count;
void **m_marked_pointers[marked_pointer_max];
size_t m_marked_pointer_lengths[marked_pointer_max];
page_manager(const page_manager &) = delete; page_manager(const page_manager &) = delete;
}; };
@@ -102,10 +144,12 @@ enum class page_block_flags : uint32_t
free = 0x00000000, ///< Not a flag, value for free memory free = 0x00000000, ///< Not a flag, value for free memory
used = 0x00000001, ///< Memory is in use used = 0x00000001, ///< Memory is in use
mapped = 0x00000002, ///< Memory is mapped to virtual address mapped = 0x00000002, ///< Memory is mapped to virtual address
pending_free = 0x00000004, ///< Memory should be freed
nonvolatile = 0x00000010, ///< Memory is non-volatile storage mmio = 0x00000010, ///< Memory is a MMIO region
acpi_wait = 0x00000020, ///< Memory should be freed after ACPI init nonvolatile = 0x00000020, ///< Memory is non-volatile storage
pending_free = 0x10000000, ///< Memory should be freed
acpi_wait = 0x40000000, ///< Memory should be freed after ACPI init
permanent = 0x80000000, ///< Memory is permanently unusable permanent = 0x80000000, ///< Memory is permanently unusable
max_flags max_flags
@@ -186,10 +230,21 @@ struct page_block
/// Struct to allow easy accessing of a memory page being used as a page table. /// Struct to allow easy accessing of a memory page being used as a page table.
struct page_table struct page_table
{ {
using pm = page_manager;
uint64_t entries[512]; uint64_t entries[512];
inline page_table * next(int i) const {
return reinterpret_cast<page_table *>(entries[i] & ~0xfffull); inline page_table * get(int i) const {
uint64_t entry = entries[i];
if ((entry & 0x1) == 0) return nullptr;
return reinterpret_cast<page_table *>((entry & ~0xfffull) + pm::page_offset);
} }
inline void set(int i, page_table *p, uint16_t flags) {
entries[i] = (reinterpret_cast<uint64_t>(p) - pm::page_offset) | (flags & 0xfff);
}
void dump(int level = 4, uint64_t offset = page_manager::page_offset);
}; };
@@ -224,25 +279,3 @@ template <typename T> inline T page_align(T p)
/// \returns The next page-table-aligned address _after_ `p`. /// \returns The next page-table-aligned address _after_ `p`.
template <typename T> inline T page_table_align(T p) { return ((p - 1) & ~0x1fffffull) + 0x200000; } template <typename T> inline T page_table_align(T p) { return ((p - 1) & ~0x1fffffull) + 0x200000; }
/// Low-level routine for mapping a number of pages into the given page table.
/// \arg pml4 The root page table to map into
/// \arg phys_addr The starting physical address of the pages to be mapped
/// \arg virt_addr The starting virtual address ot the memory to be mapped
/// \arg count The number of pages to map
/// \arg free_pages A pointer to a list of free, mapped pages to use for new page tables.
/// \returns The number of pages consumed from `free_pages`.
unsigned page_in(
page_table *pml4,
uint64_t phys_addr,
uint64_t virt_addr,
uint64_t count,
page_table *free_pages);
/// Low-level routine for unmapping a number of pages from the given page table.
/// \arg pml4 The root page table for this mapping
/// \arg virt_addr The starting virtual address ot the memory to be unmapped
/// \arg count The number of pages to unmap
void page_out(
page_table *pml4,
uint64_t virt_addr,
uint64_t count);