Enable allocation and mapping of pages

This commit is contained in:
Justin C. Miller
2018-04-26 11:10:32 -07:00
parent a2665d9247
commit 34c894b15d
2 changed files with 86 additions and 7 deletions

View File

@@ -1,3 +1,5 @@
#include <algorithm>
#include "assert.h" #include "assert.h"
#include "console.h" #include "console.h"
#include "memory_pages.h" #include "memory_pages.h"
@@ -214,7 +216,11 @@ page_manager::init(
consolidate_blocks(); consolidate_blocks();
//map_pages(0xf0000000 + high_offset, 120); page_block::dump(m_used, "used before map", true);
page_block::dump(m_free, "free before map", true);
map_pages(0xf0000000 + high_offset, 120);
page_block::dump(m_used, "used after map", true);
page_block::dump(m_free, "free after map", true);
} }
@@ -256,13 +262,36 @@ page_manager::free_blocks(page_block *block)
page_table * page_table *
page_manager::get_table_page() page_manager::get_table_page()
{ {
free_page_header *page = m_page_cache; if (!m_page_cache) {
if (page) { uint64_t phys = 0;
m_page_cache = page->next; uint64_t n = pop_pages(32, &phys);
return reinterpret_cast<page_table *>(page); uint64_t virt = phys + page_offset;
} else {
kassert(0, "NYI: page_manager::get_table_page() needed to allocate."); page_block *block = get_block();
block->physical_address = phys;
block->virtual_address = virt;
block->count = n;
page_block::insert(m_used, block);
page_in(get_pml4(), phys, virt, n);
m_page_cache = reinterpret_cast<free_page_header *>(virt);
// The last one needs to be null, so do n-1
uint64_t end = virt + (n-1) * page_size;
while (virt < end) {
reinterpret_cast<free_page_header *>(virt)->next =
reinterpret_cast<free_page_header *>(virt + page_size);
virt += page_size;
}
reinterpret_cast<free_page_header *>(virt)->next = nullptr;
g_console.printf("Mapped %d new page table pages at %lx\n", n, phys);
} }
free_page_header *page = m_page_cache;
m_page_cache = page->next;
return reinterpret_cast<page_table *>(page);
} }
void void
@@ -288,7 +317,28 @@ page_manager::consolidate_blocks()
void * void *
page_manager::map_pages(uint64_t address, unsigned count) page_manager::map_pages(uint64_t address, unsigned count)
{ {
void *ret = reinterpret_cast<void *>(address);
page_table *pml4 = get_pml4(); page_table *pml4 = get_pml4();
while (count) {
kassert(m_free, "page_manager::map_pages ran out of free pages!");
uint64_t phys = 0;
size_t n = pop_pages(count, &phys);
page_block *block = get_block();
block->physical_address = phys;
block->virtual_address = address;
block->count = n;
page_block::insert(m_used, block);
page_in(pml4, phys, address, n);
address += n * page_size;
count -= n;
}
return ret;
} }
void void
@@ -425,6 +475,27 @@ page_manager::page_out(page_table *pml4, uint64_t virt_addr, uint64_t count)
kassert(0, "Ran to end of page_out"); kassert(0, "Ran to end of page_out");
} }
size_t
page_manager::pop_pages(size_t count, uint64_t *address)
{
kassert(m_free, "page_manager::pop_pages ran out of free pages!");
unsigned n = std::min(count, static_cast<size_t>(m_free->count));
*address = m_free->physical_address;
m_free->physical_address += n * page_size;
m_free->count -= n;
if (m_free->count == 0) {
page_block *block = m_free;
m_free = m_free->next;
block->zero(m_block_cache);
m_block_cache = block;
}
return n;
}
void void
page_table::dump(int level, uint64_t offset) page_table::dump(int level, uint64_t offset)

View File

@@ -121,6 +121,14 @@ private:
uint64_t virt_addr, uint64_t virt_addr,
uint64_t count); uint64_t count);
/// Get free pages from the free list. Only pages from the first free block
/// are returned, so the number may be less than requested, but they will
/// be contiguous. Pages will not be mapped into virtual memory.
/// \arg count The maximum number of pages to get
/// \arg address [out] The address of the first page
/// \returns The number of pages retrieved
size_t pop_pages(size_t count, uint64_t *address);
page_block *m_free; ///< Free pages list page_block *m_free; ///< Free pages list
page_block *m_used; ///< In-use pages list page_block *m_used; ///< In-use pages list