Initialize page_manager.

Page manager now:
- Caches mapped pages and page_block structs (see the sketch below)
- Can unmap memory ranges
- Unmaps extra kernel memory during its init
Justin C. Miller
2018-04-22 02:48:45 -07:00
parent 07fd3abe2c
commit 95d52b87f4
5 changed files with 397 additions and 115 deletions
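
A minimal sketch of the kind of page_block bookkeeping the bullet points above describe. The struct name, its fields, and the page_size value are illustrative assumptions, not the repository's actual page_block definition; it only shows a linked list of physical ranges being consolidated, with the emptied nodes handed back so they can be appended to a cache list, mirroring the list_consolidate()/cache_head pattern in the diff below.

#include <cstdint>

struct page_block_sketch          // hypothetical stand-in, not the real page_block
{
    uint64_t physical_address = 0;
    uint64_t virtual_address  = 0;
    uint64_t count            = 0;   // pages in this range
    uint32_t flags            = 0;   // e.g. used / mapped bits
    page_block_sketch *next   = nullptr;

    static constexpr uint64_t page_size = 0x1000;   // assumed 4 KiB pages

    uint64_t physical_end() const { return physical_address + count * page_size; }

    // Merge adjacent, same-flag entries in place and return the emptied
    // nodes so the caller can recycle them into a cache list.
    page_block_sketch * list_consolidate()
    {
        page_block_sketch *freed = nullptr;
        page_block_sketch *cur = this;
        while (cur && cur->next) {
            page_block_sketch *n = cur->next;
            if (cur->flags == n->flags &&
                cur->physical_end() == n->physical_address) {
                cur->count += n->count;   // absorb the neighbor
                cur->next = n->next;
                n->next = freed;          // recycle the now-unused node
                freed = n;
            } else {
                cur = n;
            }
        }
        return freed;
    }
};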


@@ -102,12 +102,6 @@ desc_incr(const efi_memory_descriptor *d, size_t desc_length)
reinterpret_cast<const uint8_t *>(d) + desc_length);
}
struct page_table
{
uint64_t entries[512];
page_table * next(int i) const { return reinterpret_cast<page_table *>(entries[i] & ~0xfffull); }
};
static unsigned
count_table_pages_needed(page_block *used)
{
@@ -145,7 +139,8 @@ count_table_pages_needed(page_block *used)
uint64_t
gather_block_lists(
uint64_t scratch,
uint64_t scratch_phys,
uint64_t scratch_virt,
const void *memory_map,
size_t map_length,
size_t desc_length,
@@ -156,7 +151,7 @@ gather_block_lists(
page_block **free = free_head;
page_block **used = used_head;
page_block *block_list = reinterpret_cast<page_block *>(scratch);
page_block *block_list = reinterpret_cast<page_block *>(scratch_virt);
efi_memory_descriptor const *desc = reinterpret_cast<efi_memory_descriptor const *>(memory_map);
efi_memory_descriptor const *end = desc_incr(desc, map_length);
@@ -176,10 +171,10 @@ gather_block_lists(
case efi_memory_type::boot_services_code:
case efi_memory_type::boot_services_data:
case efi_memory_type::available:
if (scratch >= block->physical_address && scratch < block->physical_end()) {
if (scratch_phys >= block->physical_address && scratch_phys < block->physical_end()) {
// This is the scratch memory block, split off what we're not using
block->virtual_address = block->physical_address + page_manager::high_offset;
block->flags = page_block_flags::used | page_block_flags::mapped;
block->flags = page_block_flags::used;
if (block->count > 1024) {
page_block *rest = &block_list[i++];
@@ -227,65 +222,21 @@ gather_block_lists(
return reinterpret_cast<uint64_t>(&block_list[i]);
}
unsigned check_needs_page(page_table *table, unsigned index, page_table **free_pages)
{
if ((table->entries[index] & 0x1) == 1) return 0;
kassert(*free_pages, "check_needs_page needed to allocate but had no free pages");
page_table *new_table = (*free_pages)++;
for (int i=0; i<512; ++i) new_table->entries[i] = 0;
table->entries[index] = reinterpret_cast<uint64_t>(new_table) | 0xb;
return 1;
}
unsigned page_in(page_table *pml4, uint64_t phys_addr, uint64_t virt_addr, uint64_t count, page_table *free_pages)
{
page_table_indices idx{virt_addr};
page_table *tables[4] = {pml4, nullptr, nullptr, nullptr};
unsigned pages_consumed = 0;
for (; idx[0] < 512; idx[0] += 1) {
pages_consumed += check_needs_page(tables[0], idx[0], &free_pages);
tables[1] = reinterpret_cast<page_table *>(
tables[0]->entries[idx[0]] & ~0xfffull);
for (; idx[1] < 512; idx[1] += 1) {
pages_consumed += check_needs_page(tables[1], idx[1], &free_pages);
tables[2] = reinterpret_cast<page_table *>(
tables[1]->entries[idx[1]] & ~0xfffull);
for (; idx[2] < 512; idx[2] += 1) {
pages_consumed += check_needs_page(tables[2], idx[2], &free_pages);
tables[3] = reinterpret_cast<page_table *>(
tables[2]->entries[idx[2]] & ~0xfffull);
for (; idx[3] < 512; idx[3] += 1) {
tables[3]->entries[idx[3]] = phys_addr | 0xb;
phys_addr += page_manager::page_size;
if (--count == 0) return pages_consumed;
}
}
}
}
kassert(0, "Ran to end of page_in");
}
page_block *
fill_page_with_blocks(uint64_t start) {
uint64_t space = page_align(start) - start;
uint64_t count = space / sizeof(page_block);
uint64_t end = page_align(start);
page_block *blocks = reinterpret_cast<page_block *>(start);
kutil::memset(blocks, 0, sizeof(page_block)*count);
page_block *endp = reinterpret_cast<page_block *>(end - sizeof(page_block));
if (blocks >= endp)
return nullptr;
page_block *head = nullptr, **insert = &head;
for (unsigned i = 0; i < count; ++i) {
*insert = &blocks[i];
insert = &blocks[i].next;
page_block *cur = blocks;
while (cur < endp) {
cur->zero(cur + 1);
cur += 1;
}
return head;
cur->next = 0;
return blocks;
}
void
@@ -314,16 +265,14 @@ memory_initialize_managers(const void *memory_map, size_t map_length, size_t des
kassert(desc < end, "Couldn't find 4MiB of contiguous scratch space.");
// Offset-map this region into the higher half.
uint64_t free_region_start = desc->physical_start;
uint64_t free_region = page_table_align(free_region_start);
uint64_t next_free = free_region + page_manager::high_offset;
cons->puts("Skipping ");
cons->put_dec(free_region - free_region_start);
cons->puts(" bytes to get page-table-aligned.\n");
uint64_t free_start_phys = desc->physical_start;
uint64_t free_start = free_start_phys + page_manager::high_offset;
uint64_t free_aligned_phys = page_table_align(free_start_phys);
uint64_t free_next = free_aligned_phys + page_manager::high_offset;
// We'll need to copy any existing tables (except the PML4 which the
// bootloader gave us) into our 4 reserved pages so we can edit them.
page_table_indices fr_idx{free_region};
page_table_indices fr_idx{free_aligned_phys};
fr_idx[0] += 256; // Flip the highest bit of the address
if (tables[0].entries[fr_idx[0]] & 0x1) {
@@ -344,28 +293,31 @@ memory_initialize_managers(const void *memory_map, size_t map_length, size_t des
// No need to copy the last-level page table, we're overwriting the whole thing
tables[2].entries[fr_idx[2]] = reinterpret_cast<uint64_t>(&tables[3]) | 0xb;
page_in(&tables[0], free_region, next_free, 512, nullptr);
page_in(&tables[0], free_aligned_phys, free_next, 512, nullptr);
// We now have 2MiB starting at "free_region" to bootstrap ourselves. Start by
// We now have 2MiB starting at "free_aligned_phys" to bootstrap ourselves. Start by
// taking inventory of free pages.
page_block *free_head = nullptr;
page_block *used_head = nullptr;
next_free = gather_block_lists(next_free, memory_map, map_length, desc_length,
free_next = gather_block_lists(
free_aligned_phys, free_next,
memory_map, map_length, desc_length,
&free_head, &used_head);
// Unused page_block structs go here - finish out the current page with them
page_block *cache_head = fill_page_with_blocks(next_free);
next_free = page_align(next_free);
page_block *cache_head = fill_page_with_blocks(free_next);
free_next = page_align(free_next);
// Now go back through these lists and consolidate
page_block **cache = &cache_head;
*cache = free_head->list_consolidate();
while (*cache) cache = &(*cache)->next;
*cache = used_head->list_consolidate();
page_block *freed = free_head->list_consolidate();
cache_head->list_append(freed);
freed = used_head->list_consolidate();
cache_head->list_append(freed);
// Ok, now build an actual set of kernel page tables that just contains
// what the kernel actually has mapped.
page_table *pages = reinterpret_cast<page_table *>(next_free);
page_table *pages = reinterpret_cast<page_table *>(free_next);
unsigned consumed_pages = 1; // We're about to make a PML4, start with 1
// Finally, remap the existing mappings, but making everything writable
@@ -378,11 +330,14 @@ memory_initialize_managers(const void *memory_map, size_t map_length, size_t des
consumed_pages += page_in(pml4, cur->physical_address, cur->virtual_address,
cur->count, pages + consumed_pages);
}
next_free += (consumed_pages * page_manager::page_size);
free_next += (consumed_pages * page_manager::page_size);
// Put our new PML4 into CR3 to start using it
__asm__ __volatile__ ( "mov %%cr3, %0" : "=r" (pml4) );
// We now have all used memory mapped ourselves. Let the page_manager take
// over from here.
g_page_manager.init(
free_head, used_head, cache_head,
free_region_start, 1024, next_free);
free_start, 1024, free_next);
}
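
For reference, a sketch of how a virtual address decomposes into the four 9-bit indices that page_in and fr_idx walk above. It assumes the standard x86-64 4-level paging layout; page_table_indices_sketch and the main() below are illustrative stand-ins, not the repository's page_table_indices.

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in: split a virtual address into the four 9-bit
// indices used to walk PML4 -> PDPT -> PD -> PT.
struct page_table_indices_sketch
{
    uint64_t idx[4];

    explicit page_table_indices_sketch(uint64_t virt) :
        idx{
            (virt >> 39) & 0x1ff,   // level 0: PML4 index
            (virt >> 30) & 0x1ff,   // level 1: PDPT index
            (virt >> 21) & 0x1ff,   // level 2: PD index
            (virt >> 12) & 0x1ff }  // level 3: PT index
    {}

    uint64_t operator[](int i) const { return idx[i]; }
};

int main()
{
    // Adding 256 to the PML4 index is the same as setting bit 47 of the
    // address, which is what the diff's `fr_idx[0] += 256` relies on to
    // land the mapping in the higher half.
    page_table_indices_sketch low{0x0000000040000000ull};
    page_table_indices_sketch high{0xffff800040000000ull};
    std::printf("%llu %llu\n",
        (unsigned long long)low[0],
        (unsigned long long)high[0]);   // prints "0 256"
    return 0;
}

The same decomposition explains why page_in only needs four nested loops of 512 entries each: every level of the table hierarchy is indexed by one of these 9-bit fields.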