Refactor memory code.

Break out some more bootstrap code into functions. Add the start of
some Doxygen doc comments to help organize my thoughts.
This commit is contained in:
Justin C. Miller
2018-04-21 16:49:39 -07:00
parent 9a45ea562b
commit 4a38a74b16
3 changed files with 115 additions and 95 deletions

View File

@@ -142,76 +142,23 @@ count_table_pages_needed(page_block *used)
} }
void uint64_t
memory_manager::create(const void *memory_map, size_t map_length, size_t desc_length) gather_block_lists(
uint64_t scratch,
const void *memory_map,
size_t map_length,
size_t desc_length,
page_block **free_head,
page_block **used_head)
{ {
console *cons = console::get(); int i = 0;
page_block **free = free_head;
page_block **used = used_head;
// The bootloader reserved 4 pages for page tables, which we'll use to bootstrap. page_block *block_list = reinterpret_cast<page_block *>(scratch);
// The first one is the already-installed PML4, so grab it from CR3. efi_memory_descriptor const *desc = reinterpret_cast<efi_memory_descriptor const *>(memory_map);
page_table *tables = nullptr;
__asm__ __volatile__ ( "mov %%cr3, %0" : "=r" (tables) );
// Now go through EFI's memory map and find a 4MiB region of free space to
// use as a scratch space. We'll use the 2MiB that fits naturally aligned
// into a single page table.
efi_memory_descriptor const *desc =
reinterpret_cast<efi_memory_descriptor const *>(memory_map);
efi_memory_descriptor const *end = desc_incr(desc, map_length); efi_memory_descriptor const *end = desc_incr(desc, map_length);
while (desc < end) {
if (desc->type == efi_memory_type::available && desc->pages >= 1024)
break;
desc = desc_incr(desc, desc_length);
}
kassert(desc < end, "Couldn't find 4MiB of contiguous scratch space.");
uint64_t free_region = (desc->physical_start & 0x1fffff) == 0 ?
desc->physical_start :
desc->physical_start + 0x1fffff & ~0x1fffffull;
// Offset-map this region into the higher half.
uint64_t next_free = free_region + 0xffff800000000000;
cons->puts("Found region: ");
cons->put_hex(free_region);
cons->puts("\n");
// We'll need to copy any existing tables (except the PML4 which the
// bootloader gave us) into our 4 reserved pages so we can edit them.
page_table_indices fr_idx{free_region};
fr_idx[0] += 256; // Flip the highest bit of the address
if (tables[0].entries[fr_idx[0]] & 0x1) {
page_table *old_pdpt = tables[0].next(fr_idx[0]);
for (int i = 0; i < 512; ++i) tables[1].entries[i] = old_pdpt->entries[i];
} else {
for (int i = 0; i < 512; ++i) tables[1].entries[i] = 0;
}
tables[0].entries[fr_idx[0]] = reinterpret_cast<uint64_t>(&tables[1]) | 0xb;
if (tables[1].entries[fr_idx[1]] & 0x1) {
page_table *old_pdt = tables[1].next(fr_idx[1]);
for (int i = 0; i < 512; ++i) tables[2].entries[i] = old_pdt->entries[i];
} else {
for (int i = 0; i < 512; ++i) tables[2].entries[i] = 0;
}
tables[1].entries[fr_idx[1]] = reinterpret_cast<uint64_t>(&tables[2]) | 0xb;
for (int i = 0; i < 512; ++i)
tables[3].entries[i] = (free_region + 0x1000 * i) | 0xb;
tables[2].entries[fr_idx[2]] = reinterpret_cast<uint64_t>(&tables[3]) | 0xb;
// We now have 2MiB starting at "free_region" to bootstrap ourselves. Start by
// taking inventory of free pages.
page_block *block_list = reinterpret_cast<page_block *>(next_free);
int i = 0;
page_block *free_head = nullptr, **free = &free_head;
page_block *used_head = nullptr, **used = &used_head;
desc = reinterpret_cast<efi_memory_descriptor const *>(memory_map);
while (desc < end) { while (desc < end) {
page_block *block = &block_list[i++]; page_block *block = &block_list[i++];
block->physical_address = desc->physical_start; block->physical_address = desc->physical_start;
@@ -228,18 +175,16 @@ memory_manager::create(const void *memory_map, size_t map_length, size_t desc_le
case efi_memory_type::boot_services_code: case efi_memory_type::boot_services_code:
case efi_memory_type::boot_services_data: case efi_memory_type::boot_services_data:
case efi_memory_type::available: case efi_memory_type::available:
if (free_region >= block->physical_address && free_region < block->end()) { if (scratch >= block->physical_address && scratch < block->physical_end()) {
// This is the scratch memory block, split off what we're not using // This is the scratch memory block, split off what we're not using
block->virtual_address = block->physical_address + 0xffff800000000000; block->virtual_address = block->physical_address + 0xffff800000000000;
block->flags = page_block_flags::used | page_block_flags::mapped;
block->flags = page_block_flags::used
| page_block_flags::mapped
| page_block_flags::pending_free;
if (block->count > 1024) { if (block->count > 1024) {
page_block *rest = &block_list[i++]; page_block *rest = &block_list[i++];
rest->physical_address = desc->physical_start + (1024*0x1000); rest->physical_address = desc->physical_start + (1024*0x1000);
rest->virtual_address = 0; rest->virtual_address = 0;
rest->flags = page_block_flags::free;
rest->count = desc->pages - 1024; rest->count = desc->pages - 1024;
rest->next = nullptr; rest->next = nullptr;
*free = rest; *free = rest;
@@ -278,9 +223,71 @@ memory_manager::create(const void *memory_map, size_t map_length, size_t desc_le
desc = desc_incr(desc, desc_length); desc = desc_incr(desc, desc_length);
} }
// Update the pointer to the next free page return reinterpret_cast<uint64_t>(&block_list[i]);
next_free += i * sizeof(page_block); }
next_free = ((next_free - 1) & ~0xfffull) + 0x1000;
void
memory_manager::create(const void *memory_map, size_t map_length, size_t desc_length)
{
console *cons = console::get();
// The bootloader reserved 4 pages for page tables, which we'll use to bootstrap.
// The first one is the already-installed PML4, so grab it from CR3.
page_table *tables = nullptr;
__asm__ __volatile__ ( "mov %%cr3, %0" : "=r" (tables) );
// Now go through EFI's memory map and find a 4MiB region of free space to
// use as a scratch space. We'll use the 2MiB that fits naturally aligned
// into a single page table.
efi_memory_descriptor const *desc =
reinterpret_cast<efi_memory_descriptor const *>(memory_map);
efi_memory_descriptor const *end = desc_incr(desc, map_length);
while (desc < end) {
if (desc->type == efi_memory_type::available && desc->pages >= 1024)
break;
desc = desc_incr(desc, desc_length);
}
kassert(desc < end, "Couldn't find 4MiB of contiguous scratch space.");
uint64_t free_region = page_table_align(desc->physical_start);
// Offset-map this region into the higher half.
uint64_t next_free = free_region + 0xffff800000000000;
// We'll need to copy any existing tables (except the PML4 which the
// bootloader gave us) into our 4 reserved pages so we can edit them.
page_table_indices fr_idx{free_region};
fr_idx[0] += 256; // Flip the highest bit of the address
if (tables[0].entries[fr_idx[0]] & 0x1) {
page_table *old_pdpt = tables[0].next(fr_idx[0]);
for (int i = 0; i < 512; ++i) tables[1].entries[i] = old_pdpt->entries[i];
} else {
for (int i = 0; i < 512; ++i) tables[1].entries[i] = 0;
}
tables[0].entries[fr_idx[0]] = reinterpret_cast<uint64_t>(&tables[1]) | 0xb;
if (tables[1].entries[fr_idx[1]] & 0x1) {
page_table *old_pdt = tables[1].next(fr_idx[1]);
for (int i = 0; i < 512; ++i) tables[2].entries[i] = old_pdt->entries[i];
} else {
for (int i = 0; i < 512; ++i) tables[2].entries[i] = 0;
}
tables[1].entries[fr_idx[1]] = reinterpret_cast<uint64_t>(&tables[2]) | 0xb;
for (int i = 0; i < 512; ++i)
tables[3].entries[i] = (free_region + 0x1000 * i) | 0xb;
tables[2].entries[fr_idx[2]] = reinterpret_cast<uint64_t>(&tables[3]) | 0xb;
// We now have 2MiB starting at "free_region" to bootstrap ourselves. Start by
// taking inventory of free pages.
page_block *free_head = nullptr;
page_block *used_head = nullptr;
next_free = gather_block_lists(next_free, memory_map, map_length, desc_length,
&free_head, &used_head);
next_free = page_align(next_free);
// Now go back through these lists and consolidate // Now go back through these lists and consolidate
free_head->list_consolidate(); free_head->list_consolidate();
@@ -290,7 +297,7 @@ memory_manager::create(const void *memory_map, size_t map_length, size_t desc_le
// what the kernel actually has mapped. // what the kernel actually has mapped.
unsigned table_page_count = count_table_pages_needed(used_head); unsigned table_page_count = count_table_pages_needed(used_head);
cons->puts("To map currently-mapped pages, we need "); page_table *pages = reinterpret_cast<page_table *>(next_free);
cons->put_dec(table_page_count); next_free += table_page_count * 0x1000;
cons->puts(" pages of tables.\n");
} }

View File

@@ -11,7 +11,7 @@ page_block::list_consolidate()
page_block *next = cur->next; page_block *next = cur->next;
if (next && cur->flags == next->flags && if (next && cur->flags == next->flags &&
cur->end() == next->physical_address) cur->physical_end() == next->physical_address)
{ {
cur->count += next->count; cur->count += next->count;
cur->next = next->next; cur->next = next->next;
@@ -60,15 +60,3 @@ page_block::list_dump(const char *name)
cons->put_dec(count); cons->put_dec(count);
cons->puts("\n"); cons->puts("\n");
} }
void
page_table_indices::dump()
{
console *cons = console::get();
cons->puts("{");
for (int i = 0; i < 4; ++i) {
if (i) cons->puts(", ");
cons->put_dec(index[i]);
}
cons->puts("}");
}

View File

@@ -1,9 +1,12 @@
#pragma once #pragma once
/// \file memory_pages.h
/// Structures related to handling memory paging.
#include <stdint.h> #include <stdint.h>
#include "kutil/enum_bitfields.h" #include "kutil/enum_bitfields.h"
/// Flags used by `page_block`.
enum class page_block_flags : uint32_t enum class page_block_flags : uint32_t
{ {
// Not a flag value, but for comparison // Not a flag value, but for comparison
@@ -22,6 +25,10 @@ enum class page_block_flags : uint32_t
}; };
IS_BITFIELD(page_block_flags); IS_BITFIELD(page_block_flags);
/// A block of contiguous pages. Each `page_block` represents contiguous
/// physical pages with the same attributes. A `page_block *` is also a
/// linked list of such structures.
struct page_block struct page_block
{ {
uint64_t physical_address; uint64_t physical_address;
@@ -31,15 +38,23 @@ struct page_block
page_block *next; page_block *next;
bool has_flag(page_block_flags f) const { return bitfield_contains(flags, f); } bool has_flag(page_block_flags f) const { return bitfield_contains(flags, f); }
uint64_t end() const { return physical_address + (count * 0x1000); } uint64_t physical_end() const { return physical_address + (count * 0x1000); }
uint64_t virtual_end() const { return virtual_address + (count * 0x1000); }
/// Traverse the list, joining adjacent blocks where possible.
/// \returns A linked list of freed page_block structures.
page_block * list_consolidate(); page_block * list_consolidate();
/// Traverse the list, printing debug info on this list.
/// \arg name [optional] String to print as the name of this list
void list_dump(const char *name = nullptr); void list_dump(const char *name = nullptr);
}; };
/// Helper struct for computing page table indices of a given address.
struct page_table_indices struct page_table_indices
{ {
page_table_indices(uint64_t v) : page_table_indices(uint64_t v = 0) :
index{ index{
(v >> 39) & 0x1ff, (v >> 39) & 0x1ff,
(v >> 30) & 0x1ff, (v >> 30) & 0x1ff,
@@ -47,8 +62,18 @@ struct page_table_indices
(v >> 12) & 0x1ff } (v >> 12) & 0x1ff }
{} {}
/// Get the index for a given level of page table.
uint64_t & operator[](size_t i) { return index[i]; } uint64_t & operator[](size_t i) { return index[i]; }
uint64_t index[4]; uint64_t index[4]; ///< Indices for each level of tables.
void dump();
}; };
/// Round an address up to a page (4 KiB) boundary.
/// \arg p The address to align.
/// \returns `p` unchanged if it is already page-aligned, otherwise the
///          next page-aligned address above `p`.
template <typename T>
inline T page_align(T p)
{
    return (p + 0xfffull) & ~0xfffull;
}

/// Round an address up to a page-table (2 MiB) boundary — that is, to an
/// address that maps to the first entry of a page table.
/// \arg p The address to align.
/// \returns `p` unchanged if it is already 2 MiB-aligned, otherwise the
///          next 2 MiB-aligned address above `p`.
template <typename T>
inline T page_table_align(T p)
{
    return (p + 0x1fffffull) & ~0x1fffffull;
}