Switch page_manager to use frame_allocator.

Removed the frame allocation logic from page_manager and replaced it
with using an instance of frame_allocator instead. This had several
major ripple effects:

- memory_initialize() had to change to support this new world
  - Where to map used blocks is now passed as a flag, since blocks don't
    track their virtual address anymore
  - Instead of the complicated "find N contiguous pages that can be
    mapped in with one page table", we now just have the bootloader give
    us some (currently 64) pages to use both for tables and scratch
    space.
  - frame_allocator initialization was split into two steps to allow
    mapping used blocks before std::move()ing them over
This commit is contained in:
Justin C. Miller
2019-02-26 22:56:14 -08:00
parent 626eec4a31
commit 8cdc39fdee
13 changed files with 286 additions and 784 deletions

View File

@@ -64,7 +64,7 @@ loader_load_initrd(
status = loader_alloc_pages( status = loader_alloc_pages(
bootsvc, bootsvc,
INITRD_MEMTYPE, memtype_initrd,
&data->initrd_length, &data->initrd_length,
&data->initrd); &data->initrd);
CHECK_EFI_STATUS_OR_RETURN(status, L"Allocating pages"); CHECK_EFI_STATUS_OR_RETURN(status, L"Allocating pages");
@@ -153,7 +153,7 @@ loader_load_elf(
length = prog_header.mem_size; length = prog_header.mem_size;
void *addr = (void *)(prog_header.vaddr - KERNEL_VIRT_ADDRESS); void *addr = (void *)(prog_header.vaddr - KERNEL_VIRT_ADDRESS);
status = loader_alloc_pages(bootsvc, KERNEL_MEMTYPE, &length, &addr); status = loader_alloc_pages(bootsvc, memtype_kernel, &length, &addr);
CHECK_EFI_STATUS_OR_RETURN(status, L"Allocating kernel pages"); CHECK_EFI_STATUS_OR_RETURN(status, L"Allocating kernel pages");
if (data->kernel == 0) if (data->kernel == 0)
@@ -237,7 +237,7 @@ loader_load_kernel(
data->data_length += PAGE_SIZE; // extra page for map growth data->data_length += PAGE_SIZE; // extra page for map growth
status = loader_alloc_pages( status = loader_alloc_pages(
bootsvc, bootsvc,
KERNEL_DATA_MEMTYPE, memtype_data,
&data->data_length, &data->data_length,
&data->data); &data->data);
CHECK_EFI_STATUS_OR_RETURN(status, L"loader_alloc_pages: kernel data"); CHECK_EFI_STATUS_OR_RETURN(status, L"loader_alloc_pages: kernel data");

View File

@@ -12,18 +12,6 @@
#define KERNEL_VIRT_ADDRESS 0xFFFFFF0000000000 #define KERNEL_VIRT_ADDRESS 0xFFFFFF0000000000
#endif #endif
#ifndef KERNEL_MEMTYPE
#define KERNEL_MEMTYPE static_cast<EFI_MEMORY_TYPE>(0x80000000)
#endif
#ifndef INITRD_MEMTYPE
#define INITRD_MEMTYPE static_cast<EFI_MEMORY_TYPE>(0x80000001)
#endif
#ifndef KERNEL_DATA_MEMTYPE
#define KERNEL_DATA_MEMTYPE static_cast<EFI_MEMORY_TYPE>(0x80000002)
#endif
#ifndef KERNEL_FILENAME #ifndef KERNEL_FILENAME
#define KERNEL_FILENAME L"kernel.elf" #define KERNEL_FILENAME L"kernel.elf"
#endif #endif

View File

@@ -10,6 +10,10 @@
#include "memory.h" #include "memory.h"
#include "utility.h" #include "utility.h"
#ifndef SCRATCH_PAGES
#define SCRATCH_PAGES 64
#endif
#ifndef GIT_VERSION_WIDE #ifndef GIT_VERSION_WIDE
#define GIT_VERSION_WIDE L"no version" #define GIT_VERSION_WIDE L"no version"
#endif #endif
@@ -47,7 +51,7 @@ efi_main(EFI_HANDLE image_handle, EFI_SYSTEM_TABLE *system_table)
CHECK_EFI_STATUS_OR_RETURN(status, "console::initialize"); CHECK_EFI_STATUS_OR_RETURN(status, "console::initialize");
// From here on out, we can use CHECK_EFI_STATUS_OR_FAIL instead // From here on out, we can use CHECK_EFI_STATUS_OR_FAIL instead
memory_init_pointer_fixup(bootsvc, runsvc); memory_init_pointer_fixup(bootsvc, runsvc, SCRATCH_PAGES);
// Find ACPI tables. Ignore ACPI 1.0 if a 2.0 table is found. // Find ACPI tables. Ignore ACPI 1.0 if a 2.0 table is found.
// //
@@ -114,6 +118,7 @@ efi_main(EFI_HANDLE image_handle, EFI_SYSTEM_TABLE *system_table)
data_header->version = DATA_HEADER_VERSION; data_header->version = DATA_HEADER_VERSION;
data_header->length = sizeof(struct popcorn_data); data_header->length = sizeof(struct popcorn_data);
data_header->scratch_pages = SCRATCH_PAGES;
data_header->flags = 0; data_header->flags = 0;
data_header->initrd = load.initrd; data_header->initrd = load.initrd;

View File

@@ -4,6 +4,11 @@
#include "memory.h" #include "memory.h"
#include "utility.h" #include "utility.h"
const EFI_MEMORY_TYPE memtype_kernel = static_cast<EFI_MEMORY_TYPE>(0x80000000);
const EFI_MEMORY_TYPE memtype_data = static_cast<EFI_MEMORY_TYPE>(0x80000001);
const EFI_MEMORY_TYPE memtype_initrd = static_cast<EFI_MEMORY_TYPE>(0x80000002);
const EFI_MEMORY_TYPE memtype_scratch = static_cast<EFI_MEMORY_TYPE>(0x80000003);
#define INCREMENT_DESC(p, b) (EFI_MEMORY_DESCRIPTOR*)(((uint8_t*)(p))+(b)) #define INCREMENT_DESC(p, b) (EFI_MEMORY_DESCRIPTOR*)(((uint8_t*)(p))+(b))
size_t fixup_pointer_index = 0; size_t fixup_pointer_index = 0;
@@ -32,9 +37,13 @@ static const wchar_t *
memory_type_name(UINT32 value) memory_type_name(UINT32 value)
{ {
if (value >= (sizeof(memory_type_names) / sizeof(wchar_t *))) { if (value >= (sizeof(memory_type_names) / sizeof(wchar_t *))) {
if (value == KERNEL_DATA_MEMTYPE) return L"Kernel Data"; switch (value) {
else if (value == KERNEL_MEMTYPE) return L"Kernel Image"; case memtype_kernel: return L"Kernel Data";
else return L"Bad Type Value"; case memtype_data: return L"Kernel Data";
case memtype_initrd: return L"Initial Ramdisk";
case memtype_scratch: return L"Kernel Scratch Space";
default: return L"Bad Type Value";
}
} }
return memory_type_names[value]; return memory_type_names[value];
} }
@@ -50,7 +59,7 @@ memory_update_marked_addresses(EFI_EVENT UNUSED *event, void *context)
} }
EFI_STATUS EFI_STATUS
memory_init_pointer_fixup(EFI_BOOT_SERVICES *bootsvc, EFI_RUNTIME_SERVICES *runsvc) memory_init_pointer_fixup(EFI_BOOT_SERVICES *bootsvc, EFI_RUNTIME_SERVICES *runsvc, unsigned scratch_pages)
{ {
EFI_STATUS status; EFI_STATUS status;
EFI_EVENT event; EFI_EVENT event;
@@ -67,7 +76,7 @@ memory_init_pointer_fixup(EFI_BOOT_SERVICES *bootsvc, EFI_RUNTIME_SERVICES *runs
// Reserve a page for our replacement PML4, plus some pages for the kernel to use // Reserve a page for our replacement PML4, plus some pages for the kernel to use
// as page tables while it gets started. // as page tables while it gets started.
EFI_PHYSICAL_ADDRESS addr = 0; EFI_PHYSICAL_ADDRESS addr = 0;
status = bootsvc->AllocatePages(AllocateAnyPages, EfiLoaderData, 16, &addr); status = bootsvc->AllocatePages(AllocateAnyPages, memtype_scratch, scratch_pages, &addr);
CHECK_EFI_STATUS_OR_RETURN(status, "Failed to allocate page table pages."); CHECK_EFI_STATUS_OR_RETURN(status, "Failed to allocate page table pages.");
new_pml4 = (uint64_t *)addr; new_pml4 = (uint64_t *)addr;
@@ -187,9 +196,10 @@ memory_virtualize(EFI_RUNTIME_SERVICES *runsvc, struct memory_map *map)
EFI_MEMORY_DESCRIPTOR *d = map->entries; EFI_MEMORY_DESCRIPTOR *d = map->entries;
while (d < end) { while (d < end) {
switch (d->Type) { switch (d->Type) {
case KERNEL_MEMTYPE: case memtype_kernel:
case INITRD_MEMTYPE: case memtype_data:
case KERNEL_DATA_MEMTYPE: case memtype_initrd:
case memtype_scratch:
d->Attribute |= EFI_MEMORY_RUNTIME; d->Attribute |= EFI_MEMORY_RUNTIME;
d->VirtualStart = d->PhysicalStart + KERNEL_VIRT_ADDRESS; d->VirtualStart = d->PhysicalStart + KERNEL_VIRT_ADDRESS;

View File

@@ -1,6 +1,11 @@
#pragma once #pragma once
#include <efi/efi.h> #include <efi/efi.h>
extern const EFI_MEMORY_TYPE memtype_kernel;
extern const EFI_MEMORY_TYPE memtype_data;
extern const EFI_MEMORY_TYPE memtype_initrd;
extern const EFI_MEMORY_TYPE memtype_scratch;
struct memory_map { struct memory_map {
size_t length; size_t length;
size_t size; size_t size;
@@ -9,7 +14,10 @@ struct memory_map {
EFI_MEMORY_DESCRIPTOR *entries; EFI_MEMORY_DESCRIPTOR *entries;
}; };
EFI_STATUS memory_init_pointer_fixup(EFI_BOOT_SERVICES *bootsvc, EFI_RUNTIME_SERVICES *runsvc); EFI_STATUS memory_init_pointer_fixup(
EFI_BOOT_SERVICES *bootsvc,
EFI_RUNTIME_SERVICES *runsvc,
unsigned scratch_pages);
void memory_mark_pointer_fixup(void **p); void memory_mark_pointer_fixup(void **p);
EFI_STATUS memory_get_map_length(EFI_BOOT_SERVICES *bootsvc, size_t *size); EFI_STATUS memory_get_map_length(EFI_BOOT_SERVICES *bootsvc, size_t *size);

View File

@@ -13,7 +13,8 @@ struct popcorn_data {
uint16_t version; uint16_t version;
uint16_t length; uint16_t length;
uint32_t _reserved0; uint16_t _reserved0;
uint16_t scratch_pages;
uint32_t flags; uint32_t flags;
void *initrd; void *initrd;

View File

@@ -44,7 +44,7 @@ init_console()
log::enable(logs::memory, log::level::info); log::enable(logs::memory, log::level::info);
log::enable(logs::fs, log::level::debug); log::enable(logs::fs, log::level::debug);
log::enable(logs::task, log::level::debug); log::enable(logs::task, log::level::debug);
//log::enable(logs::boot, log::level::debug); log::enable(logs::boot, log::level::debug);
} }
void void
@@ -65,16 +65,17 @@ kernel_main(popcorn_data *header)
gdt_init(); gdt_init();
interrupts_init(); interrupts_init();
page_manager *pager = new (&g_page_manager) page_manager;
memory_initialize( memory_initialize(
header->scratch_pages,
header->memory_map, header->memory_map,
header->memory_map_length, header->memory_map_length,
header->memory_map_desc_size); header->memory_map_desc_size);
pager->map_offset_pointer( if (header->frame_buffer && header->frame_buffer_length) {
&header->frame_buffer, page_manager::get()->map_offset_pointer(
header->frame_buffer_length); &header->frame_buffer,
header->frame_buffer_length);
}
init_console(); init_console();
@@ -91,8 +92,8 @@ kernel_main(popcorn_data *header)
log::info(logs::boot, " %s%s (%d bytes).", f.executable() ? "*" : "", f.name(), f.size()); log::info(logs::boot, " %s%s (%d bytes).", f.executable() ? "*" : "", f.name(), f.size());
/* /*
pager->dump_pml4(nullptr, 0); page_manager::get()->dump_pml4(nullptr, 0);
pager->dump_blocks(true); page_manager::get()->dump_blocks(true);
*/ */
device_manager *devices = device_manager *devices =

View File

@@ -1,32 +1,65 @@
#include <algorithm>
#include <utility> #include <utility>
#include "kutil/assert.h" #include "kutil/assert.h"
#include "kutil/linked_list.h" #include "kutil/frame_allocator.h"
#include "kutil/slab_allocator.h" #include "kutil/heap_manager.h"
#include "io.h" #include "io.h"
#include "log.h"
#include "page_manager.h" #include "page_manager.h"
const unsigned efi_page_size = 0x1000; using kutil::frame_block;
const unsigned ident_page_flags = 0xb; using kutil::frame_block_flags;
using kutil::frame_block_list;
static const unsigned ident_page_flags = 0xb;
static const size_t page_size = page_manager::page_size;
extern kutil::frame_allocator g_frame_allocator;
kutil::heap_manager g_kernel_heap_manager;
void * mm_grow_callback(void *next, size_t length)
{
kassert(length % page_manager::page_size == 0,
"Heap manager requested a fractional page.");
size_t pages = length / page_manager::page_size;
log::info(logs::memory, "Heap manager growing heap by %d pages.", pages);
g_page_manager.map_pages(reinterpret_cast<uintptr_t>(next), pages);
return next;
}
namespace { namespace {
// Page-by-page initial allocator for the initial page_block allocator // Page-by-page initial allocator for the initial frame_block allocator
struct page_consumer struct page_consumer
{ {
page_consumer(uintptr_t start) : current(start) {} page_consumer(uintptr_t start, unsigned count, unsigned used = 0) :
current(start + used * page_size),
used(used),
max(count) {}
void * operator()(size_t size) { void * get_page() {
kassert(size == page_manager::page_size, "page_consumer used with non-page size!"); kassert(used++ < max, "page_consumer ran out of pages");
void *retval = reinterpret_cast<void *>(current); void *retval = reinterpret_cast<void *>(current);
current += size; current += page_size;
return retval; return retval;
} }
uintptr_t current; void * operator()(size_t size) {
}; kassert(size == page_size, "page_consumer used with non-page size!");
} return get_page();
}
using block_list = kutil::linked_list<page_block>; unsigned left() const { return max - used; }
using block_allocator = kutil::slab_allocator<page_block, page_consumer &>;
uintptr_t current;
unsigned used, max;
};
using block_allocator =
kutil::slab_allocator<kutil::frame_block, page_consumer &>;
}
enum class efi_memory_type : uint32_t enum class efi_memory_type : uint32_t
{ {
@@ -49,66 +82,13 @@ enum class efi_memory_type : uint32_t
efi_max, efi_max,
popcorn_kernel = 0x80000000, popcorn_kernel = 0x80000000,
popcorn_font,
popcorn_data, popcorn_data,
popcorn_log, popcorn_initrd,
popcorn_pml4, popcorn_scratch,
popcorn_max popcorn_max
}; };
const char *efi_memory_type_names[] = {
" reserved",
" loader_code",
" loader_data",
" boot_services_code",
" boot_services_data",
"runtime_services_code",
"runtime_services_data",
" available",
" unusable",
" acpi_reclaim",
" acpi_nvs",
" mmio",
" mmio_port",
" pal_code",
" popcorn_kernel",
" popcorn_font",
" popcorn_data",
" popcorn_log",
" popcorn_pml4",
};
static const char *
get_efi_name(efi_memory_type t)
{
static const unsigned offset =
(unsigned)efi_memory_type::popcorn_kernel - (unsigned)efi_memory_type::efi_max;
return t >= efi_memory_type::popcorn_kernel ?
efi_memory_type_names[(unsigned)t - offset] :
efi_memory_type_names[(unsigned)t];
}
enum class efi_memory_flag : uint64_t
{
can_mark_uc = 0x0000000000000001, // uc = un-cacheable
can_mark_wc = 0x0000000000000002, // wc = write-combining
can_mark_wt = 0x0000000000000004, // wt = write through
can_mark_wb = 0x0000000000000008, // wb = write back
can_mark_uce = 0x0000000000000010, // uce = un-cacheable exported
can_mark_wp = 0x0000000000001000, // wp = write protected
can_mark_rp = 0x0000000000002000, // rp = read protected
can_mark_xp = 0x0000000000004000, // xp = execute protected
can_mark_ro = 0x0000000000020000, // ro = read only
non_volatile = 0x0000000000008000,
more_reliable = 0x0000000000010000,
runtime = 0x8000000000000000
};
IS_BITFIELD(efi_memory_flag);
struct efi_memory_descriptor struct efi_memory_descriptor
{ {
efi_memory_type type; efi_memory_type type;
@@ -116,7 +96,7 @@ struct efi_memory_descriptor
uint64_t physical_start; uint64_t physical_start;
uint64_t virtual_start; uint64_t virtual_start;
uint64_t pages; uint64_t pages;
efi_memory_flag flags; uint64_t flags;
}; };
static const efi_memory_descriptor * static const efi_memory_descriptor *
@@ -126,68 +106,11 @@ desc_incr(const efi_memory_descriptor *d, size_t desc_length)
reinterpret_cast<const uint8_t *>(d) + desc_length); reinterpret_cast<const uint8_t *>(d) + desc_length);
} }
page_block_list::item_type *
remove_block_for(page_block_list &list, uintptr_t phys_start, size_t pages, page_block_list &cache)
{
// This is basically just the removal portion of page_manager::unmap_pages,
// but with physical addresses, and only ever removing a single block.
for (auto *item : list) {
if (!item->contains_physical(phys_start))
continue;
uint64_t size = page_manager::page_size * pages;
uint64_t end = phys_start + size;
uint64_t leading = phys_start - item->physical_address;
uint64_t trailing = item->physical_end() - end;
if (leading) {
uint64_t pages = leading / page_manager::page_size;
page_block_list::item_type *lead_block = cache.pop_front();
lead_block->copy(item);
lead_block->count = pages;
item->count -= pages;
item->physical_address += leading;
if (item->virtual_address)
item->virtual_address += leading;
list.insert_before(item, lead_block);
}
if (trailing) {
uint64_t pages = trailing / page_manager::page_size;
page_block_list::item_type *trail_block = cache.pop_front();
trail_block->copy(item);
trail_block->count = pages;
trail_block->physical_address += size;
item->count -= pages;
if (item->virtual_address)
trail_block->virtual_address += size;
list.insert_before(item, trail_block);
}
list.remove(item);
return item;
}
kassert(false, "Couldn't find block to remove");
return nullptr;
}
void void
gather_block_lists( gather_block_lists(
block_allocator &allocator, block_allocator &allocator,
block_list &used, frame_block_list &used,
block_list &free, frame_block_list &free,
const void *memory_map, const void *memory_map,
size_t map_length, size_t map_length,
size_t desc_length) size_t desc_length)
@@ -197,117 +120,68 @@ gather_block_lists(
while (desc < end) { while (desc < end) {
auto *block = allocator.pop(); auto *block = allocator.pop();
block->physical_address = desc->physical_start; block->address = desc->physical_start;
block->virtual_address = desc->virtual_start;
block->count = desc->pages; block->count = desc->pages;
bool block_used;
switch (desc->type) { switch (desc->type) {
case efi_memory_type::loader_code: case efi_memory_type::loader_code:
case efi_memory_type::loader_data: case efi_memory_type::loader_data:
block->flags = page_block_flags::used | page_block_flags::pending_free; block_used = true;
block->flags = frame_block_flags::pending_free;
break; break;
case efi_memory_type::boot_services_code: case efi_memory_type::boot_services_code:
case efi_memory_type::boot_services_data: case efi_memory_type::boot_services_data:
case efi_memory_type::available: case efi_memory_type::available:
block->flags = page_block_flags::free; block_used = false;
break; break;
case efi_memory_type::acpi_reclaim: case efi_memory_type::acpi_reclaim:
block_used = true;
block->flags = block->flags =
page_block_flags::used | frame_block_flags::acpi_wait |
page_block_flags::mapped | frame_block_flags::map_ident;
page_block_flags::acpi_wait;
block->virtual_address = block->physical_address;
break; break;
case efi_memory_type::persistent: case efi_memory_type::persistent:
block->flags = page_block_flags::nonvolatile; block_used = false;
block->flags = frame_block_flags::nonvolatile;
break;
case efi_memory_type::popcorn_kernel:
block_used = true;
block->flags = frame_block_flags::map_kernel;
break;
case efi_memory_type::popcorn_data:
case efi_memory_type::popcorn_initrd:
block_used = true;
block->flags =
frame_block_flags::pending_free |
frame_block_flags::map_kernel;
break;
case efi_memory_type::popcorn_scratch:
block_used = true;
block->flags = frame_block_flags::map_offset;
break; break;
default: default:
block->flags = page_block_flags::used | page_block_flags::permanent; block_used = true;
block->flags = frame_block_flags::permanent;
break; break;
} }
if (block->has_flag(page_block_flags::used)) { if (block_used)
if (block->virtual_address || !block->physical_address)
block->flags |= page_block_flags::mapped;
used.push_back(block); used.push_back(block);
} else { else
free.push_back(block); free.push_back(block);
}
desc = desc_incr(desc, desc_length); desc = desc_incr(desc, desc_length);
} }
} }
void
copy_new_table(page_table *base, unsigned index, page_table *new_table)
{
uint64_t entry = base->entries[index];
// If this is a large page and not a a table, bail out.
if(entry & 0x80) return;
if (entry & 0x1) {
page_table *old_next = reinterpret_cast<page_table *>(
base->entries[index] & ~0xffful);
for (int i = 0; i < 512; ++i) new_table->entries[i] = old_next->entries[i];
} else {
for (int i = 0; i < 512; ++i) new_table->entries[i] = 0;
}
base->entries[index] = reinterpret_cast<uint64_t>(new_table) | ident_page_flags;
}
static uint64_t
find_efi_free_aligned_pages(const void *memory_map, size_t map_length, size_t desc_length, unsigned pages)
{
efi_memory_descriptor const *desc =
reinterpret_cast<efi_memory_descriptor const *>(memory_map);
efi_memory_descriptor const *end = desc_incr(desc, map_length);
const unsigned want_space = pages * page_manager::page_size;
uint64_t start_phys = 0;
for (; desc < end; desc = desc_incr(desc, desc_length)) {
if (desc->type != efi_memory_type::available)
continue;
// See if the first wanted pages fit in one page table. If we
// find free memory at zero, skip ahead because we're not ready
// to deal with 0 being a valid pointer yet.
start_phys = desc->physical_start;
if (start_phys == 0)
start_phys += efi_page_size;
const uint64_t desc_end =
desc->physical_start + desc->pages * efi_page_size;
uint64_t end = start_phys + want_space;
if (end < desc_end) {
page_table_indices start_idx{start_phys};
page_table_indices end_idx{end};
if (start_idx[0] == end_idx[0] &&
start_idx[1] == end_idx[1] &&
start_idx[2] == end_idx[2])
break;
// Try seeing if the page-table-aligned version fits
start_phys = page_table_align(start_phys);
end = start_phys + want_space;
if (end < desc_end)
break;
}
}
kassert(desc < end, "Couldn't find wanted pages of aligned scratch space.");
return start_phys;
}
static unsigned static unsigned
check_needs_page_ident(page_table *table, unsigned index, page_table **free_pages) check_needs_page_ident(page_table *table, unsigned index, page_table **free_pages)
{ {
@@ -350,7 +224,7 @@ page_in_ident(
// Do a 2MiB page instead // Do a 2MiB page instead
tables[2]->entries[idx[2]] = phys_addr | 0x80 | ident_page_flags; tables[2]->entries[idx[2]] = phys_addr | 0x80 | ident_page_flags;
phys_addr += page_manager::page_size * 512; phys_addr += page_size * 512;
count -= 512; count -= 512;
if (count == 0) return pages_consumed; if (count == 0) return pages_consumed;
continue; continue;
@@ -362,7 +236,7 @@ page_in_ident(
for (; idx[3] < 512; idx[3] += 1) { for (; idx[3] < 512; idx[3] += 1) {
tables[3]->entries[idx[3]] = phys_addr | ident_page_flags; tables[3]->entries[idx[3]] = phys_addr | ident_page_flags;
phys_addr += page_manager::page_size; phys_addr += page_size;
if (--count == 0) return pages_consumed; if (--count == 0) return pages_consumed;
} }
} }
@@ -374,15 +248,9 @@ page_in_ident(
} }
void void
memory_initialize(const void *memory_map, size_t map_length, size_t desc_length) memory_initialize(uint16_t scratch_pages, const void *memory_map, size_t map_length, size_t desc_length)
{ {
// The bootloader reserved 16 pages for page tables, which we'll use to bootstrap. // make sure the options we want in CR4 are set
// The first one is the already-installed PML4, so grab it from CR3.
uint64_t cr3;
__asm__ __volatile__ ( "mov %%cr3, %0" : "=r" (cr3) );
page_table *tables = reinterpret_cast<page_table *>(cr3 & ~0xfffull);
// We'll need to make sure the options we want in CR4 are set
uint64_t cr4; uint64_t cr4;
__asm__ __volatile__ ( "mov %%cr4, %0" : "=r" (cr4) ); __asm__ __volatile__ ( "mov %%cr4, %0" : "=r" (cr4) );
cr4 |= 0x00080; // Enable global pages cr4 |= 0x00080; // Enable global pages
@@ -390,114 +258,98 @@ memory_initialize(const void *memory_map, size_t map_length, size_t desc_length)
cr4 |= 0x20000; // Enable PCIDs cr4 |= 0x20000; // Enable PCIDs
__asm__ __volatile__ ( "mov %0, %%cr4" :: "r" (cr4) ); __asm__ __volatile__ ( "mov %0, %%cr4" :: "r" (cr4) );
// Now go through EFI's memory map and find a region of scratch space. // The bootloader reserved "scratch_pages" pages for page tables and
const unsigned want_pages = 32; // scratch space, which we'll use to bootstrap. The first one is the
uint64_t free_region_start_phys = // already-installed PML4, so grab it from CR3.
find_efi_free_aligned_pages(memory_map, map_length, desc_length, want_pages); uint64_t scratch_phys;
__asm__ __volatile__ ( "mov %%cr3, %0" : "=r" (scratch_phys) );
scratch_phys &= ~0xfffull;
// Offset-map this region into the higher half. // The tables are ident-mapped currently, so the cr3 physical address works. But let's
uint64_t free_region_start_virt = // get them into the offset-mapped area asap.
free_region_start_phys + page_manager::page_offset; page_table *tables = reinterpret_cast<page_table *>(scratch_phys);
uintptr_t scratch_virt = scratch_phys + page_manager::page_offset;
uint64_t free_next = free_region_start_virt; uint64_t used_pages = 1; // starts with PML4
used_pages += page_in_ident(
// We'll need to copy any existing tables (except the PML4 which the &tables[0],
// bootloader gave us) into our reserved pages so we can edit them. scratch_phys,
page_table_indices fr_idx{free_region_start_virt}; scratch_virt,
scratch_pages,
copy_new_table(&tables[0], fr_idx[0], &tables[1]); tables + used_pages);
copy_new_table(&tables[1], fr_idx[1], &tables[2]);
copy_new_table(&tables[2], fr_idx[2], &tables[3]);
page_in_ident(&tables[0], free_region_start_phys, free_region_start_virt, want_pages, nullptr);
// We now have pages starting at "free_next" to bootstrap ourselves. Start by
// taking inventory of free pages.
page_consumer allocator(free_next);
block_allocator block_slab(page_manager::page_size, allocator);
block_list used;
block_list free;
gather_block_lists(block_slab, used, free, memory_map, map_length, desc_length);
block_slab.allocate(); // Make sure we have extra
free_next = allocator.current;
// Now go back through these lists and consolidate
block_slab.append(page_block::consolidate(free));
block_slab.append(page_block::consolidate(used));
// Pull out the block that represents the bootstrap pages we've used
uint64_t used_bytes = free_next - free_region_start_virt;
uint64_t used_pages = used_bytes / page_manager::page_size;
uint64_t remaining_pages = want_pages - used_pages;
auto *removed = remove_block_for(free, free_region_start_phys,
used_pages, block_slab);
kassert(removed, "remove_block_for didn't find the bootstrap region.");
kassert(removed->physical_address == free_region_start_phys,
"remove_block_for found the wrong region.");
// Add it to the used list
removed->virtual_address = free_region_start_virt;
removed->flags = page_block_flags::used | page_block_flags::mapped;
used.sorted_insert(removed);
// Pull out the block that represents the rest
uint64_t free_next_phys = free_region_start_phys + used_bytes;
removed = remove_block_for(free, free_next_phys,
remaining_pages, block_slab);
kassert(removed, "remove_block_for didn't find the page table region.");
kassert(removed->physical_address == free_next_phys,
"remove_block_for found the wrong region.");
uint64_t pt_start_phys = removed->physical_address;
uint64_t pt_start_virt = removed->physical_address + page_manager::page_offset;
// Record that we're about to remap it into the page table address space
removed->virtual_address = pt_start_virt;
removed->flags = page_block_flags::used | page_block_flags::mapped;
used.sorted_insert(removed);
page_manager *pm = &g_page_manager;
// Actually remap them into page table space
pm->page_out(&tables[0], free_next, remaining_pages);
page_table_indices pg_idx{pt_start_virt};
copy_new_table(&tables[0], pg_idx[0], &tables[4]);
copy_new_table(&tables[4], pg_idx[1], &tables[5]);
copy_new_table(&tables[5], pg_idx[2], &tables[6]);
page_in_ident(&tables[0], pt_start_phys, pt_start_virt, remaining_pages, tables + 4);
// Make sure the page table is finished updating before we write to memory // Make sure the page table is finished updating before we write to memory
__sync_synchronize(); __sync_synchronize();
io_wait(); io_wait();
// We now have pages starting at "scratch_virt" to bootstrap ourselves. Start by
// taking inventory of free pages.
page_consumer allocator(scratch_virt, scratch_pages, used_pages);
block_allocator block_slab(page_size, allocator);
frame_block_list used;
frame_block_list free;
gather_block_lists(block_slab, used, free, memory_map, map_length, desc_length);
block_slab.allocate(); // Make sure we have extra
// Now go back through these lists and consolidate
block_slab.append(frame_block::consolidate(free));
block_slab.append(frame_block::consolidate(used));
// Finally, build an actual set of kernel page tables that just contains // Finally, build an actual set of kernel page tables that just contains
// what the kernel actually has mapped, but making everything writable // what the kernel actually has mapped, but making everything writable
// (especially the page tables themselves) // (especially the page tables themselves)
page_table *pml4 = reinterpret_cast<page_table *>(pt_start_virt); page_table *pml4 = reinterpret_cast<page_table *>(allocator.get_page());
for (int i=0; i<512; ++i) pml4->entries[i] = 0; for (int i=0; i<512; ++i) pml4->entries[i] = 0;
kutil::frame_allocator *fa =
new (&g_frame_allocator) kutil::frame_allocator(std::move(block_slab));
page_manager *pm = new (&g_page_manager) page_manager(*fa);
// Give the rest to the page_manager's cache for use in page_in // Give the rest to the page_manager's cache for use in page_in
pm->free_table_pages(pml4 + 1, remaining_pages - 1); pm->free_table_pages(
reinterpret_cast<void *>(allocator.current),
allocator.left());
uintptr_t heap_start = page_manager::high_offset;
for (auto *block : used) { for (auto *block : used) {
if (!block->has_flag(page_block_flags::mapped)) continue; uintptr_t virt_addr = 0;
pm->page_in(pml4, block->physical_address, block->virtual_address, block->count);
switch (block->flags & frame_block_flags::map_mask) {
case frame_block_flags::map_ident:
virt_addr = block->address;
break;
case frame_block_flags::map_kernel:
virt_addr = block->address + page_manager::high_offset;
heap_start = std::max(heap_start,
virt_addr + block->count * page_size);
break;
case frame_block_flags::map_offset:
virt_addr = block->address + page_manager::page_offset;
break;
default:
break;
}
block->flags -= frame_block_flags::map_mask;
if (virt_addr)
pm->page_in(pml4, block->address, virt_addr, block->count);
} }
fa->init(std::move(free), std::move(used));
// Put our new PML4 into CR3 to start using it // Put our new PML4 into CR3 to start using it
page_manager::set_pml4(pml4); page_manager::set_pml4(pml4);
pm->m_kernel_pml4 = pml4;
// We now have all used memory mapped ourselves. Let the page_manager take // Set the heap manager
// over from here. new (&g_kernel_heap_manager) kutil::heap_manager(
g_page_manager.init( reinterpret_cast<void *>(heap_start),
std::move(free), mm_grow_callback);
std::move(used), kutil::setup::set_heap(&g_kernel_heap_manager);
std::move(block_slab));
} }

View File

@@ -1,13 +1,12 @@
#include <algorithm> #include <algorithm>
#include "kutil/assert.h" #include "kutil/assert.h"
#include "kutil/heap_manager.h"
#include "console.h" #include "console.h"
#include "log.h" #include "log.h"
#include "page_manager.h" #include "page_manager.h"
page_manager g_page_manager; kutil::frame_allocator g_frame_allocator;
kutil::heap_manager g_kernel_heap_manager; page_manager g_page_manager(g_frame_allocator);
static uintptr_t static uintptr_t
@@ -31,143 +30,10 @@ struct free_page_header
}; };
void * mm_grow_callback(void *next, size_t length) page_manager::page_manager(kutil::frame_allocator &frames) :
m_page_cache(nullptr),
m_frames(frames)
{ {
kassert(length % page_manager::page_size == 0,
"Heap manager requested a fractional page.");
size_t pages = length / page_manager::page_size;
log::info(logs::memory, "Heap manager growing heap by %d pages.", pages);
g_page_manager.map_pages(reinterpret_cast<uintptr_t>(next), pages);
return next;
}
int
page_block::compare(const page_block *rhs) const
{
if (virtual_address < rhs->virtual_address)
return -1;
else if (virtual_address > rhs->virtual_address)
return 1;
if (physical_address < rhs->physical_address)
return -1;
else if (physical_address > rhs->physical_address)
return 1;
return 0;
}
page_block_list
page_block::consolidate(page_block_list &list)
{
page_block_list freed;
for (auto *cur : list) {
auto *next = cur->next();
while (next &&
cur->flags == next->flags &&
cur->physical_end() == next->physical_address &&
(!cur->has_flag(page_block_flags::mapped) ||
cur->virtual_end() == next->virtual_address)) {
cur->count += next->count;
list.remove(next);
freed.push_back(next);
}
}
return freed;
}
void
page_block::dump(const page_block_list &list, const char *name, bool show_unmapped)
{
log::info(logs::memory, "Block list %s:", name);
int count = 0;
for (auto *cur : list) {
count += 1;
if (!(show_unmapped || cur->has_flag(page_block_flags::mapped)))
continue;
if (cur->virtual_address) {
page_table_indices start{cur->virtual_address};
log::info(logs::memory, " %016lx %08x [%6d] %016lx (%d,%d,%d,%d)",
cur->physical_address,
cur->flags,
cur->count,
cur->virtual_address,
start[0], start[1], start[2], start[3]);
} else {
page_table_indices start{cur->virtual_address};
log::info(logs::memory, " %016lx %08x [%6d]",
cur->physical_address,
cur->flags,
cur->count);
}
}
log::info(logs::memory, " Total: %d", count);
}
void
page_block::zero()
{
physical_address = 0;
virtual_address = 0;
count = 0;
flags = page_block_flags::free;
}
void
page_block::copy(page_block *other)
{
physical_address = other->physical_address;
virtual_address = other->virtual_address;
count = other->count;
flags = other->flags;
}
page_manager::page_manager() :
m_block_slab(page_size),
m_page_cache(nullptr)
{
kassert(this == &g_page_manager, "Attempt to create another page_manager.");
}
void
page_manager::init(
page_block_list free,
page_block_list used,
page_block_list cache)
{
m_free.append(free);
m_used.append(used);
m_block_slab.append(cache);
consolidate_blocks();
// Initialize the kernel memory manager
uintptr_t end = 0;
for (auto *block : m_used) {
if (block->virtual_address &&
block->virtual_address < page_offset) {
end = block->virtual_end();
} else {
break;
}
}
new (&g_kernel_heap_manager) kutil::heap_manager(
reinterpret_cast<void *>(end),
mm_grow_callback);
kutil::setup::set_heap(&g_kernel_heap_manager);
m_kernel_pml4 = get_pml4();
} }
page_table * page_table *
@@ -203,33 +69,11 @@ page_manager::map_offset_pointer(void **pointer, size_t length)
uintptr_t v = *p + page_offset; uintptr_t v = *p + page_offset;
uintptr_t c = ((length - 1) / page_size) + 1; uintptr_t c = ((length - 1) / page_size) + 1;
// TODO: cleanly search/split this as a block out of used/free if possible
auto *block = m_block_slab.pop();
// TODO: page-align
block->physical_address = *p;
block->virtual_address = v;
block->count = c;
block->flags =
page_block_flags::used |
page_block_flags::mapped |
page_block_flags::mmio;
m_used.sorted_insert(block);
page_table *pml4 = get_pml4(); page_table *pml4 = get_pml4();
page_in(pml4, *p, v, c); page_in(pml4, *p, v, c);
*p = v; *p = v;
} }
void
page_manager::dump_blocks(bool used_only)
{
page_block::dump(m_used, "used", true);
if (!used_only)
page_block::dump(m_free, "free", true);
}
void void
page_manager::dump_pml4(page_table *pml4, int max_index) page_manager::dump_pml4(page_table *pml4, int max_index)
{ {
@@ -243,17 +87,8 @@ page_manager::get_table_page()
{ {
if (!m_page_cache) { if (!m_page_cache) {
uintptr_t phys = 0; uintptr_t phys = 0;
size_t n = pop_pages(32, &phys); size_t n = m_frames.allocate(32, &phys);
uintptr_t virt = phys + page_offset; uintptr_t virt = phys + page_offset;
auto *block = m_block_slab.pop();
block->physical_address = phys;
block->virtual_address = virt;
block->count = n;
m_used.sorted_insert(block);
page_in(get_pml4(), phys, virt, n); page_in(get_pml4(), phys, virt, n);
m_page_cache = reinterpret_cast<free_page_header *>(virt); m_page_cache = reinterpret_cast<free_page_header *>(virt);
@@ -288,13 +123,6 @@ page_manager::free_table_pages(void *pages, size_t count)
} }
} }
void
page_manager::consolidate_blocks()
{
m_block_slab.append(page_block::consolidate(m_free));
m_block_slab.append(page_block::consolidate(m_used));
}
void * void *
page_manager::map_pages(uintptr_t address, size_t count, bool user, page_table *pml4) page_manager::map_pages(uintptr_t address, size_t count, bool user, page_table *pml4)
{ {
@@ -302,21 +130,8 @@ page_manager::map_pages(uintptr_t address, size_t count, bool user, page_table *
if (!pml4) pml4 = get_pml4(); if (!pml4) pml4 = get_pml4();
while (count) { while (count) {
kassert(!m_free.empty(), "page_manager::map_pages ran out of free pages!");
uintptr_t phys = 0; uintptr_t phys = 0;
size_t n = pop_pages(count, &phys); size_t n = m_frames.allocate(count, &phys);
auto *block = m_block_slab.pop();
block->physical_address = phys;
block->virtual_address = address;
block->count = n;
block->flags =
page_block_flags::used |
page_block_flags::mapped;
m_used.sorted_insert(block);
log::debug(logs::memory, "Paging in %d pages at p:%016lx to v:%016lx into %016lx table", log::debug(logs::memory, "Paging in %d pages at p:%016lx to v:%016lx into %016lx table",
n, phys, address, pml4); n, phys, address, pml4);
@@ -330,101 +145,11 @@ page_manager::map_pages(uintptr_t address, size_t count, bool user, page_table *
return ret; return ret;
} }
void *
page_manager::map_offset_pages(size_t count)
{
page_table *pml4 = get_pml4();
for (auto *free : m_free) {
if (free->count < count) continue;
auto *used = m_block_slab.pop();
used->count = count;
used->physical_address = free->physical_address;
used->virtual_address = used->physical_address + page_offset;
used->flags =
page_block_flags::used |
page_block_flags::mapped;
m_used.sorted_insert(used);
free->physical_address += count * page_size;
free->count -= count;
if (free->count == 0) {
m_free.remove(free);
free->zero();
m_block_slab.push(free);
}
log::debug(logs::memory, "Got request for offset map %016lx [%d]", used->virtual_address, count);
page_in(pml4, used->physical_address, used->virtual_address, count);
return reinterpret_cast<void *>(used->virtual_address);
}
return nullptr;
}
void void
page_manager::unmap_pages(void* address, size_t count) page_manager::unmap_pages(void* address, size_t count)
{ {
uintptr_t addr = reinterpret_cast<uintptr_t>(address); // TODO: uh, actually unmap that shit??
size_t block_count = 0; m_frames.free(reinterpret_cast<uintptr_t>(address), count);
for (auto *block : m_used) {
if (!block->contains(addr)) continue;
size_t size = page_size * count;
uintptr_t end = addr + size;
size_t leading = addr - block->virtual_address;
size_t trailing =
end > block->virtual_end() ?
0 : (block->virtual_end() - end);
if (leading) {
size_t pages = leading / page_size;
auto *lead_block = m_block_slab.pop();
lead_block->copy(block);
lead_block->count = pages;
block->count -= pages;
block->physical_address += leading;
block->virtual_address += leading;
m_used.insert_before(block, lead_block);
}
if (trailing) {
size_t pages = trailing / page_size;
auto *trail_block = m_block_slab.pop();
trail_block->copy(block);
trail_block->count = pages;
trail_block->physical_address += size;
trail_block->virtual_address += size;
block->count -= pages;
m_used.insert_after(block, trail_block);
}
addr += block->count * page_size;
block->virtual_address = 0;
block->flags = block->flags &
~(page_block_flags::used | page_block_flags::mapped);
m_used.remove(block);
m_free.sorted_insert(block);
++block_count;
}
kassert(block_count, "Couldn't find existing mapped pages to unmap");
} }
void void
@@ -511,25 +236,6 @@ page_manager::page_out(page_table *pml4, uintptr_t virt_addr, size_t count)
kassert(0, "Ran to end of page_out"); kassert(0, "Ran to end of page_out");
} }
size_t
page_manager::pop_pages(size_t count, uintptr_t *address)
{
kassert(!m_free.empty(), "page_manager::pop_pages ran out of free pages!");
auto *first = m_free.front();
unsigned n = std::min(count, static_cast<size_t>(first->count));
*address = first->physical_address;
first->physical_address += n * page_size;
first->count -= n;
if (first->count == 0)
m_block_slab.push(m_free.pop_front());
return n;
}
void void
page_table::dump(int level, int max_index, uint64_t offset) page_table::dump(int level, int max_index, uint64_t offset)
{ {
@@ -569,4 +275,25 @@ page_table::dump(int level, int max_index, uint64_t offset)
} }
} }
page_table_indices::page_table_indices(uint64_t v) :
index{
(v >> 39) & 0x1ff,
(v >> 30) & 0x1ff,
(v >> 21) & 0x1ff,
(v >> 12) & 0x1ff }
{}
uintptr_t
page_table_indices::addr() const
{
return
(index[0] << 39) |
(index[1] << 30) |
(index[2] << 21) |
(index[3] << 12);
}
bool operator==(const page_table_indices &l, const page_table_indices &r)
{
return l[0] == r[0] && l[1] == r[1] && l[2] == r[2] && l[3] == r[3];
}

View File

@@ -6,17 +6,14 @@
#include <stdint.h> #include <stdint.h>
#include "kutil/enum_bitfields.h" #include "kutil/enum_bitfields.h"
#include "kutil/frame_allocator.h"
#include "kutil/linked_list.h" #include "kutil/linked_list.h"
#include "kutil/slab_allocator.h" #include "kutil/slab_allocator.h"
struct page_block;
struct page_table; struct page_table;
struct free_page_header; struct free_page_header;
using page_block_list = kutil::linked_list<page_block>; /// Manager for allocation and mapping of pages
using page_block_slab = kutil::slab_allocator<page_block>;
/// Manager for allocation of physical pages.
class page_manager class page_manager
{ {
public: public:
@@ -35,7 +32,7 @@ public:
/// Initial process thread's stack size, in pages /// Initial process thread's stack size, in pages
static const unsigned initial_stack_pages = 1; static const unsigned initial_stack_pages = 1;
page_manager(); page_manager(kutil::frame_allocator &frames);
/// Helper to get the number of pages needed for a given number of bytes. /// Helper to get the number of pages needed for a given number of bytes.
/// \arg bytes The number of bytes desired /// \arg bytes The number of bytes desired
@@ -78,13 +75,6 @@ public:
/// \returns A pointer to the start of the mapped region /// \returns A pointer to the start of the mapped region
void * map_pages(uintptr_t address, size_t count, bool user = false, page_table *pml4 = nullptr); void * map_pages(uintptr_t address, size_t count, bool user = false, page_table *pml4 = nullptr);
/// Allocate and map contiguous pages into virtual memory, with
/// a constant offset from their physical address.
/// \arg count The number of pages to map
/// \returns A pointer to the start of the mapped region, or
/// nullptr if no region could be found to fit the request.
void * map_offset_pages(size_t count);
/// Unmap existing pages from memory. /// Unmap existing pages from memory.
/// \arg address The virtual address of the memory to unmap /// \arg address The virtual address of the memory to unmap
/// \arg count The number of pages to unmap /// \arg count The number of pages to unmap
@@ -111,10 +101,6 @@ public:
return kutil::offset_pointer(reinterpret_cast<void *>(a), page_offset); return kutil::offset_pointer(reinterpret_cast<void *>(a), page_offset);
} }
/// Log the current free/used block lists.
/// \arg used_only If true, skip printing free list. Default false.
void dump_blocks(bool used_only = false);
/// Dump the given or current PML4 to the console /// Dump the given or current PML4 to the console
/// \arg pml4 The page table to use, null for the current one /// \arg pml4 The page table to use, null for the current one
/// \arg max_index The max index of pml4 to print /// \arg max_index The max index of pml4 to print
@@ -125,20 +111,6 @@ public:
static page_manager * get(); static page_manager * get();
private: private:
/// Set up the memory manager from bootstraped memory
void init(
page_block_list free,
page_block_list used,
page_block_list cache);
/// Create a `page_block` struct or pull one from the cache.
/// \returns An empty `page_block` struct
page_block * get_block();
/// Return a list of `page_block` structs to the cache.
/// \arg block A list of `page_block` structs
void free_blocks(page_block *block);
/// Allocate a page for a page table, or pull one from the cache /// Allocate a page for a page table, or pull one from the cache
/// \returns An empty page mapped in page space /// \returns An empty page mapped in page space
page_table * get_table_page(); page_table * get_table_page();
@@ -148,10 +120,6 @@ private:
/// \arg count Number of pages in the range /// \arg count Number of pages in the range
void free_table_pages(void *pages, size_t count); void free_table_pages(void *pages, size_t count);
/// Consolidate the free and used block lists. Return freed blocks
/// to the cache.
void consolidate_blocks();
/// Helper function to allocate a new page table. If table entry `i` in /// Helper function to allocate a new page table. If table entry `i` in
/// table `base` is empty, allocate a new page table and point `base[i]` at /// table `base` is empty, allocate a new page table and point `base[i]` at
/// it. /// it.
@@ -182,23 +150,11 @@ private:
uintptr_t virt_addr, uintptr_t virt_addr,
size_t count); size_t count);
/// Get free pages from the free list. Only pages from the first free block
/// are returned, so the number may be less than requested, but they will
/// be contiguous. Pages will not be mapped into virtual memory.
/// \arg count The maximum number of pages to get
/// \arg address [out] The address of the first page
/// \returns The number of pages retrieved
size_t pop_pages(size_t count, uintptr_t *address);
page_table *m_kernel_pml4; ///< The PML4 of just kernel pages page_table *m_kernel_pml4; ///< The PML4 of just kernel pages
page_block_list m_free; ///< Free pages list
page_block_list m_used; ///< In-use pages list
page_block_slab m_block_slab; ///< page_block slab allocator
free_page_header *m_page_cache; ///< Cache of free pages to use for tables free_page_header *m_page_cache; ///< Cache of free pages to use for tables
kutil::frame_allocator &m_frames;
friend void memory_initialize(const void *, size_t, size_t); friend void memory_initialize(uint16_t, const void *, size_t, size_t);
page_manager(const page_manager &) = delete; page_manager(const page_manager &) = delete;
}; };
@@ -207,67 +163,6 @@ extern page_manager g_page_manager;
inline page_manager * page_manager::get() { return &g_page_manager; } inline page_manager * page_manager::get() { return &g_page_manager; }
/// Flags used by `page_block`.
enum class page_block_flags : uint32_t
{
free = 0x00000000, ///< Not a flag, value for free memory
used = 0x00000001, ///< Memory is in use
mapped = 0x00000002, ///< Memory is mapped to virtual address
mmio = 0x00000010, ///< Memory is a MMIO region
nonvolatile = 0x00000020, ///< Memory is non-volatile storage
pending_free = 0x10000000, ///< Memory should be freed
acpi_wait = 0x40000000, ///< Memory should be freed after ACPI init
permanent = 0x80000000, ///< Memory is permanently unusable
max_flags
};
IS_BITFIELD(page_block_flags);
/// A block of contiguous pages. Each `page_block` represents contiguous
/// physical pages with the same attributes. A `page_block *` is also a
/// linked list of such structures.
struct page_block
{
uintptr_t physical_address;
uintptr_t virtual_address;
uint32_t count;
page_block_flags flags;
inline bool has_flag(page_block_flags f) const { return bitfield_has(flags, f); }
inline uintptr_t physical_end() const { return physical_address + (count * page_manager::page_size); }
inline uintptr_t virtual_end() const { return virtual_address + (count * page_manager::page_size); }
inline bool contains(uintptr_t vaddr) const { return vaddr >= virtual_address && vaddr < virtual_end(); }
inline bool contains_physical(uintptr_t addr) const { return addr >= physical_address && addr < physical_end(); }
/// Helper to zero out a block and optionally set the next pointer.
void zero();
/// Helper to copy a bock from another block
/// \arg other The block to copy from
void copy(page_block *other);
/// Compare two blocks by address.
/// \arg rhs The right-hand comparator
/// \returns <0 if this is sorts earlier, >0 if this sorts later, 0 for equal
int compare(const page_block *rhs) const;
/// Traverse the list, joining adjacent blocks where possible.
/// \arg list The list to consolidate
/// \returns A linked list of freed page_block structures.
static page_block_list consolidate(page_block_list &list);
/// Traverse the list, printing debug info on this list.
/// \arg list The list to print
/// \arg name [optional] String to print as the name of this list
/// \arg show_permanent [optional] If false, hide unmapped blocks
static void dump(const page_block_list &list, const char *name = nullptr, bool show_unmapped = false);
};
/// Struct to allow easy accessing of a memory page being used as a page table. /// Struct to allow easy accessing of a memory page being used as a page table.
struct page_table struct page_table
@@ -293,19 +188,19 @@ struct page_table
/// Helper struct for computing page table indices of a given address. /// Helper struct for computing page table indices of a given address.
struct page_table_indices struct page_table_indices
{ {
page_table_indices(uint64_t v = 0) : page_table_indices(uint64_t v = 0);
index{
(v >> 39) & 0x1ff, uintptr_t addr() const;
(v >> 30) & 0x1ff,
(v >> 21) & 0x1ff, inline operator uintptr_t() const { return addr(); }
(v >> 12) & 0x1ff }
{}
/// Get the index for a given level of page table. /// Get the index for a given level of page table.
uint64_t & operator[](size_t i) { return index[i]; } uint64_t & operator[](size_t i) { return index[i]; }
uint64_t operator[](size_t i) const { return index[i]; }
uint64_t index[4]; ///< Indices for each level of tables. uint64_t index[4]; ///< Indices for each level of tables.
}; };
bool operator==(const page_table_indices &l, const page_table_indices &r);
/// Calculate a page-aligned address. /// Calculate a page-aligned address.
/// \arg p The address to align. /// \arg p The address to align.
@@ -336,4 +231,4 @@ inline size_t page_count(size_t n) { return ((n - 1) / page_manager::page_size)
/// Bootstrap the memory managers. /// Bootstrap the memory managers.
void memory_initialize(const void *memory_map, size_t map_length, size_t desc_length); void memory_initialize(uint16_t scratch_pages, const void *memory_map, size_t map_length, size_t desc_length);

View File

@@ -53,14 +53,18 @@ frame_block::copy(frame_block *other)
frame_allocator::frame_allocator( frame_allocator::frame_allocator(
frame_block_list free,
frame_block_list used,
frame_block_list cache) frame_block_list cache)
{
m_block_slab.append(cache);
}
void
frame_allocator::init(
frame_block_list free,
frame_block_list used)
{ {
m_free.append(free); m_free.append(free);
m_used.append(used); m_used.append(used);
m_block_slab.append(cache);
consolidate_blocks(); consolidate_blocks();
} }

View File

@@ -21,14 +21,19 @@ public:
/// Size of a single page frame. /// Size of a single page frame.
static const size_t frame_size = 0x1000; static const size_t frame_size = 0x1000;
/// Constructor. Sets up the frame allocator from bootstraped memory. /// Default constructor
frame_allocator() = default;
/// Constructor with a provided initial frame_block cache.
/// \arg cache List of pre-allocated but unused frame_block structures
frame_allocator(frame_block_list cache);
/// Initialize the frame allocator from bootstraped memory.
/// \arg free List of free blocks /// \arg free List of free blocks
/// \arg used List of currently used blocks /// \arg used List of currently used blocks
/// \arg cache List of pre-allocated but unused frame_block structures void init(
frame_allocator(
frame_block_list free, frame_block_list free,
frame_block_list used, frame_block_list used);
frame_block_list cache);
/// Get free frames from the free list. Only frames from the first free block /// Get free frames from the free list. Only frames from the first free block
/// are returned, so the number may be less than requested, but they will /// are returned, so the number may be less than requested, but they will
@@ -59,14 +64,22 @@ private:
/// Flags used by `frame_block`. /// Flags used by `frame_block`.
enum class frame_block_flags : uint32_t enum class frame_block_flags : uint32_t
{ {
none = 0x00, none = 0x0000,
mmio = 0x01, ///< Memory is a MMIO region mmio = 0x0001, ///< Memory is a MMIO region
nonvolatile = 0x02, ///< Memory is non-volatile storage nonvolatile = 0x0002, ///< Memory is non-volatile storage
pending_free = 0x10, ///< Memory should be freed pending_free = 0x0020, ///< Memory should be freed
acpi_wait = 0x40, ///< Memory should be freed after ACPI init acpi_wait = 0x0040, ///< Memory should be freed after ACPI init
permanent = 0x80 ///< Memory is permanently unusable permanent = 0x0080, ///< Memory is permanently unusable
// The following are used only during the memory bootstraping
// process, and tell the page manager where to initially map
// the given block.
map_ident = 0x0100, ///< Identity map
map_kernel = 0x0200, ///< Map into normal kernel space
map_offset = 0x0400, ///< Map into offset kernel space
map_mask = 0x0700, ///< Mask of all map_* values
}; };
} // namespace kutil } // namespace kutil

View File

@@ -31,10 +31,8 @@ TEST_CASE( "Frame allocator tests", "[memory frame]" )
f->count = 1; f->count = 1;
free.sorted_insert(f); free.sorted_insert(f);
frame_allocator fa( frame_allocator fa(std::move(cache));
std::move(free), fa.init(std::move(free), std::move(used));
std::move(used),
std::move(cache));
uintptr_t a = 0; uintptr_t a = 0;
size_t c = fa.allocate(2, &a); size_t c = fa.allocate(2, &a);