diff --git a/src/kernel/memory_bootstrap.cpp b/src/kernel/memory_bootstrap.cpp
index f6e5c73..77af51d 100644
--- a/src/kernel/memory_bootstrap.cpp
+++ b/src/kernel/memory_bootstrap.cpp
@@ -142,76 +142,23 @@ count_table_pages_needed(page_block *used)
 }
 
 
-void
-memory_manager::create(const void *memory_map, size_t map_length, size_t desc_length)
+uint64_t
+gather_block_lists(
+	uint64_t scratch,
+	const void *memory_map,
+	size_t map_length,
+	size_t desc_length,
+	page_block **free_head,
+	page_block **used_head)
 {
-	console *cons = console::get();
+	int i = 0;
+	page_block **free = free_head;
+	page_block **used = used_head;
 
-	// The bootloader reserved 4 pages for page tables, which we'll use to bootstrap.
-	// The first one is the already-installed PML4, so grab it from CR3.
-	page_table *tables = nullptr;
-	__asm__ __volatile__ ( "mov %%cr3, %0" : "=r" (tables) );
-
-	// Now go through EFi's memory map and find a 4MiB region of free space to
-	// use as a scratch space. We'll use the 2MiB that fits naturally aligned
-	// into a single page table.
-	efi_memory_descriptor const *desc =
-		reinterpret_cast<efi_memory_descriptor const *>(memory_map);
+	page_block *block_list = reinterpret_cast<page_block *>(scratch);
+	efi_memory_descriptor const *desc = reinterpret_cast<efi_memory_descriptor const *>(memory_map);
 	efi_memory_descriptor const *end = desc_incr(desc, map_length);
 
-	while (desc < end) {
-		if (desc->type == efi_memory_type::available && desc->pages >= 1024)
-			break;
-
-		desc = desc_incr(desc, desc_length);
-	}
-	kassert(desc < end, "Couldn't find 4MiB of contiguous scratch space.");
-
-	uint64_t free_region = (desc->physical_start & 0x1fffff) == 0 ?
-		desc->physical_start :
-		desc->physical_start + 0x1fffff & ~0x1fffffull;
-
-	// Offset-map this region into the higher half.
-	uint64_t next_free = free_region + 0xffff800000000000;
-
-	cons->puts("Found region: ");
-	cons->put_hex(free_region);
-	cons->puts("\n");
-
-	// We'll need to copy any existing tables (except the PML4 which the
-	// bootloader gave us) into our 4 reserved pages so we can edit them.
-	page_table_indices fr_idx{free_region};
-	fr_idx[0] += 256; // Flip the highest bit of the address
-
-	if (tables[0].entries[fr_idx[0]] & 0x1) {
-		page_table *old_pdpt = tables[0].next(fr_idx[0]);
-		for (int i = 0; i < 512; ++i) tables[1].entries[i] = old_pdpt->entries[i];
-	} else {
-		for (int i = 0; i < 512; ++i) tables[1].entries[i] = 0;
-	}
-	tables[0].entries[fr_idx[0]] = reinterpret_cast<uint64_t>(&tables[1]) | 0xb;
-
-	if (tables[1].entries[fr_idx[1]] & 0x1) {
-		page_table *old_pdt = tables[1].next(fr_idx[1]);
-		for (int i = 0; i < 512; ++i) tables[2].entries[i] = old_pdt->entries[i];
-	} else {
-		for (int i = 0; i < 512; ++i) tables[2].entries[i] = 0;
-	}
-	tables[1].entries[fr_idx[1]] = reinterpret_cast<uint64_t>(&tables[2]) | 0xb;
-
-	for (int i = 0; i < 512; ++i)
-		tables[3].entries[i] = (free_region + 0x1000 * i) | 0xb;
-	tables[2].entries[fr_idx[2]] = reinterpret_cast<uint64_t>(&tables[3]) | 0xb;
-
-	// We now have 2MiB starting at "free_region" to bootstrap ourselves. Start by
-	// taking inventory of free pages.
-	page_block *block_list = reinterpret_cast<page_block *>(next_free);
-
-	int i = 0;
-	page_block *free_head = nullptr, **free = &free_head;
-	page_block *used_head = nullptr, **used = &used_head;
-
-	desc = reinterpret_cast<efi_memory_descriptor const *>(memory_map);
 	while (desc < end) {
 		page_block *block = &block_list[i++];
 		block->physical_address = desc->physical_start;
@@ -228,18 +175,16 @@ memory_manager::create(const void *memory_map, size_t map_length, size_t desc_le
 		case efi_memory_type::boot_services_code:
 		case efi_memory_type::boot_services_data:
 		case efi_memory_type::available:
-			if (free_region >= block->physical_address && free_region < block->end()) {
+			if (scratch >= block->physical_address && scratch < block->physical_end()) {
 				// This is the scratch memory block, split off what we're not using
 				block->virtual_address = block->physical_address + 0xffff800000000000;
-
-				block->flags = page_block_flags::used
-					| page_block_flags::mapped
-					| page_block_flags::pending_free;
+				block->flags = page_block_flags::used | page_block_flags::mapped;
 
 				if (block->count > 1024) {
 					page_block *rest = &block_list[i++];
 					rest->physical_address = desc->physical_start + (1024*0x1000);
 					rest->virtual_address = 0;
+					rest->flags = page_block_flags::free;
 					rest->count = desc->pages - 1024;
 					rest->next = nullptr;
 					*free = rest;
@@ -278,9 +223,71 @@ memory_manager::create(const void *memory_map, size_t map_length, size_t desc_le
 		desc = desc_incr(desc, desc_length);
 	}
 
-	// Update the pointer to the next free page
-	next_free += i * sizeof(page_block);
-	next_free = ((next_free - 1) & ~0xfffull) + 0x1000;
+	return reinterpret_cast<uint64_t>(&block_list[i]);
+}
+
+void
+memory_manager::create(const void *memory_map, size_t map_length, size_t desc_length)
+{
+	console *cons = console::get();
+
+	// The bootloader reserved 4 pages for page tables, which we'll use to bootstrap.
+	// The first one is the already-installed PML4, so grab it from CR3.
+	page_table *tables = nullptr;
+	__asm__ __volatile__ ( "mov %%cr3, %0" : "=r" (tables) );
+
+	// Now go through EFI's memory map and find a 4MiB region of free space to
+	// use as a scratch space. We'll use the 2MiB that fits naturally aligned
+	// into a single page table.
+	efi_memory_descriptor const *desc =
+		reinterpret_cast<efi_memory_descriptor const *>(memory_map);
+	efi_memory_descriptor const *end = desc_incr(desc, map_length);
+
+	while (desc < end) {
+		if (desc->type == efi_memory_type::available && desc->pages >= 1024)
+			break;
+
+		desc = desc_incr(desc, desc_length);
+	}
+	kassert(desc < end, "Couldn't find 4MiB of contiguous scratch space.");
+
+	uint64_t free_region = page_table_align(desc->physical_start);
+
+	// Offset-map this region into the higher half.
+	uint64_t next_free = free_region + 0xffff800000000000;
+
+	// We'll need to copy any existing tables (except the PML4 which the
+	// bootloader gave us) into our 4 reserved pages so we can edit them.
+	page_table_indices fr_idx{free_region};
+	fr_idx[0] += 256; // Flip the highest bit of the address
+
+	if (tables[0].entries[fr_idx[0]] & 0x1) {
+		page_table *old_pdpt = tables[0].next(fr_idx[0]);
+		for (int i = 0; i < 512; ++i) tables[1].entries[i] = old_pdpt->entries[i];
+	} else {
+		for (int i = 0; i < 512; ++i) tables[1].entries[i] = 0;
+	}
+	tables[0].entries[fr_idx[0]] = reinterpret_cast<uint64_t>(&tables[1]) | 0xb;
+
+	if (tables[1].entries[fr_idx[1]] & 0x1) {
+		page_table *old_pdt = tables[1].next(fr_idx[1]);
+		for (int i = 0; i < 512; ++i) tables[2].entries[i] = old_pdt->entries[i];
+	} else {
+		for (int i = 0; i < 512; ++i) tables[2].entries[i] = 0;
+	}
+	tables[1].entries[fr_idx[1]] = reinterpret_cast<uint64_t>(&tables[2]) | 0xb;
+
+	for (int i = 0; i < 512; ++i)
+		tables[3].entries[i] = (free_region + 0x1000 * i) | 0xb;
+	tables[2].entries[fr_idx[2]] = reinterpret_cast<uint64_t>(&tables[3]) | 0xb;
+
+	// We now have 2MiB starting at "free_region" to bootstrap ourselves. Start by
+	// taking inventory of free pages.
+	page_block *free_head = nullptr;
+	page_block *used_head = nullptr;
+	next_free = gather_block_lists(next_free, memory_map, map_length, desc_length,
+		&free_head, &used_head);
+	next_free = page_align(next_free);
 
 	// Now go back through these lists and consolidate
 	free_head->list_consolidate();
@@ -290,7 +297,7 @@ memory_manager::create(const void *memory_map, size_t map_length, size_t desc_le
 	// what the kernel actually has mapped.
 	unsigned table_page_count = count_table_pages_needed(used_head);
 
-	cons->puts("To map currently-mapped pages, we need ");
-	cons->put_dec(table_page_count);
-	cons->puts(" pages of tables.\n");
+	page_table *pages = reinterpret_cast<page_table *>(next_free);
+	next_free += table_page_count * 0x1000;
+
 }
diff --git a/src/kernel/memory_pages.cpp b/src/kernel/memory_pages.cpp
index d276f16..a4618a1 100644
--- a/src/kernel/memory_pages.cpp
+++ b/src/kernel/memory_pages.cpp
@@ -11,7 +11,7 @@ page_block::list_consolidate()
 		page_block *next = cur->next;
 		if (next &&
 			cur->flags == next->flags &&
-			cur->end() == next->physical_address)
+			cur->physical_end() == next->physical_address)
 		{
 			cur->count += next->count;
 			cur->next = next->next;
@@ -60,15 +60,3 @@ page_block::list_dump(const char *name)
 	cons->put_dec(count);
 	cons->puts("\n");
 }
-
-void
-page_table_indices::dump()
-{
-	console *cons = console::get();
-	cons->puts("{");
-	for (int i = 0; i < 4; ++i) {
-		if (i) cons->puts(", ");
-		cons->put_dec(index[i]);
-	}
-	cons->puts("}");
-}
diff --git a/src/kernel/memory_pages.h b/src/kernel/memory_pages.h
index 5ecb05d..e3558e5 100644
--- a/src/kernel/memory_pages.h
+++ b/src/kernel/memory_pages.h
@@ -1,9 +1,12 @@
 #pragma once
+/// \file memory_pages.h
+/// Structures related to handling memory paging.
 
 #include <stdint.h>
 
 #include "kutil/enum_bitfields.h"
 
+/// Flags used by `page_block`.
 enum class page_block_flags : uint32_t
 {
 	// Not a flag value, but for comparison
@@ -22,6 +25,10 @@ enum class page_block_flags : uint32_t
 };
 
 IS_BITFIELD(page_block_flags);
+
+/// A block of contiguous pages. Each `page_block` represents contiguous
+/// physical pages with the same attributes. A `page_block *` is also a
+/// linked list of such structures.
 struct page_block
 {
 	uint64_t physical_address;
@@ -31,15 +38,23 @@ struct page_block
 	page_block *next;
 
 	bool has_flag(page_block_flags f) const { return bitfield_contains(flags, f); }
-	uint64_t end() const { return physical_address + (count * 0x1000); }
+	uint64_t physical_end() const { return physical_address + (count * 0x1000); }
+	uint64_t virtual_end() const { return virtual_address + (count * 0x1000); }
 
+	/// Traverse the list, joining adjacent blocks where possible.
+	/// \returns  A linked list of freed page_block structures.
 	page_block * list_consolidate();
+
+	/// Traverse the list, printing debug info on this list.
+	/// \arg name  [optional] String to print as the name of this list
 	void list_dump(const char *name = nullptr);
 };
 
+
+/// Helper struct for computing page table indices of a given address.
 struct page_table_indices
 {
-	page_table_indices(uint64_t v) :
+	page_table_indices(uint64_t v = 0) :
 		index{
 			(v >> 39) & 0x1ff,
 			(v >> 30) & 0x1ff,
@@ -47,8 +62,18 @@ struct page_table_indices
 			(v >> 12) & 0x1ff
 		}
 	{}
+	/// Get the index for a given level of page table.
 	uint64_t & operator[](size_t i) { return index[i]; }
-	uint64_t index[4];
-
-	void dump();
+	uint64_t index[4]; ///< Indices for each level of tables.
 };
+
+/// Calculate a page-aligned address.
+/// \arg p  The address to align.
+/// \returns  `p` rounded up to the next page boundary (unchanged if already aligned).
+template <typename T> inline T page_align(T p) { return ((p - 1) & ~0xfffull) + 0x1000; }
+
+/// Calculate a page-table-aligned address. That is, an address that is
+/// page-aligned to the first page in a page table.
+/// \arg p  The address to align.
+/// \returns  `p` rounded up to the next page-table boundary (unchanged if already aligned).
+template <typename T> inline T page_table_align(T p) { return ((p - 1) & ~0x1fffffull) + 0x200000; }
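
Not part of the patch: a minimal, host-compiled sanity check of the helpers added in memory_pages.h. The struct below is a condensed copy of page_table_indices (including the `(v >> 21)` level that sits between the two hunks above), and the example addresses are made up; it only illustrates that the two align helpers leave already-aligned addresses unchanged, and why create() can offset-map the scratch region into the higher half with nothing more than `fr_idx[0] += 256`.

#include <cassert>
#include <cstddef>
#include <cstdint>

// Condensed copy of the page_table_indices helper from memory_pages.h.
struct page_table_indices
{
	page_table_indices(uint64_t v = 0) :
		index{
			(v >> 39) & 0x1ff,   // PML4 entry
			(v >> 30) & 0x1ff,   // PDPT entry
			(v >> 21) & 0x1ff,   // PD entry (between the hunks above)
			(v >> 12) & 0x1ff }  // PT entry
	{}
	uint64_t & operator[](size_t i) { return index[i]; }
	uint64_t index[4];
};

// The two alignment helpers, as added by the patch.
template <typename T> inline T page_align(T p) { return ((p - 1) & ~0xfffull) + 0x1000; }
template <typename T> inline T page_table_align(T p) { return ((p - 1) & ~0x1fffffull) + 0x200000; }

int main()
{
	// Round up to the next 4KiB / 2MiB boundary; aligned inputs come back unchanged.
	assert(page_align<uint64_t>(0x1001) == 0x2000);
	assert(page_align<uint64_t>(0x2000) == 0x2000);
	assert(page_table_align<uint64_t>(0x123456) == 0x200000);
	assert(page_table_align<uint64_t>(0x200000) == 0x200000);

	// Offset-mapping a (sub-128TiB) physical address into the higher half only
	// changes the PML4 index, and by exactly 256 -- the same "flip the highest
	// bit" trick create() uses with fr_idx[0] += 256.
	uint64_t free_region = 0x200000;  // hypothetical scratch region
	page_table_indices phys{free_region};
	page_table_indices virt{free_region + 0xffff800000000000ull};
	assert(virt[0] == phys[0] + 256);
	assert(virt[1] == phys[1] && virt[2] == phys[2] && virt[3] == phys[3]);

	return 0;
}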