diff --git a/NOTES.md b/NOTES.md
index 330d32b..a88c8ad 100644
--- a/NOTES.md
+++ b/NOTES.md
@@ -5,4 +5,5 @@
 
 - Better page-allocation model
 - Reclaim skipped bootstrap scratch space
 - Allow for more than one IOAPIC in ACPI module
+- Move list functions to be standalone in case of null
diff --git a/src/kernel/memory.h b/src/kernel/memory.h
index 0df400d..1b73129 100644
--- a/src/kernel/memory.h
+++ b/src/kernel/memory.h
@@ -14,6 +14,8 @@ public:
 
 private:
 	friend class page_manager;
+
+
 };
 
 extern memory_manager g_memory_manager;
diff --git a/src/kernel/memory_bootstrap.cpp b/src/kernel/memory_bootstrap.cpp
index 00bc77c..e51ac89 100644
--- a/src/kernel/memory_bootstrap.cpp
+++ b/src/kernel/memory_bootstrap.cpp
@@ -102,12 +102,6 @@ desc_incr(const efi_memory_descriptor *d, size_t desc_length)
 		reinterpret_cast<const uint8_t *>(d) + desc_length);
 }
 
-struct page_table
-{
-	uint64_t entries[512];
-	page_table * next(int i) const { return reinterpret_cast<page_table *>(entries[i] & ~0xfffull); }
-};
-
 static unsigned
 count_table_pages_needed(page_block *used)
 {
@@ -145,7 +139,8 @@ count_table_pages_needed(page_block *used)
 
 uint64_t
 gather_block_lists(
-	uint64_t scratch,
+	uint64_t scratch_phys,
+	uint64_t scratch_virt,
 	const void *memory_map,
 	size_t map_length,
 	size_t desc_length,
@@ -156,7 +151,7 @@ gather_block_lists(
 	page_block **free = free_head;
 	page_block **used = used_head;
 
-	page_block *block_list = reinterpret_cast<page_block *>(scratch);
+	page_block *block_list = reinterpret_cast<page_block *>(scratch_virt);
 
 	efi_memory_descriptor const *desc = reinterpret_cast<efi_memory_descriptor const *>(memory_map);
 	efi_memory_descriptor const *end = desc_incr(desc, map_length);
@@ -176,10 +171,10 @@ gather_block_lists(
 		case efi_memory_type::boot_services_code:
 		case efi_memory_type::boot_services_data:
 		case efi_memory_type::available:
-			if (scratch >= block->physical_address && scratch < block->physical_end()) {
+			if (scratch_phys >= block->physical_address && scratch_phys < block->physical_end()) {
 				// This is the scratch memory block, split off what we're not using
 				block->virtual_address = block->physical_address + page_manager::high_offset;
-				block->flags = page_block_flags::used | page_block_flags::mapped;
+				block->flags = page_block_flags::used;
 
 				if (block->count > 1024) {
 					page_block *rest = &block_list[i++];
@@ -227,65 +222,21 @@ gather_block_lists(
 	return reinterpret_cast<uint64_t>(&block_list[i]);
 }
 
-unsigned check_needs_page(page_table *table, unsigned index, page_table **free_pages)
-{
-	if (table->entries[index] & 0x1 == 1) return 0;
-
-	kassert(*free_pages, "check_needs_page needed to allocate but had no free pages");
-
-	page_table *new_table = (*free_pages)++;
-	for (int i=0; i<512; ++i) new_table->entries[i] = 0;
-	table->entries[index] = reinterpret_cast<uint64_t>(new_table) | 0xb;
-	return 1;
-}
-
-unsigned page_in(page_table *pml4, uint64_t phys_addr, uint64_t virt_addr, uint64_t count, page_table *free_pages)
-{
-	page_table_indices idx{virt_addr};
-	page_table *tables[4] = {pml4, nullptr, nullptr, nullptr};
-
-	unsigned pages_consumed = 0;
-	for (; idx[0] < 512; idx[0] += 1) {
-		pages_consumed += check_needs_page(tables[0], idx[0], &free_pages);
-		tables[1] = reinterpret_cast<page_table *>(
-				tables[0]->entries[idx[0]] & ~0xfffull);
-
-		for (; idx[1] < 512; idx[1] += 1) {
-			pages_consumed += check_needs_page(tables[1], idx[1], &free_pages);
-			tables[2] = reinterpret_cast<page_table *>(
-					tables[1]->entries[idx[1]] & ~0xfffull);
-
-			for (; idx[2] < 512; idx[2] += 1) {
-				pages_consumed += check_needs_page(tables[2], idx[2], &free_pages);
-				tables[3] = reinterpret_cast<page_table *>(
-						tables[2]->entries[idx[2]] & ~0xfffull);
-
-				for (; idx[3] < 512; idx[3] += 1) {
-					tables[3]->entries[idx[3]] = phys_addr | 0xb;
-					phys_addr += page_manager::page_size;
-					if (--count == 0) return pages_consumed;
-				}
-			}
-		}
-	}
-
-	kassert(0, "Ran to end of page_in");
-}
-
 page_block *
 fill_page_with_blocks(uint64_t start)
 {
-	uint64_t space = page_align(start) - start;
-	uint64_t count = space / sizeof(page_block);
+	uint64_t end = page_align(start);
+	uint64_t count = (end - start) / sizeof(page_block);
+	if (count == 0)
+		return nullptr;
+
 	page_block *blocks = reinterpret_cast<page_block *>(start);
-	kutil::memset(blocks, 0, sizeof(page_block)*count);
 
-	page_block *head = nullptr, **insert = &head;
-	for (unsigned i = 0; i < count; ++i) {
-		*insert = &blocks[i];
-		insert = &blocks[i].next;
-	}
-
-	return head;
+	// Zero and chain together every struct that fully fits before the
+	// page boundary; the last one terminates the list.
+	for (uint64_t i = 0; i < count - 1; ++i)
+		blocks[i].zero(&blocks[i + 1]);
+	blocks[count - 1].zero();
+	return blocks;
 }
 
 void
@@ -314,16 +265,14 @@ memory_initialize_managers(const void *memory_map, size_t map_length, size_t des
 	kassert(desc < end, "Couldn't find 4MiB of contiguous scratch space.");
 
 	// Offset-map this region into the higher half.
-	uint64_t free_region_start = desc->physical_start;
-	uint64_t free_region = page_table_align(free_region_start);
-	uint64_t next_free = free_region + page_manager::high_offset;
-	cons->puts("Skipping ");
-	cons->put_dec(free_region - free_region_start);
-	cons->puts(" bytes to get page-table-aligned.\n");
+	uint64_t free_start_phys = desc->physical_start;
+	uint64_t free_start = free_start_phys + page_manager::high_offset;
+	uint64_t free_aligned_phys = page_table_align(free_start_phys);
+	uint64_t free_next = free_aligned_phys + page_manager::high_offset;
 
 	// We'll need to copy any existing tables (except the PML4 which the
 	// bootloader gave us) into our 4 reserved pages so we can edit them.
-	page_table_indices fr_idx{free_region};
+	page_table_indices fr_idx{free_aligned_phys};
 	fr_idx[0] += 256; // Flip the highest bit of the address
 
 	if (tables[0].entries[fr_idx[0]] & 0x1) {
@@ -344,28 +293,31 @@ memory_initialize_managers(const void *memory_map, size_t map_length, size_t des
 	// No need to copy the last-level page table, we're overwriting the whole thing
 	tables[2].entries[fr_idx[2]] = reinterpret_cast<uint64_t>(&tables[3]) | 0xb;
 
-	page_in(&tables[0], free_region, next_free, 512, nullptr);
+	page_in(&tables[0], free_aligned_phys, free_next, 512, nullptr);
 
-	// We now have 2MiB starting at "free_region" to bootstrap ourselves. Start by
+	// We now have 2MiB starting at "free_aligned_phys" to bootstrap ourselves. Start by
 	// taking inventory of free pages.
 	page_block *free_head = nullptr;
 	page_block *used_head = nullptr;
-	next_free = gather_block_lists(next_free, memory_map, map_length, desc_length,
+	free_next = gather_block_lists(
+			free_aligned_phys, free_next,
+			memory_map, map_length, desc_length,
 			&free_head, &used_head);
 
 	// Unused page_block structs go here - finish out the current page with them
-	page_block *cache_head = fill_page_with_blocks(next_free);
-	next_free = page_align(next_free);
+	page_block *cache_head = fill_page_with_blocks(free_next);
+	free_next = page_align(free_next);
 
 	// Now go back through these lists and consolidate
-	page_block **cache = &cache_head;
-	*cache = free_head->list_consolidate();
-	while (*cache) cache = &(*cache)->next;
-	*cache = used_head->list_consolidate();
+	page_block *freed = free_head->list_consolidate();
+	cache_head->list_append(freed);
+
+	freed = used_head->list_consolidate();
+	cache_head->list_append(freed);
 
 	// Ok, now build an actual set of kernel page tables that just contains
 	// what the kernel actually has mapped.
-	page_table *pages = reinterpret_cast<page_table *>(next_free);
+	page_table *pages = reinterpret_cast<page_table *>(free_next);
 	unsigned consumed_pages = 1; // We're about to make a PML4, start with 1
 
 	// Finally, remap the existing mappings, but making everything writable
@@ -378,11 +330,14 @@ memory_initialize_managers(const void *memory_map, size_t map_length, size_t des
 		consumed_pages += page_in(pml4, cur->physical_address,
 				cur->virtual_address, cur->count, pages + consumed_pages);
 	}
-	next_free += (consumed_pages * page_manager::page_size);
+	free_next += (consumed_pages * page_manager::page_size);
+
+	// Put our new PML4 into CR3 to start using it
+	__asm__ __volatile__ ( "mov %0, %%cr3" : : "r" (pml4) );
 
 	// We now have all used memory mapped ourselves. Let the page_manager take
 	// over from here.
 	g_page_manager.init(
 			free_head, used_head, cache_head,
-			free_region_start, 1024, next_free);
+			free_start, 1024, free_next);
 }
diff --git a/src/kernel/memory_pages.cpp b/src/kernel/memory_pages.cpp
index 5e661b5..b3b4731 100644
--- a/src/kernel/memory_pages.cpp
+++ b/src/kernel/memory_pages.cpp
@@ -5,31 +5,77 @@
 
 page_manager g_page_manager;
 
-page_block *
-page_block::list_consolidate()
+struct free_page_header
 {
-	page_block *freed_head = nullptr, **freed = &freed_head;
-	for (page_block *cur = this; cur; cur = cur->next) {
-		page_block *next = cur->next;
+	free_page_header *next;
+	size_t count;
+};
 
-		if (next && cur->flags == next->flags &&
-			cur->physical_end() == next->physical_address)
-		{
-			cur->count += next->count;
-			cur->next = next->next;
-
-			next->next = 0;
-			*freed = next;
-			freed = &next->next;
-			continue;
-		}
-	}
-
-	return freed_head;
+size_t
+page_block::list_count()
+{
+	size_t i = 0;
+	for (page_block *b = this; b; b = b->next) ++i;
+	return i;
 }
 
 void
-page_block::list_dump(const char *name)
+page_block::list_append(page_block *list)
+{
+	page_block *cur = this;
+	while (cur->next) cur = cur->next;
+	cur->next = list;
+}
+
+page_block *
+page_block::list_insert(page_block *block)
+{
+	page_block *cur = this;
+	page_block **prev = nullptr;
+	while (cur && cur->physical_address < block->physical_address) {
+		prev = &cur->next;
+		cur = cur->next;
+	}
+
+	block->next = cur;
+	if (prev) {
+		*prev = block;
+		return this;
+	}
+	return block;
+}
+
+page_block *
+page_block::list_consolidate()
+{
+	page_block *freed = nullptr;
+	page_block *cur = this;
+
+	while (cur) {
+		page_block *next = cur->next;
+
+		if (next &&
+			cur->flags == next->flags &&
+			cur->physical_end() == next->physical_address &&
+			(!cur->has_flag(page_block_flags::mapped) ||
+			 cur->virtual_end() == next->virtual_address)) {
+
+			cur->count += next->count;
+			cur->next = next->next;
+
+			next->zero(freed);
+			freed = next;
+			continue;
+		}
+
+		cur = cur->next;
+	}
+
+	return freed;
+}
+
+void
+page_block::list_dump(const char *name, bool show_unmapped)
 {
 	console *cons = console::get();
 	cons->puts("Block list");
@@ -41,6 +87,10 @@ page_block::list_dump(const char *name)
 
 	int count = 0;
 	for (page_block *cur = this; cur; cur = cur->next) {
+		count += 1;
+		if (!(show_unmapped || cur->has_flag(page_block_flags::mapped)))
+			continue;
+
 		cons->puts(" ");
 		cons->put_hex(cur->physical_address);
 		cons->puts(" ");
@@ -53,7 +103,6 @@ page_block::list_dump(const char *name)
 		cons->put_dec(cur->count);
 		cons->puts("]\n");
 
-		count += 1;
 	}
 
 	cons->puts(" Total: ");
@@ -61,6 +110,26 @@ page_block::list_dump(const char *name)
 	cons->put_dec(count);
 	cons->puts("\n");
 }
 
+void
+page_block::zero(page_block *set_next)
+{
+	physical_address = 0;
+	virtual_address = 0;
+	count = 0;
+	flags = page_block_flags::free;
+	next = set_next;
+}
+
+void
+page_block::copy(page_block *other)
+{
+	physical_address = other->physical_address;
+	virtual_address = other->virtual_address;
+	count = other->count;
+	flags = other->flags;
+	next = other->next;
+}
+
 page_manager::page_manager() :
 	m_free(nullptr),
@@ -77,7 +146,185 @@ page_manager::init(
 	page_block *used,
 	page_block *block_cache,
 	uint64_t scratch_start,
-	uint64_t scratch_length,
+	uint64_t scratch_pages,
 	uint64_t scratch_cur)
 {
+	m_free = free;
+	m_used = used;
+	m_block_cache = block_cache;
+
+	kassert(scratch_cur == page_align(scratch_cur),
+		"Current scratch space pointer is not page-aligned.");
+
+	uint64_t scratch_end = scratch_start + page_size * scratch_pages;
+	uint64_t unused_pages = (scratch_end - scratch_cur) / page_size;
+
+	unmap_pages(scratch_cur, unused_pages);
+	consolidate_blocks();
+
+	uint64_t scratch_aligned_start = page_table_align(scratch_start);
+	if (scratch_aligned_start != scratch_start) {
+		free_page_header *header =
+			reinterpret_cast<free_page_header *>(scratch_start);
+		header->count = (scratch_aligned_start - scratch_start) / page_size;
+		header->next = m_page_cache;
+		m_page_cache = header;
+	}
 }
+
+void
+page_manager::free_blocks(page_block *block)
+{
+	if (!block) return;
+
+	page_block *cur = block;
+	while (cur) {
+		page_block *next = cur->next;
+		cur->zero(cur->next ? cur->next : m_block_cache);
+		cur = next;
+	}
+
+	m_block_cache = block;
+}
+
+page_block *
+page_manager::get_block()
+{
+	page_block *block = m_block_cache;
+	if (block) {
+		m_block_cache = block->next;
+		block->next = nullptr;
+		return block;
+	}
+	kassert(0, "NYI: page_manager::get_block() needed to allocate.");
+	return nullptr;
+}
+
+void *
+page_manager::map_pages(uint64_t address, unsigned count)
+{
+	kassert(0, "NYI: page_manager::map_pages()");
+	return nullptr;
+}
+
+void
+page_manager::unmap_pages(uint64_t address, unsigned count)
+{
+	page_block **prev = &m_used;
+	page_block *cur = m_used;
+	while (cur && !cur->contains(address)) {
+		prev = &cur->next;
+		cur = cur->next;
+	}
+
+	kassert(cur, "Couldn't find existing mapped pages to unmap");
+
+	uint64_t leading = address - cur->virtual_address;
+	uint64_t trailing = cur->virtual_end() - (address + page_size*count);
+
+	if (leading) {
+		uint64_t pages = leading / page_size;
+
+		page_block *lead_block = get_block();
+		lead_block->copy(cur);
+		lead_block->next = cur;
+		lead_block->count = pages;
+
+		cur->physical_address += leading;
+		cur->virtual_address += leading;
+		cur->count -= pages;
+
+		*prev = lead_block;
+		prev = &lead_block->next;
+	}
+
+	if (trailing) {
+		uint64_t pages = trailing / page_size;
+		uint64_t offset = (cur->count - pages) * page_size;
+
+		page_block *trail_block = get_block();
+		trail_block->copy(cur);
+		trail_block->next = cur->next;
+		trail_block->count = pages;
+		trail_block->physical_address += offset;
+		trail_block->virtual_address += offset;
+
+		cur->count -= pages;
+		cur->next = trail_block;
+	}
+
+	*prev = cur->next;
+	cur->next = nullptr;
+	cur->flags = cur->flags & ~(page_block_flags::used | page_block_flags::mapped);
+	m_free = m_free->list_insert(cur);
+}
+
+void
+page_manager::consolidate_blocks()
+{
+	m_block_cache->list_append(m_free->list_consolidate());
+	m_block_cache->list_append(m_used->list_consolidate());
+}
+
+static unsigned
+check_needs_page(page_table *table, unsigned index, page_table **free_pages)
+{
+	if ((table->entries[index] & 0x1) == 1) return 0;
+
+	kassert(*free_pages, "check_needs_page needed to allocate but had no free pages");
+
+	page_table *new_table = (*free_pages)++;
+	for (int i=0; i<512; ++i) new_table->entries[i] = 0;
+	table->entries[index] = reinterpret_cast<uint64_t>(new_table) | 0xb;
+	return 1;
+}
+
+unsigned
+page_in(page_table *pml4, uint64_t phys_addr, uint64_t virt_addr, uint64_t count, page_table *free_pages)
+{
+	page_table_indices idx{virt_addr};
+	page_table *tables[4] = {pml4, nullptr, nullptr, nullptr};
+
+	unsigned pages_consumed = 0;
+	for (; idx[0] < 512; idx[0] += 1, idx[1] = 0) {
+		pages_consumed += check_needs_page(tables[0], idx[0], &free_pages);
+		tables[1] = reinterpret_cast<page_table *>(
+				tables[0]->entries[idx[0]] & ~0xfffull);
+
+		for (; idx[1] < 512; idx[1] += 1, idx[2] = 0, idx[3] = 0) {
+			pages_consumed += check_needs_page(tables[1], idx[1], &free_pages);
+			tables[2] = reinterpret_cast<page_table *>(
+					tables[1]->entries[idx[1]] & ~0xfffull);
+
+			for (; idx[2] < 512; idx[2] += 1, idx[3] = 0) {
+				pages_consumed += check_needs_page(tables[2], idx[2], &free_pages);
+				tables[3] = reinterpret_cast<page_table *>(
+						tables[2]->entries[idx[2]] & ~0xfffull);
+
+				for (; idx[3] < 512; idx[3] += 1) {
+					tables[3]->entries[idx[3]] = phys_addr | 0xb;
+					phys_addr += page_manager::page_size;
+					if (--count == 0) return pages_consumed;
+				}
+			}
+		}
+	}
+
+	kassert(0, "Ran to end of page_in");
+}
+
+void
+page_out(page_table *pml4, uint64_t virt_addr, uint64_t count)
+{
+	page_table_indices idx{virt_addr};
+	page_table *tables[4] = {pml4, nullptr, nullptr, nullptr};
+
+	for (; idx[0] < 512; idx[0] += 1, idx[1] = 0) {
+		tables[1] = reinterpret_cast<page_table *>(
+				tables[0]->entries[idx[0]] & ~0xfffull);
+
+		for (; idx[1] < 512; idx[1] += 1, idx[2] = 0) {
+			tables[2] = reinterpret_cast<page_table *>(
+					tables[1]->entries[idx[1]] & ~0xfffull);
+
+			for (; idx[2] < 512; idx[2] += 1, idx[3] = 0) {
+				tables[3] = reinterpret_cast<page_table *>(
+						tables[2]->entries[idx[2]] & ~0xfffull);
+
+				for (; idx[3] < 512; idx[3] += 1) {
+					tables[3]->entries[idx[3]] = 0;
+					if (--count == 0) return;
+				}
+			}
+		}
+	}
+
+	kassert(0, "Ran to end of page_out");
+}
+
diff --git a/src/kernel/memory_pages.h b/src/kernel/memory_pages.h
index 31086cb..e6114d0 100644
--- a/src/kernel/memory_pages.h
+++ b/src/kernel/memory_pages.h
@@ -2,24 +2,26 @@
 /// \file memory_pages.h
 /// The page memory manager and related definitions.
 
+#include <stddef.h>
 #include <stdint.h>
 #include "kutil/enum_bitfields.h"
 
 struct page_block;
-struct free_page;
+struct free_page_header;
 
 /// Manager for allocation of physical pages.
 class page_manager
 {
 public:
-	page_manager();
-
 	static const uint64_t page_size = 0x1000;
 	static const uint64_t high_offset = 0xffff800000000000;
 
-	page_manager(const page_manager &) = delete;
+	page_manager();
+
+	void * map_pages(uint64_t address, unsigned count);
+	void unmap_pages(uint64_t address, unsigned count);
 
 private:
 	friend void memory_initialize_managers(const void *, size_t, size_t);
@@ -36,17 +38,30 @@ private:
 	/// Initialize the virtual memory manager based on this object's state
 	void init_memory_manager();
 
+	/// Create a `page_block` struct or pull one from the cache.
+	/// \returns An empty `page_block` struct
+	page_block * get_block();
+
+	/// Return a list of `page_block` structs to the cache.
+	/// \arg block A list of `page_block` structs
+	void free_blocks(page_block *block);
+
+	/// Consolidate the free and used block lists. Return freed blocks
+	/// to the cache.
+	void consolidate_blocks();
+
 	page_block *m_free;        ///< Free pages list
 	page_block *m_used;        ///< In-use pages list
 	page_block *m_block_cache; ///< Cache of unused page_block structs
-	free_page *m_page_cache;   ///< Cache of free pages to use for tables
+	free_page_header *m_page_cache; ///< Cache of free pages to use for tables
+
+	page_manager(const page_manager &) = delete;
 };
 
 /// Global page manager.
 extern page_manager g_page_manager;
 
-
 /// Flags used by `page_block`.
 enum class page_block_flags : uint32_t
 {
@@ -75,9 +90,35 @@ struct page_block
 	page_block_flags flags;
 	page_block *next;
 
-	bool has_flag(page_block_flags f) const { return bitfield_contains(flags, f); }
-	uint64_t physical_end() const { return physical_address + (count * page_manager::page_size); }
-	uint64_t virtual_end() const { return virtual_address + (count * page_manager::page_size); }
+	inline bool has_flag(page_block_flags f) const { return bitfield_contains(flags, f); }
+	inline uint64_t physical_end() const { return physical_address + (count * page_manager::page_size); }
+	inline uint64_t virtual_end() const { return virtual_address + (count * page_manager::page_size); }
+	inline bool contains(uint64_t vaddr) const { return vaddr >= virtual_address && vaddr < virtual_end(); }
+
+	/// Helper to zero out a block and optionally set the next pointer.
+	/// \arg set_next [optional] The value for the `next` pointer
+	void zero(page_block *set_next = nullptr);
+
+	/// Helper to copy a block from another block
+	/// \arg other The block to copy from
+	void copy(page_block *other);
+
+	/// \name Linked list functions
+	/// Functions to act on a `page_block *` as a linked list
+	/// @{
+
+	/// Count the items in this linked list.
+	/// \returns The number of entries in the list.
+	size_t list_count();
+
+	/// Append the given block or list to this list.
+	/// \arg list The list to append to the current list
+	void list_append(page_block *list);
+
+	/// Sorted-insert of a block into the list.
+	/// \arg block The single block to insert
+	/// \returns The new list head
+	page_block * list_insert(page_block *block);
 
 	/// Traverse the list, joining adjacent blocks where possible.
 	/// \returns A linked list of freed page_block structures.
@@ -85,7 +126,20 @@ struct page_block
 
 	/// Traverse the list, printing debug info on this list.
 	/// \arg name [optional] String to print as the name of this list
-	void list_dump(const char *name = nullptr);
+	/// \arg show_unmapped [optional] If false, hide unmapped blocks
+	void list_dump(const char *name = nullptr, bool show_unmapped = false);
+
+	/// @}
+};
+
+
+/// Struct to allow easy accessing of a memory page being used as a page table.
+struct page_table
+{
+	uint64_t entries[512];
+	inline page_table * next(int i) const {
+		return reinterpret_cast<page_table *>(entries[i] & ~0xfffull);
+	}
 };
@@ -119,3 +173,26 @@ template <typename T> inline T page_align(T p)
 /// \arg p The address to align.
 /// \returns The next page-table-aligned address _after_ `p`.
 template <typename T> inline T page_table_align(T p) { return ((p - 1) & ~0x1fffffull) + 0x200000; }
+
+/// Low-level routine for mapping a number of pages into the given page table.
+/// \arg pml4 The root page table to map into
+/// \arg phys_addr The starting physical address of the pages to be mapped
+/// \arg virt_addr The starting virtual address of the memory to be mapped
+/// \arg count The number of pages to map
+/// \arg free_pages A pointer to a list of free, mapped pages to use for new page tables.
+/// \returns The number of pages consumed from `free_pages`.
+unsigned page_in(
+	page_table *pml4,
+	uint64_t phys_addr,
+	uint64_t virt_addr,
+	uint64_t count,
+	page_table *free_pages);
+
+/// Low-level routine for unmapping a number of pages from the given page table.
+/// \arg pml4 The root page table for this mapping
+/// \arg virt_addr The starting virtual address of the memory to be unmapped
+/// \arg count The number of pages to unmap
+void page_out(
+	page_table *pml4,
+	uint64_t virt_addr,
+	uint64_t count);
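
`page_table_indices`, used by `page_in` and `page_out` above, is defined elsewhere in the tree and is not part of this patch. For reference, here is a minimal sketch of the conventional x86-64 decomposition it presumably performs: each paging level is indexed by 9 bits of the virtual address, starting at bit 39 for the PML4. The definition below is illustrative only, not the tree's actual code:

    #include <stdint.h>

    /// Decompose a virtual address into its four page-table indices.
    /// (Illustrative sketch; the real definition lives elsewhere in the tree.)
    struct page_table_indices
    {
        uint64_t index[4];

        page_table_indices(uint64_t virt_addr)
        {
            index[0] = (virt_addr >> 39) & 0x1ff; // PML4 (top-level) entry
            index[1] = (virt_addr >> 30) & 0x1ff; // PDPT entry
            index[2] = (virt_addr >> 21) & 0x1ff; // page-directory entry
            index[3] = (virt_addr >> 12) & 0x1ff; // page-table entry
        }

        uint64_t & operator[](int i) { return index[i]; }
        uint64_t operator[](int i) const { return index[i]; }
    };

With this layout, `page_in`'s nested loops walk the four levels in order, and resetting the lower indices to zero whenever a higher level advances is what lets a mapping span table boundaries.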