diff --git a/src/kernel/memory_bootstrap.cpp b/src/kernel/memory_bootstrap.cpp
index ee377d4..62262a7 100644
--- a/src/kernel/memory_bootstrap.cpp
+++ b/src/kernel/memory_bootstrap.cpp
@@ -111,7 +111,7 @@ memory_initialize_pre_ctors(args::header *kargs)
     // Create the page manager
     new (&g_page_manager) page_manager {g_frame_allocator, kpml4};
 
-    vm_space &vm = *new (&g_kernel_space) vm_space {kpml4, true};
+    vm_space &vm = *new (&g_kernel_space) vm_space {kpml4};
     vm.allow(memory::heap_start, memory::kernel_max_heap, true);
 }
 
diff --git a/src/kernel/page_manager.cpp b/src/kernel/page_manager.cpp
index 1d3cfb1..afadc35 100644
--- a/src/kernel/page_manager.cpp
+++ b/src/kernel/page_manager.cpp
@@ -37,16 +37,10 @@ pt_from_phys(uintptr_t p)
 }
 
 
-struct free_page_header
-{
-    free_page_header *next;
-    size_t count;
-};
 
 page_manager::page_manager(frame_allocator &frames, page_table *pml4) :
     m_kernel_pml4(pml4),
-    m_page_cache(nullptr),
    m_frames(frames)
 {
 }
 
@@ -54,7 +48,7 @@ page_manager::page_manager(frame_allocator &frames, page_table *pml4) :
 page_table *
 page_manager::create_process_map()
 {
-    page_table *table = get_table_page();
+    page_table *table = page_table::get_table_page();
     kutil::memset(table, 0, frame_size/2);
 
     for (unsigned i = pml4e_kernel; i < table_entries; ++i)
@@ -107,47 +101,6 @@ page_manager::dump_pml4(page_table *pml4, bool recurse)
     pml4->dump(page_table::level::pml4, recurse);
 }
 
-page_table *
-page_manager::get_table_page()
-{
-    if (!m_page_cache) {
-        uintptr_t phys = 0;
-        size_t n = m_frames.allocate(32, &phys); // TODO: indicate frames must be offset-mappable
-        uintptr_t virt = phys + page_offset;
-
-        m_page_cache = reinterpret_cast<free_page_header *>(virt);
-
-        // The last one needs to be null, so do n-1
-        uintptr_t end = virt + (n-1) * frame_size;
-        while (virt < end) {
-            reinterpret_cast<free_page_header *>(virt)->next =
-                reinterpret_cast<free_page_header *>(virt + frame_size);
-            virt += frame_size;
-        }
-        reinterpret_cast<free_page_header *>(virt)->next = nullptr;
-
-        log::info(logs::paging, "Mappd %d new page table pages at %lx", n, phys);
-    }
-
-    free_page_header *page = m_page_cache;
-    m_page_cache = page->next;
-
-    return reinterpret_cast<page_table *>(page);
-}
-
-void
-page_manager::free_table_pages(void *pages, size_t count)
-{
-    uintptr_t start = reinterpret_cast<uintptr_t>(pages);
-    for (size_t i = 0; i < count; ++i) {
-        uintptr_t addr = start + (i * frame_size);
-        free_page_header *header = reinterpret_cast<free_page_header *>(addr);
-        header->count = 1;
-        header->next = m_page_cache;
-        m_page_cache = header;
-    }
-}
-
 void *
 page_manager::map_pages(uintptr_t address, size_t count, bool user, page_table *pml4)
 {
@@ -233,7 +186,7 @@ page_manager::unmap_table(page_table *table, page_table::level lvl, bool free, page_table_indices index)
         m_frames.free(free_start, (free_count * size) / frame_size);
     }
 
-    free_table_pages(table, 1);
+    page_table::free_table_page(table);
     log::debug(logs::paging, "Unmapped%s lv %d table at %016lx",
         free ? " (and freed)" : "", lvl, table);
 
@@ -255,7 +208,7 @@ page_manager::check_needs_page(page_table *table, unsigned index, bool user)
 {
     if ((table->entries[index] & 0x1) == 1) return;
-    page_table *new_table = get_table_page();
+    page_table *new_table = page_table::get_table_page();
     for (int i=0; i<table_entries; ++i) new_table->entries[i] = 0;
     table->entries[index] = pt_to_phys(new_table) |
         (user ? user_table_flags : sys_table_flags);
 }
diff --git a/src/kernel/page_manager.h b/src/kernel/page_manager.h
index 6bbc236..28b46d9 100644
--- a/src/kernel/page_manager.h
+++ b/src/kernel/page_manager.h
@@ -90,15 +90,6 @@ public:
     inline page_table * get_kernel_pml4() { return m_kernel_pml4; }
 
 private:
-    /// Allocate a page for a page table, or pull one from the cache
-    /// \returns An empty page mapped in page space
-    page_table * get_table_page();
-
-    /// Return a set of mapped contiguous pages to the page cache.
-    /// \arg pages Pointer to the first page to be returned
-    /// \arg count Number of pages in the range
-    void free_table_pages(void *pages, size_t count);
-
     /// Helper function to allocate a new page table. If table entry `i` in
     /// table `base` is empty, allocate a new page table and point `base[i]` at
     /// it.
@@ -138,7 +129,6 @@ private:
         page_table_indices index = {});
 
     page_table *m_kernel_pml4; ///< The PML4 of just kernel pages
-    free_page_header *m_page_cache; ///< Cache of free pages to use for tables
     frame_allocator &m_frames;
 
 
diff --git a/src/kernel/page_table.cpp b/src/kernel/page_table.cpp
index b3b220f..1094adc 100644
--- a/src/kernel/page_table.cpp
+++ b/src/kernel/page_table.cpp
@@ -8,6 +8,11 @@ using memory::page_offset;
 using level = page_table::level;
 
+extern frame_allocator &g_frame_allocator;
+
+free_page_header * page_table::s_page_cache = nullptr;
+size_t page_table::s_cache_count = 0;
+
 // Flags: 0 0 0 0 0 0 0 0 0 0 1 1 = 0x0003
 // IGNORED | | | | | | | +- Present
 //         | | | | | | +--- Writeable
@@ -20,6 +25,7 @@ using level = page_table::level;
 /// Page table entry flags for entries pointing at another table
 constexpr uint16_t table_flags = 0x003;
 
+
 page_table::iterator::iterator(uintptr_t virt, page_table *pml4) :
     m_table {pml4, 0, 0, 0}
 {
@@ -157,9 +163,6 @@ page_table::iterator::ensure_table(level l)
     if (l == level::pml4 || l > level::pt) return;
     if (check_table(l)) return;
 
-    // TODO: a better way to get at the frame allocator
-    extern frame_allocator g_frame_allocator;
-
     uintptr_t phys = 0;
     size_t n = g_frame_allocator.allocate(1, &phys);
     kassert(n, "Failed to allocate a page table");
@@ -195,6 +198,55 @@ page_table::set(int i, page_table *p, uint16_t flags)
         (flags & 0xfff);
 }
 
+struct free_page_header { free_page_header *next; };
+
+page_table *
+page_table::get_table_page()
+{
+    if (!s_cache_count)
+        fill_table_page_cache();
+
+    free_page_header *page = s_page_cache;
+    s_page_cache = s_page_cache->next;
+    --s_cache_count;
+
+    return reinterpret_cast<page_table *>(page);
+}
+
+void
+page_table::free_table_page(page_table *pt)
+{
+    free_page_header *page =
+        reinterpret_cast<free_page_header *>(pt);
+    page->next = s_page_cache;
+    s_page_cache = page;
+    ++s_cache_count;
+}
+
+void
+page_table::fill_table_page_cache()
+{
+    constexpr size_t min_pages = 16;
+
+    while (s_cache_count < min_pages) {
+        uintptr_t phys = 0;
+        size_t n = g_frame_allocator.allocate(min_pages - s_cache_count, &phys);
+
+        free_page_header *start =
+            memory::to_virtual<free_page_header>(phys);
+
+        for (int i = 0; i < n - 1; ++i)
+            kutil::offset_pointer(start, i * memory::frame_size)
+                ->next = kutil::offset_pointer(start, (i+1) * memory::frame_size);
+
+        free_page_header *end =
+            kutil::offset_pointer(start, (n-1) * memory::frame_size);
+
+        end->next = s_page_cache;
+        s_page_cache = start;
+        s_cache_count += n;
+    }
+}
 
 void
 page_table::dump(page_table::level lvl, bool recurse)
diff --git a/src/kernel/page_table.h b/src/kernel/page_table.h
index f9cd680..9f428eb 100644
--- a/src/kernel/page_table.h
+++ b/src/kernel/page_table.h
@@ -5,6 +5,7 @@
 #include <stdint.h>
 #include "kernel_memory.h"
 
+struct free_page_header;
 class page_manager;
 
 /// Struct to allow easy accessing of a memory page being used as a page table.
@@ -111,6 +112,21 @@ struct page_table
         uint16_t m_index[D];
     };
 
+    /// Allocate a page for a page table, or pull one from the cache
+    /// \returns An empty page, mapped in the linear offset area
+    static page_table * get_table_page();
+
+    /// Return a page table's page to the page cache.
+    /// \arg pt The page to be returned
+    static void free_table_page(page_table *pt);
+
+    /// Ensure the page table page cache has a minimum number of pages
+    /// in it.
+    static void fill_table_page_cache();
+
+    static free_page_header *s_page_cache; ///< Cache of free pages to use for tables
+    static size_t s_cache_count; ///< Number of pages in s_page_cache
+
     /// Get an entry in the page table as a page_table pointer
     /// \arg i Index of the entry in this page table
     /// \arg flags [out] If set, this will receive the entry's flags
diff --git a/src/kernel/vm_space.cpp b/src/kernel/vm_space.cpp
index 49bcfcb..f0c6c9c 100644
--- a/src/kernel/vm_space.cpp
+++ b/src/kernel/vm_space.cpp
@@ -19,9 +19,10 @@ vm_space::area::operator==(const vm_space::area &o) const
 }
 
 
-vm_space::vm_space(page_table *p, bool kernel) :
-    m_kernel(kernel),
-    m_pml4(p)
+vm_space::vm_space(page_table *p) : m_kernel(true), m_pml4(p) {}
+
+vm_space::vm_space() :
+    m_kernel(false)
 {
 }
 
diff --git a/src/kernel/vm_space.h b/src/kernel/vm_space.h
index 0068194..1c984e2 100644
--- a/src/kernel/vm_space.h
+++ b/src/kernel/vm_space.h
@@ -14,10 +14,12 @@ class vm_area;
 class vm_space
 {
 public:
-    /// Constructor.
-    /// \arg pml4 The pml4 for this address space
-    /// \arg kernel True if this is the kernel address space
-    vm_space(page_table *pml4, bool kernel = false);
+    /// Constructor for the kernel address space
+    /// \arg pml4 The existing kernel PML4
+    vm_space(page_table *pml4);
+
+    /// Constructor. Creates a new address space.
+    vm_space();
 
     ~vm_space();
 
@@ -81,6 +83,7 @@ private:
     bool m_kernel;
     page_table *m_pml4;
 
+
     struct area {
         uintptr_t base;
         vm_area *area;
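
For reference, the allocation pattern behind the new static page-table cache can be exercised on its own. The sketch below is only an illustration of the free-list logic added in page_table.cpp, not kernel code: it swaps g_frame_allocator for aligned_alloc-backed fake frames, and kFrameSize and fill_cache are made-up stand-ins for memory::frame_size and page_table::fill_table_page_cache.

// Host-side sketch of the page-table page cache introduced above.
// Assumptions (not from the patch): frames come from aligned_alloc instead of
// the kernel's g_frame_allocator, and kFrameSize stands in for memory::frame_size.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>

constexpr size_t kFrameSize = 0x1000;

struct free_page_header { free_page_header *next; };

static free_page_header *s_page_cache = nullptr;
static size_t s_cache_count = 0;

// Mirrors fill_table_page_cache(): top up the LIFO free list until it holds
// at least min_pages entries.
static void fill_cache(size_t min_pages = 16)
{
    while (s_cache_count < min_pages) {
        size_t n = min_pages - s_cache_count;
        auto *start = static_cast<uint8_t*>(aligned_alloc(kFrameSize, n * kFrameSize));
        assert(start && "frame allocation failed");

        // Thread the new frames into a singly linked list; the last one
        // points at the existing cache head.
        for (size_t i = 0; i + 1 < n; ++i)
            reinterpret_cast<free_page_header*>(start + i * kFrameSize)->next =
                reinterpret_cast<free_page_header*>(start + (i + 1) * kFrameSize);

        auto *end = reinterpret_cast<free_page_header*>(start + (n - 1) * kFrameSize);
        end->next = s_page_cache;
        s_page_cache = reinterpret_cast<free_page_header*>(start);
        s_cache_count += n;
    }
}

// Mirrors get_table_page(): pop one page off the cache, refilling if empty.
static void *get_table_page()
{
    if (!s_cache_count) fill_cache();
    free_page_header *page = s_page_cache;
    s_page_cache = page->next;
    --s_cache_count;
    return page;
}

// Mirrors free_table_page(): push the page back onto the cache head.
static void free_table_page(void *pt)
{
    auto *page = static_cast<free_page_header*>(pt);
    page->next = s_page_cache;
    s_page_cache = page;
    ++s_cache_count;
}

int main()
{
    void *a = get_table_page();
    void *b = get_table_page();
    free_table_page(a);
    assert(get_table_page() == a);  // LIFO: the page just freed comes back first
    free_table_page(b);
    return 0;
}

The cache is a simple LIFO: free_table_page pushes onto the head and get_table_page pops from it, so a table page freed by unmap_table is the first one handed back out.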