diff --git a/src/kernel/memory_pages.cpp b/src/kernel/memory_pages.cpp index e35aef7..3cf9957 100644 --- a/src/kernel/memory_pages.cpp +++ b/src/kernel/memory_pages.cpp @@ -6,15 +6,17 @@ page_manager g_page_manager; +using addr_t = page_manager::addr_t; -static uint64_t + +static addr_t pt_to_phys(page_table *pt) { - return reinterpret_cast<uint64_t>(pt) - page_manager::page_offset; + return reinterpret_cast<addr_t>(pt) - page_manager::page_offset; } static page_table * -pt_from_phys(uint64_t p) +pt_from_phys(addr_t p) { return reinterpret_cast<page_table *>((p + page_manager::page_offset) & ~0xfffull); } @@ -194,9 +196,9 @@ page_manager::init( // Fix up the offset-marked pointers for (unsigned i = 0; i < m_marked_pointer_count; ++i) { - uint64_t p = reinterpret_cast<uint64_t>(m_marked_pointers[i]); - uint64_t v = p + page_offset; - uint64_t c = (m_marked_pointer_lengths[i] / page_size) + 1; + addr_t p = reinterpret_cast<addr_t>(m_marked_pointers[i]); + addr_t v = p + page_offset; + addr_t c = (m_marked_pointer_lengths[i] / page_size) + 1; // TODO: cleanly search/split this as a block out of used/free if possible page_block *block = get_block(); @@ -216,11 +218,8 @@ page_manager::init( consolidate_blocks(); - page_block::dump(m_used, "used before map", true); - page_block::dump(m_free, "free before map", true); - map_pages(0xf0000000 + high_offset, 120); - page_block::dump(m_used, "used after map", true); - page_block::dump(m_free, "free after map", true); + page_block::dump(m_used, "used", true); + page_block::dump(m_free, "free", true); } @@ -263,9 +262,9 @@ page_table * page_manager::get_table_page() { if (!m_page_cache) { - uint64_t phys = 0; - uint64_t n = pop_pages(32, &phys); - uint64_t virt = phys + page_offset; + addr_t phys = 0; + size_t n = pop_pages(32, &phys); + addr_t virt = phys + page_offset; page_block *block = get_block(); block->physical_address = phys; @@ -278,7 +277,7 @@ page_manager::get_table_page() m_page_cache = reinterpret_cast<free_page_header *>(virt); // The last one needs to be null, so do n-1 - uint64_t
end = virt + (n-1) * page_size; + addr_t end = virt + (n-1) * page_size; while (virt < end) { reinterpret_cast<free_page_header *>(virt)->next = reinterpret_cast<free_page_header *>(virt + page_size); @@ -297,9 +296,9 @@ page_manager::get_table_page() void page_manager::free_table_pages(void *pages, size_t count) { - uint64_t start = reinterpret_cast<uint64_t>(pages); + addr_t start = reinterpret_cast<addr_t>(pages); for (size_t i = 0; i < count; ++i) { - uint64_t addr = start + (i * page_size); + addr_t addr = start + (i * page_size); free_page_header *header = reinterpret_cast<free_page_header *>(addr); header->count = 1; header->next = m_page_cache; @@ -315,7 +314,7 @@ page_manager::consolidate_blocks() } void * -page_manager::map_pages(uint64_t address, unsigned count) +page_manager::map_pages(addr_t address, size_t count) { void *ret = reinterpret_cast<void *>(address); page_table *pml4 = get_pml4(); @@ -323,7 +322,7 @@ page_manager::map_pages(uint64_t address, unsigned count) while (count) { kassert(m_free, "page_manager::map_pages ran out of free pages!"); - uint64_t phys = 0; + addr_t phys = 0; size_t n = pop_pages(count, &phys); page_block *block = get_block(); @@ -342,7 +341,7 @@ } void -page_manager::unmap_pages(uint64_t address, unsigned count) +page_manager::unmap_pages(addr_t address, size_t count) { page_block **prev = &m_used; page_block *cur = m_used; @@ -353,17 +352,17 @@ kassert(cur, "Couldn't find existing mapped pages to unmap"); - uint64_t size = page_size * count; - uint64_t end = address + size; + size_t size = page_size * count; + addr_t end = address + size; while (cur && cur->contains(address)) { - uint64_t leading = address - cur->virtual_address; - uint64_t trailing = + size_t leading = address - cur->virtual_address; + size_t trailing = end > cur->virtual_end() ?
0 : (cur->virtual_end() - end); if (leading) { - uint64_t pages = leading / page_size; + size_t pages = leading / page_size; page_block *lead_block = get_block(); lead_block->copy(cur); @@ -379,7 +378,7 @@ page_manager::unmap_pages(uint64_t address, unsigned count) } if (trailing) { - uint64_t pages = trailing / page_size; + size_t pages = trailing / page_size; page_block *trail_block = get_block(); trail_block->copy(cur); @@ -417,7 +416,7 @@ page_manager::check_needs_page(page_table *table, unsigned index) } void -page_manager::page_in(page_table *pml4, uint64_t phys_addr, uint64_t virt_addr, uint64_t count) +page_manager::page_in(page_table *pml4, addr_t phys_addr, addr_t virt_addr, size_t count) { page_table_indices idx{virt_addr}; page_table *tables[4] = {pml4, nullptr, nullptr, nullptr}; @@ -447,7 +446,7 @@ page_manager::page_in(page_table *pml4, uint64_t phys_addr, uint64_t virt_addr, } void -page_manager::page_out(page_table *pml4, uint64_t virt_addr, uint64_t count) +page_manager::page_out(page_table *pml4, addr_t virt_addr, size_t count) { page_table_indices idx{virt_addr}; page_table *tables[4] = {pml4, nullptr, nullptr, nullptr}; @@ -476,7 +475,7 @@ page_manager::page_out(page_table *pml4, uint64_t virt_addr, uint64_t count) } size_t -page_manager::pop_pages(size_t count, uint64_t *address) +page_manager::pop_pages(size_t count, addr_t *address) { kassert(m_free, "page_manager::pop_pages ran out of free pages!"); diff --git a/src/kernel/memory_pages.h b/src/kernel/memory_pages.h index 8ff8de2..5a87f18 100644 --- a/src/kernel/memory_pages.h +++ b/src/kernel/memory_pages.h @@ -16,14 +16,16 @@ struct free_page_header; class page_manager { public: + using addr_t = uint64_t; + /// Size of a single page. - static const uint64_t page_size = 0x1000; + static const size_t page_size = 0x1000; /// Start of the higher half. 
- static const uint64_t high_offset = 0xffff800000000000; + static const addr_t high_offset = 0xffff800000000000; /// Offset from physical where page tables are mapped. - static const uint64_t page_offset = 0xffffff8000000000; + static const addr_t page_offset = 0xffffff8000000000; page_manager(); @@ -31,12 +33,12 @@ public: /// \arg address The virtual address at which to map the pages /// \arg count The number of pages to map /// \returns A pointer to the start of the mapped region - void * map_pages(uint64_t address, unsigned count); + void * map_pages(addr_t address, size_t count); /// Unmap existing pages from memory. /// \arg address The virtual address of the memory to unmap /// \arg count The number of pages to unmap - void unmap_pages(uint64_t address, unsigned count); + void unmap_pages(addr_t address, size_t count); /// Mark a pointer and range to be offset-mapped. This pointer will /// automatically get updated once page_manager::init() is called. @@ -81,7 +83,7 @@ private: /// \returns A pointer to the current PML4 table. static inline page_table * get_pml4() { - uint64_t pml4 = 0; + addr_t pml4 = 0; __asm__ __volatile__ ( "mov %%cr3, %0" : "=r" (pml4) ); return reinterpret_cast<page_table *>((pml4 & ~0xfffull) + page_offset); } @@ -90,7 +92,7 @@ private: /// \arg pml4 A pointer to the PML4 table to install. static inline void set_pml4(page_table *pml4) { - uint64_t p = reinterpret_cast<uint64_t>(pml4) - page_offset; + addr_t p = reinterpret_cast<addr_t>(pml4) - page_offset; __asm__ __volatile__ ( "mov %0, %%cr3" :: "r" (p & ~0xfffull) ); } @@ -108,9 +110,9 @@ private: /// \arg count The number of pages to map void page_in( page_table *pml4, - uint64_t phys_addr, - uint64_t virt_addr, - uint64_t count); + addr_t phys_addr, + addr_t virt_addr, + size_t count); /// Low-level routine for unmapping a number of pages from the given page table. 
/// \arg pml4 The root page table for this mapping @@ -118,8 +120,8 @@ private: /// \arg count The number of pages to unmap void page_out( page_table *pml4, - uint64_t virt_addr, - uint64_t count); + addr_t virt_addr, + size_t count); /// Get free pages from the free list. Only pages from the first free block /// are returned, so the number may be less than requested, but they will @@ -127,7 +129,7 @@ private: /// \arg count The maximum number of pages to get /// \arg address [out] The address of the first page /// \returns The number of pages retrieved - size_t pop_pages(size_t count, uint64_t *address); + size_t pop_pages(size_t count, addr_t *address); page_block *m_free; ///< Free pages list page_block *m_used; ///< In-use pages list @@ -170,18 +172,20 @@ IS_BITFIELD(page_block_flags); /// linked list of such structures. struct page_block { - uint64_t physical_address; - uint64_t virtual_address; + using addr_t = page_manager::addr_t; + + addr_t physical_address; + addr_t virtual_address; uint32_t count; page_block_flags flags; page_block *next; inline bool has_flag(page_block_flags f) const { return bitfield_contains(flags, f); } - inline uint64_t physical_end() const { return physical_address + (count * page_manager::page_size); } - inline uint64_t virtual_end() const { return virtual_address + (count * page_manager::page_size); } + inline addr_t physical_end() const { return physical_address + (count * page_manager::page_size); } + inline addr_t virtual_end() const { return virtual_address + (count * page_manager::page_size); } - inline bool contains(uint64_t vaddr) const { return vaddr >= virtual_address && vaddr < virtual_end(); } - inline bool contains_physical(uint64_t addr) const { return addr >= physical_address && addr < physical_end(); } + inline bool contains(addr_t vaddr) const { return vaddr >= virtual_address && vaddr < virtual_end(); } + inline bool contains_physical(addr_t addr) const { return addr >= physical_address && addr < physical_end(); 
} /// Helper to zero out a block and optionally set the next pointer. /// \arg next [optional] The value for the `next` pointer