diff --git a/src/kernel/frame_allocator.cpp b/src/kernel/frame_allocator.cpp
index 1d26f98..217a0ca 100644
--- a/src/kernel/frame_allocator.cpp
+++ b/src/kernel/frame_allocator.cpp
@@ -59,16 +59,16 @@ frame_allocator::allocate(size_t count, uintptr_t *address)
     *address = block.base + frame * frame_size;
 
     // Clear the bits to mark these pages allocated
-    m3 &= ~(((1 << n) - 1) << o3);
+    m3 &= ~(((1ull << n) - 1) << o3);
     block.bitmap[(o1 << 6) + o2] = m3;
 
     if (!m3) {
         // if that was it for this group, clear the next level bit
-        m2 &= ~(1 << o2);
+        m2 &= ~(1ull << o2);
         block.map2[o1] = m2;
 
         if (!m2) {
             // if that was cleared too, update the top level
-            block.map1 &= ~(1 << o1);
+            block.map1 &= ~(1ull << o1);
         }
     }
diff --git a/src/kernel/page_table.cpp b/src/kernel/page_table.cpp
index e3f07d4..e14eaef 100644
--- a/src/kernel/page_table.cpp
+++ b/src/kernel/page_table.cpp
@@ -10,6 +10,7 @@ using level = page_table::level;
 
 free_page_header * page_table::s_page_cache = nullptr;
 size_t page_table::s_cache_count = 0;
+kutil::spinlock page_table::s_lock;
 
 constexpr size_t page_table::entry_sizes[4];
 
@@ -174,12 +175,20 @@ struct free_page_header { free_page_header *next; };
 page_table *
 page_table::get_table_page()
 {
-    if (!s_cache_count)
-        fill_table_page_cache();
+    free_page_header *page = nullptr;
 
-    free_page_header *page = s_page_cache;
-    s_page_cache = s_page_cache->next;
-    --s_cache_count;
+    {
+        kutil::scoped_lock lock(s_lock);
+
+        if (!s_cache_count)
+            fill_table_page_cache();
+
+        kassert(s_page_cache, "Somehow the page cache pointer is null");
+
+        page = s_page_cache;
+        s_page_cache = s_page_cache->next;
+        --s_cache_count;
+    }
 
     kutil::memset(page, 0, memory::frame_size);
     return reinterpret_cast<page_table *>(page);
@@ -188,22 +197,24 @@
 void
 page_table::free_table_page(page_table *pt)
 {
+    kutil::scoped_lock lock(s_lock);
+
     free_page_header *page = reinterpret_cast<free_page_header *>(pt);
     page->next = s_page_cache;
-    s_page_cache = page->next;
+    s_page_cache = page;
     ++s_cache_count;
 }
 
 void
 page_table::fill_table_page_cache()
 {
-    constexpr size_t min_pages = 16;
+    constexpr size_t min_pages = 32;
 
     frame_allocator &fa = frame_allocator::get();
     while (s_cache_count < min_pages) {
         uintptr_t phys = 0;
         size_t n = fa.allocate(min_pages - s_cache_count, &phys);
+        kassert(phys, "Got physical page 0 as a page table");
 
         free_page_header *start = memory::to_virtual<free_page_header>(phys);
diff --git a/src/kernel/page_table.h b/src/kernel/page_table.h
index 267a299..632e712 100644
--- a/src/kernel/page_table.h
+++ b/src/kernel/page_table.h
@@ -5,6 +5,7 @@
 #include <stdint.h>
 #include "enum_bitfields.h"
 #include "kernel_memory.h"
+#include "kutil/spinlock.h"
 
 struct free_page_header;
 
@@ -141,6 +142,7 @@
     static free_page_header *s_page_cache; ///< Cache of free pages to use for tables
     static size_t s_cache_count;           ///< Number of pages in s_page_cache
+    static kutil::spinlock s_lock;         ///< Lock for shared page cache
 
     /// Get an entry in the page table as a page_table pointer
     /// \arg i Index of the entry in this page table