mirror of
https://github.com/justinian/jsix.git
synced 2025-12-10 00:14:32 -08:00
[kernel] Fix some page allocation bugs
Fixing two page allocation issues I came across while debugging:
- Added a spinlock to the page_table static page cache, to avoid
multiple CPUs grabbing the same page. This cache should probably
just be made into per-CPU caches.
- Fixed a bitwise math issue ("1" instead of "1ull" when working with
64-bit numbers) that caused pages to never be marked as allocated
when allocating 32 or more at once.
This commit is contained in:
@@ -59,16 +59,16 @@ frame_allocator::allocate(size_t count, uintptr_t *address)
|
||||
*address = block.base + frame * frame_size;
|
||||
|
||||
// Clear the bits to mark these pages allocated
|
||||
m3 &= ~(((1 << n) - 1) << o3);
|
||||
m3 &= ~(((1ull << n) - 1) << o3);
|
||||
block.bitmap[(o1 << 6) + o2] = m3;
|
||||
if (!m3) {
|
||||
// if that was it for this group, clear the next level bit
|
||||
m2 &= ~(1 << o2);
|
||||
m2 &= ~(1ull << o2);
|
||||
block.map2[o1] = m2;
|
||||
|
||||
if (!m2) {
|
||||
// if that was cleared too, update the top level
|
||||
block.map1 &= ~(1 << o1);
|
||||
block.map1 &= ~(1ull << o1);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -10,6 +10,7 @@ using level = page_table::level;
|
||||
|
||||
free_page_header * page_table::s_page_cache = nullptr;
|
||||
size_t page_table::s_cache_count = 0;
|
||||
kutil::spinlock page_table::s_lock;
|
||||
constexpr size_t page_table::entry_sizes[4];
|
||||
|
||||
|
||||
@@ -174,12 +175,20 @@ struct free_page_header { free_page_header *next; };
|
||||
page_table *
|
||||
page_table::get_table_page()
|
||||
{
|
||||
free_page_header *page = nullptr;
|
||||
|
||||
{
|
||||
kutil::scoped_lock lock(s_lock);
|
||||
|
||||
if (!s_cache_count)
|
||||
fill_table_page_cache();
|
||||
|
||||
free_page_header *page = s_page_cache;
|
||||
kassert(s_page_cache, "Somehow the page cache pointer is null");
|
||||
|
||||
page = s_page_cache;
|
||||
s_page_cache = s_page_cache->next;
|
||||
--s_cache_count;
|
||||
}
|
||||
|
||||
kutil::memset(page, 0, memory::frame_size);
|
||||
return reinterpret_cast<page_table*>(page);
|
||||
@@ -188,22 +197,24 @@ page_table::get_table_page()
|
||||
void
|
||||
page_table::free_table_page(page_table *pt)
|
||||
{
|
||||
kutil::scoped_lock lock(s_lock);
|
||||
free_page_header *page =
|
||||
reinterpret_cast<free_page_header*>(pt);
|
||||
page->next = s_page_cache;
|
||||
s_page_cache = page->next;
|
||||
s_page_cache = page;
|
||||
++s_cache_count;
|
||||
}
|
||||
|
||||
void
|
||||
page_table::fill_table_page_cache()
|
||||
{
|
||||
constexpr size_t min_pages = 16;
|
||||
constexpr size_t min_pages = 32;
|
||||
|
||||
frame_allocator &fa = frame_allocator::get();
|
||||
while (s_cache_count < min_pages) {
|
||||
uintptr_t phys = 0;
|
||||
size_t n = fa.allocate(min_pages - s_cache_count, &phys);
|
||||
kassert(phys, "Got physical page 0 as a page table");
|
||||
|
||||
free_page_header *start =
|
||||
memory::to_virtual<free_page_header>(phys);
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
#include <stdint.h>
|
||||
#include "enum_bitfields.h"
|
||||
#include "kernel_memory.h"
|
||||
#include "kutil/spinlock.h"
|
||||
|
||||
struct free_page_header;
|
||||
|
||||
@@ -141,6 +142,7 @@ struct page_table
|
||||
|
||||
static free_page_header *s_page_cache; ///< Cache of free pages to use for tables
|
||||
static size_t s_cache_count; ///< Number of pages in s_page_cache
|
||||
static kutil::spinlock s_lock; ///< Lock for shared page cache
|
||||
|
||||
/// Get an entry in the page table as a page_table pointer
|
||||
/// \arg i Index of the entry in this page table
|
||||
|
||||
Reference in New Issue
Block a user