Convert page_block to use kutil::linked_list

- Created a new linked_list-based slab allocator
- Simplified memory bootstrap code by using the slab allocator and
  linked_lists
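A minimal sketch of the resulting allocation pattern, using only the kutil::slab_allocator and kutil::linked_list interfaces that appear in the diffs below; the extent struct and example() function are made-up placeholders rather than code from this commit:

#include <stddef.h>
#include <stdint.h>
#include "kutil/linked_list.h"
#include "kutil/slab_allocator.h"

// Illustrative payload type; sorted_insert() needs an int compare(const T *) method.
struct extent
{
    uintptr_t start;
    size_t pages;

    int compare(const extent *rhs) const {
        return start < rhs->start ? -1 : (start > rhs->start ? 1 : 0);
    }
};

using extent_list = kutil::linked_list<extent>;
using extent_slab = kutil::slab_allocator<extent>;  // default chunk size, malloc-backed

void example()
{
    extent_slab slab;
    extent_list used;

    auto *e = slab.pop();     // zeroed list_node<extent> from the cache
    e->start = 0x1000;
    e->pages = 4;
    used.sorted_insert(e);    // list stays ordered by compare()

    used.remove(e);           // unlink from the list...
    slab.push(e);             // ...and hand the node back to the cache
}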
Justin C. Miller
2018-09-11 20:46:48 -07:00
parent d5c44645eb
commit 593cda3ee8
9 changed files with 427 additions and 405 deletions

View File

@@ -48,13 +48,19 @@ init_console()
log::enable(logs::task, log::level::debug);
}
void do_error_3() { volatile int x = 1; volatile int y = 0; volatile int z = x / y; }
void do_error_2() { do_error_3(); }
void do_error_1() { do_error_2(); }
void
kernel_main(popcorn_data *header)
{
#ifdef DEBUG
// Run `waf configure --debug` to enable compiling with DEBUG turned on.
// Then attach to QEMU's gdb server and `set waiting = false` to start
// the kernel. This compensates for GDB's poor handling of QEMU going
// through the x86 PC startup and switching to 64 bit mode when you
// attach to qemu with the -S option.
bool waiting = true;
while (waiting);
#endif
kutil::assert_set_callback(__kernel_assert);
gdt_init();
@@ -135,11 +141,6 @@ kernel_main(popcorn_data *header)
}
*/
// do_error_1();
// __asm__ __volatile__("int $15");
// pager->dump_pml4();
syscall_enable();
scheduler *sched = new (&scheduler::get()) scheduler(devices->get_lapic());

View File

@@ -1,11 +1,34 @@
#include <utility>
#include "kutil/assert.h"
#include "kutil/memory.h"
#include "kutil/linked_list.h"
#include "kutil/slab_allocator.h"
#include "memory.h"
#include "page_manager.h"
const unsigned efi_page_size = 0x1000;
const unsigned ident_page_flags = 0xb;
namespace {
// Page-by-page bootstrap allocator for the initial page_block slab allocator
struct page_consumer
{
page_consumer(addr_t start) : current(start) {}
void * operator()(size_t size) {
kassert(size == page_manager::page_size, "page_consumer used with non-page size!");
void *retval = reinterpret_cast<void *>(current);
current += size;
return retval;
}
addr_t current;
};
}
using block_list = kutil::linked_list<page_block>;
using block_allocator = kutil::slab_allocator<page_block, page_consumer &>;
enum class efi_memory_type : uint32_t
{
reserved,
@@ -104,131 +127,80 @@ desc_incr(const efi_memory_descriptor *d, size_t desc_length)
reinterpret_cast<const uint8_t *>(d) + desc_length);
}
static unsigned
count_table_pages_needed(page_block *used)
{
page_table_indices last_idx{~0ull};
unsigned counts[] = {1, 0, 0, 0};
for (page_block *cur = used; cur; cur = cur->next) {
if (!cur->has_flag(page_block_flags::mapped))
continue;
page_table_indices start{cur->virtual_address};
page_table_indices end{cur->virtual_address + (cur->count * page_manager::page_size)};
counts[1] +=
((start[0] == last_idx[0]) ? 0 : 1) +
(end[0] - start[0]);
counts[2] +=
((start[0] == last_idx[0] &&
start[1] == last_idx[1]) ? 0 : 1) +
(end[1] - start[1]);
counts[3] +=
((start[0] == last_idx[0] &&
start[1] == last_idx[1] &&
start[2] == last_idx[2]) ? 0 : 1) +
(end[2] - start[2]);
last_idx = end;
}
return counts[0] + counts[1] + counts[2] + counts[3];
}
page_block *
remove_block_for(page_block **list, uint64_t phys_start, uint64_t pages, page_block **cache)
page_block_list::item_type *
remove_block_for(page_block_list &list, addr_t phys_start, size_t pages, page_block_list &cache)
{
// This is basically just the removal portion of page_manager::unmap_pages,
// but with physical addresses, and only ever removing a single block.
page_block *prev = nullptr;
page_block *cur = *list;
while (cur && !cur->contains_physical(phys_start)) {
prev = cur;
cur = cur->next;
}
for (auto *item : list) {
if (!item->contains_physical(phys_start))
continue;
kassert(cur, "Couldn't find block to remove");
uint64_t size = page_manager::page_size * pages;
uint64_t end = phys_start + size;
uint64_t leading = phys_start - item->physical_address;
uint64_t trailing = item->physical_end() - end;
uint64_t size = page_manager::page_size * pages;
uint64_t end = phys_start + size;
uint64_t leading = phys_start - cur->physical_address;
uint64_t trailing = cur->physical_end() - end;
if (leading) {
uint64_t pages = leading / page_manager::page_size;
if (leading) {
uint64_t pages = leading / page_manager::page_size;
page_block_list::item_type *lead_block = cache.pop_front();
page_block *lead_block = *cache;
*cache = (*cache)->next;
lead_block->copy(item);
lead_block->count = pages;
lead_block->copy(cur);
lead_block->next = cur;
lead_block->count = pages;
item->count -= pages;
item->physical_address += leading;
cur->count -= pages;
cur->physical_address += leading;
if (item->virtual_address)
item->virtual_address += leading;
if (cur->virtual_address)
cur->virtual_address += leading;
if (prev) {
prev->next = lead_block;
} else {
prev = lead_block;
*list = prev;
list.insert_before(item, lead_block);
}
if (trailing) {
uint64_t pages = trailing / page_manager::page_size;
page_block_list::item_type *trail_block = cache.pop_front();
trail_block->copy(item);
trail_block->count = pages;
trail_block->physical_address += size;
item->count -= pages;
if (item->virtual_address)
trail_block->virtual_address += size;
list.insert_before(item, trail_block);
}
list.remove(item);
return item;
}
if (trailing) {
uint64_t pages = trailing / page_manager::page_size;
page_block *trail_block = *cache;
*cache = (*cache)->next;
trail_block->copy(cur);
trail_block->next = cur->next;
trail_block->count = pages;
trail_block->physical_address += size;
if (cur->virtual_address)
trail_block->virtual_address += size;
cur->count -= pages;
cur->next = trail_block;
}
prev->next = cur->next;
cur->next = nullptr;
return cur;
kassert(false, "Couldn't find block to remove");
return nullptr;
}
uint64_t
void
gather_block_lists(
uint64_t scratch_virt,
block_allocator &allocator,
block_list &used,
block_list &free,
const void *memory_map,
size_t map_length,
size_t desc_length,
page_block **free_head,
page_block **used_head)
size_t desc_length)
{
int i = 0;
page_block *free = nullptr;
page_block *used = nullptr;
page_block *block_list = reinterpret_cast<page_block *>(scratch_virt);
efi_memory_descriptor const *desc = reinterpret_cast<efi_memory_descriptor const *>(memory_map);
efi_memory_descriptor const *end = desc_incr(desc, map_length);
while (desc < end) {
page_block *block = &block_list[i++];
auto *block = allocator.pop();
block->physical_address = desc->physical_start;
block->virtual_address = desc->virtual_start;
block->count = desc->pages;
block->next = nullptr;
switch (desc->type) {
case efi_memory_type::loader_code:
@@ -264,31 +236,13 @@ gather_block_lists(
if (block->virtual_address || !block->physical_address)
block->flags |= page_block_flags::mapped;
used = page_block::insert(used, block);
used.push_back(block);
} else {
free = page_block::insert(free, block);
free.push_back(block);
}
desc = desc_incr(desc, desc_length);
}
*free_head = free;
*used_head = used;
return reinterpret_cast<uint64_t>(&block_list[i]);
}
page_block *
fill_page_with_blocks(uint64_t start)
{
uint64_t end = page_align(start);
uint64_t count = (end - start) / sizeof(page_block);
if (count == 0) return nullptr;
page_block *blocks = reinterpret_cast<page_block *>(start);
for (unsigned i = 0; i < count; ++i)
blocks[i].zero(&blocks[i+1]);
blocks[count - 1].next = nullptr;
return blocks;
}
void
@@ -459,34 +413,27 @@ memory_initialize(const void *memory_map, size_t map_length, size_t desc_length)
// We now have pages starting at "free_next" to bootstrap ourselves. Start by
// taking inventory of free pages.
page_block *free_head = nullptr;
page_block *used_head = nullptr;
free_next = gather_block_lists(
free_next, memory_map, map_length, desc_length,
&free_head, &used_head);
page_consumer allocator(free_next);
block_allocator block_slab(page_manager::page_size, allocator);
block_list used;
block_list free;
// Unused page_block structs go here - finish out the current page with them
page_block *cache_head = fill_page_with_blocks(free_next);
free_next = page_align(free_next);
gather_block_lists(block_slab, used, free, memory_map, map_length, desc_length);
block_slab.allocate(); // Make sure we have extra
free_next = allocator.current;
// Now go back through these lists and consolidate
page_block *freed = page_block::consolidate(free_head);
cache_head = page_block::append(cache_head, freed);
freed = page_block::consolidate(used_head);
cache_head = page_block::append(cache_head, freed);
block_slab.append(page_block::consolidate(free));
block_slab.append(page_block::consolidate(used));
// Pull out the block that represents the bootstrap pages we've used
uint64_t used = free_next - free_region_start_virt;
uint64_t used_pages = used / page_manager::page_size;
uint64_t used_bytes = free_next - free_region_start_virt;
uint64_t used_pages = used_bytes / page_manager::page_size;
uint64_t remaining_pages = want_pages - used_pages;
page_block *removed = remove_block_for(
&free_head,
free_region_start_phys,
used_pages,
&cache_head);
auto *removed = remove_block_for(free, free_region_start_phys,
used_pages, block_slab);
kassert(removed, "remove_block_for didn't find the bootstrap region.");
kassert(removed->physical_address == free_region_start_phys,
@@ -495,16 +442,13 @@ memory_initialize(const void *memory_map, size_t map_length, size_t desc_length)
// Add it to the used list
removed->virtual_address = free_region_start_virt;
removed->flags = page_block_flags::used | page_block_flags::mapped;
used_head = page_block::insert(used_head, removed);
used.sorted_insert(removed);
// Pull out the block that represents the rest
uint64_t free_next_phys = free_region_start_phys + used;
uint64_t free_next_phys = free_region_start_phys + used_bytes;
removed = remove_block_for(
&free_head,
free_next_phys,
remaining_pages,
&cache_head);
removed = remove_block_for(free, free_next_phys,
remaining_pages, block_slab);
kassert(removed, "remove_block_for didn't find the page table region.");
kassert(removed->physical_address == free_next_phys,
@@ -516,7 +460,7 @@ memory_initialize(const void *memory_map, size_t map_length, size_t desc_length)
// Record that we're about to remap it into the page table address space
removed->virtual_address = pt_start_virt;
removed->flags = page_block_flags::used | page_block_flags::mapped;
used_head = page_block::insert(used_head, removed);
used.sorted_insert(removed);
page_manager *pm = &g_page_manager;
@@ -539,9 +483,9 @@ memory_initialize(const void *memory_map, size_t map_length, size_t desc_length)
// Give the rest to the page_manager's cache for use in page_in
pm->free_table_pages(pml4 + 1, remaining_pages - 1);
for (page_block *cur = used_head; cur; cur = cur->next) {
if (!cur->has_flag(page_block_flags::mapped)) continue;
pm->page_in(pml4, cur->physical_address, cur->virtual_address, cur->count);
for (auto *block : used) {
if (!block->has_flag(page_block_flags::mapped)) continue;
pm->page_in(pml4, block->physical_address, block->virtual_address, block->count);
}
// Put our new PML4 into CR3 to start using it
@@ -549,5 +493,8 @@ memory_initialize(const void *memory_map, size_t map_length, size_t desc_length)
// We now have all used memory mapped ourselves. Let the page_manager take
// over from here.
g_page_manager.init(free_head, used_head, cache_head);
g_page_manager.init(
std::move(free),
std::move(used),
std::move(block_slab));
}
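The leading/trailing split that remove_block_for performs (and that unmap_pages repeats below for virtual addresses) is easiest to see with a small worked example; the addresses here are made up:

// Removing 2 pages at physical 0x3000 from a free block covering
// 0x1000-0x7000 (6 pages of 0x1000 bytes each):
//
//   before:  block        phys 0x1000, count 6
//   after:   lead_block   phys 0x1000, count 2   (leading  = 0x3000 - 0x1000)
//            removed      phys 0x3000, count 2   (returned to the caller)
//            trail_block  phys 0x5000, count 2   (trailing = 0x7000 - 0x5000)
//
// lead_block and trail_block are popped from the block cache and inserted
// into the list before the block being removed, which keeps the list sorted.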

View File

@@ -42,100 +42,52 @@ void mm_grow_callback(void *next, size_t length)
}
size_t
page_block::length(page_block *list)
{
size_t i = 0;
for (page_block *b = list; b; b = b->next) ++i;
return i;
}
page_block *
page_block::append(page_block *list, page_block *extra)
{
if (list == nullptr) return extra;
else if (extra == nullptr) return list;
page_block *cur = list;
while (cur->next)
cur = cur->next;
cur->next = extra;
return list;
}
page_block *
page_block::insert(page_block *list, page_block *block)
{
if (list == nullptr) return block;
else if (block == nullptr) return list;
page_block *cur = list;
page_block *prev = nullptr;
while (cur && page_block::compare(block, cur) > 0) {
prev = cur;
cur = cur->next;
}
block->next = cur;
if (prev) {
prev->next = block;
return list;
}
return block;
}
int
page_block::compare(const page_block *lhs, const page_block *rhs)
page_block::compare(const page_block *rhs) const
{
if (lhs->virtual_address < rhs->virtual_address)
if (virtual_address < rhs->virtual_address)
return -1;
else if (lhs->virtual_address > rhs->virtual_address)
else if (virtual_address > rhs->virtual_address)
return 1;
if (lhs->physical_address < rhs->physical_address)
if (physical_address < rhs->physical_address)
return -1;
else if (lhs->physical_address > rhs->physical_address)
else if (physical_address > rhs->physical_address)
return 1;
return 0;
}
page_block *
page_block::consolidate(page_block *list)
page_block_list
page_block::consolidate(page_block_list &list)
{
page_block *freed = nullptr;
page_block *cur = list;
page_block_list freed;
while (cur) {
page_block *next = cur->next;
for (auto *cur : list) {
auto *next = cur->next();
if (next &&
while (next &&
cur->flags == next->flags &&
cur->physical_end() == next->physical_address &&
(!cur->has_flag(page_block_flags::mapped) ||
cur->virtual_end() == next->virtual_address)) {
cur->count += next->count;
cur->next = next->next;
next->zero(freed);
freed = next;
continue;
list.remove(next);
freed.push_back(next);
}
cur = cur->next;
}
return freed;
}
void
page_block::dump(page_block *list, const char *name, bool show_unmapped)
page_block::dump(const page_block_list &list, const char *name, bool show_unmapped)
{
log::info(logs::memory, "Block list %s:", name);
int count = 0;
for (page_block *cur = list; cur; cur = cur->next) {
for (auto *cur : list) {
count += 1;
if (!(show_unmapped || cur->has_flag(page_block_flags::mapped)))
continue;
@@ -161,13 +113,12 @@ page_block::dump(page_block *list, const char *name, bool show_unmapped)
}
void
page_block::zero(page_block *set_next)
page_block::zero()
{
physical_address = 0;
virtual_address = 0;
count = 0;
flags = page_block_flags::free;
next = set_next;
}
void
@@ -177,14 +128,11 @@ page_block::copy(page_block *other)
virtual_address = other->virtual_address;
count = other->count;
flags = other->flags;
next = other->next;
}
page_manager::page_manager() :
m_free(nullptr),
m_used(nullptr),
m_block_cache(nullptr),
m_block_slab(page_size),
m_page_cache(nullptr)
{
kassert(this == &g_page_manager, "Attempt to create another page_manager.");
@@ -192,25 +140,22 @@ page_manager::page_manager() :
void
page_manager::init(
page_block *free,
page_block *used,
page_block *block_cache)
page_block_list free,
page_block_list used,
page_block_list cache)
{
m_free = free;
m_used = used;
m_block_cache = block_cache;
// For now we're ignoring that we've got the scratch pages
// allocated, full of page_block structs. Eventually hand
// control of that to a slab allocator.
m_free.append(free);
m_used.append(used);
m_block_slab.append(cache);
consolidate_blocks();
// Initialize the kernel memory manager
addr_t end = 0;
for (page_block *b = m_used; b; b = b->next) {
if (b->virtual_address < page_offset) {
end = b->virtual_end();
for (auto *block : m_used) {
if (block->virtual_address &&
block->virtual_address < page_offset) {
end = block->virtual_end();
} else {
break;
}
@@ -258,7 +203,7 @@ page_manager::map_offset_pointer(void **pointer, size_t length)
addr_t c = ((length - 1) / page_size) + 1;
// TODO: cleanly search/split this as a block out of used/free if possible
page_block *block = get_block();
auto *block = m_block_slab.pop();
// TODO: page-align
block->physical_address = *p;
@@ -269,7 +214,7 @@ page_manager::map_offset_pointer(void **pointer, size_t length)
page_block_flags::mapped |
page_block_flags::mmio;
m_used = page_block::insert(m_used, block);
m_used.sorted_insert(block);
page_table *pml4 = get_pml4();
page_in(pml4, *p, v, c);
@@ -292,35 +237,6 @@ page_manager::dump_pml4(page_table *pml4, int max_index)
pml4->dump(4, max_index);
}
page_block *
page_manager::get_block()
{
page_block *block = m_block_cache;
if (block) {
m_block_cache = block->next;
block->next = 0;
return block;
} else {
kassert(0, "NYI: page_manager::get_block() needed to allocate.");
return nullptr;
}
}
void
page_manager::free_blocks(page_block *block)
{
if (!block) return;
page_block *cur = block;
while (cur) {
page_block *next = cur->next;
cur->zero(cur->next ? cur->next : m_block_cache);
cur = next;
}
m_block_cache = block;
}
page_table *
page_manager::get_table_page()
{
@@ -329,11 +245,13 @@ page_manager::get_table_page()
size_t n = pop_pages(32, &phys);
addr_t virt = phys + page_offset;
page_block *block = get_block();
auto *block = m_block_slab.pop();
block->physical_address = phys;
block->virtual_address = virt;
block->count = n;
page_block::insert(m_used, block);
m_used.sorted_insert(block);
page_in(get_pml4(), phys, virt, n);
@@ -372,8 +290,8 @@ page_manager::free_table_pages(void *pages, size_t count)
void
page_manager::consolidate_blocks()
{
m_block_cache = page_block::append(m_block_cache, page_block::consolidate(m_free));
m_block_cache = page_block::append(m_block_cache, page_block::consolidate(m_used));
m_block_slab.append(page_block::consolidate(m_free));
m_block_slab.append(page_block::consolidate(m_used));
}
void *
@@ -383,19 +301,21 @@ page_manager::map_pages(addr_t address, size_t count, bool user, page_table *pml
if (!pml4) pml4 = get_pml4();
while (count) {
kassert(m_free, "page_manager::map_pages ran out of free pages!");
kassert(!m_free.empty(), "page_manager::map_pages ran out of free pages!");
addr_t phys = 0;
size_t n = pop_pages(count, &phys);
page_block *block = get_block();
auto *block = m_block_slab.pop();
block->physical_address = phys;
block->virtual_address = address;
block->count = n;
block->flags =
page_block_flags::used |
page_block_flags::mapped;
page_block::insert(m_used, block);
m_used.sorted_insert(block);
log::debug(logs::memory, "Paging in %d pages at p:%016lx to v:%016lx into %016lx table",
n, phys, address, pml4);
@@ -413,37 +333,28 @@ void *
page_manager::map_offset_pages(size_t count)
{
page_table *pml4 = get_pml4();
page_block *free = m_free;
page_block *prev = nullptr;
for (auto *free : m_free) {
if (free->count < count) continue;
while (free) {
if (free->count < count) {
prev = free;
free = free->next;
continue;
}
auto *used = m_block_slab.pop();
page_block *used = get_block();
used->count = count;
used->physical_address = free->physical_address;
used->virtual_address = used->physical_address + page_offset;
used->flags =
page_block_flags::used |
page_block_flags::mapped;
page_block::insert(m_used, used);
m_used.sorted_insert(used);
free->physical_address += count * page_size;
free->count -= count;
if (free->count == 0) {
if (prev)
prev->next = free->next;
else
m_free = free->next;
free->zero(m_block_cache);
m_block_cache = free;
m_free.remove(free);
free->zero();
m_block_slab.push(free);
}
log::debug(logs::memory, "Got request for offset map %016lx [%d]", used->virtual_address, count);
@@ -458,67 +369,61 @@ void
page_manager::unmap_pages(void* address, size_t count)
{
addr_t addr = reinterpret_cast<addr_t>(address);
size_t block_count = 0;
page_block **prev = &m_used;
page_block *cur = m_used;
while (cur && !cur->contains(addr)) {
prev = &cur->next;
cur = cur->next;
}
for (auto *block : m_used) {
if (!block->contains(addr)) continue;
kassert(cur, "Couldn't find existing mapped pages to unmap");
size_t size = page_size * count;
addr_t end = addr + size;
size_t size = page_size * count;
addr_t end = addr + size;
while (cur && cur->contains(addr)) {
size_t leading = addr - cur->virtual_address;
size_t leading = addr - block->virtual_address;
size_t trailing =
end > cur->virtual_end() ?
0 : (cur->virtual_end() - end);
end > block->virtual_end() ?
0 : (block->virtual_end() - end);
if (leading) {
size_t pages = leading / page_size;
page_block *lead_block = get_block();
lead_block->copy(cur);
lead_block->next = cur;
auto *lead_block = m_block_slab.pop();
lead_block->copy(block);
lead_block->count = pages;
cur->count -= pages;
cur->physical_address += leading;
cur->virtual_address += leading;
block->count -= pages;
block->physical_address += leading;
block->virtual_address += leading;
*prev = lead_block;
prev = &lead_block->next;
m_used.insert_before(block, lead_block);
}
if (trailing) {
size_t pages = trailing / page_size;
page_block *trail_block = get_block();
trail_block->copy(cur);
trail_block->next = cur->next;
auto *trail_block = m_block_slab.pop();
trail_block->copy(block);
trail_block->count = pages;
trail_block->physical_address += size;
trail_block->virtual_address += size;
cur->count -= pages;
block->count -= pages;
cur->next = trail_block;
m_used.insert_after(block, trail_block);
}
addr += cur->count * page_size;
page_block *next = cur->next;
addr += block->count * page_size;
*prev = cur->next;
cur->next = nullptr;
cur->virtual_address = 0;
cur->flags = cur->flags & ~(page_block_flags::used | page_block_flags::mapped);
m_free = page_block::insert(m_free, cur);
block->virtual_address = 0;
block->flags = block->flags &
~(page_block_flags::used | page_block_flags::mapped);
cur = next;
m_used.remove(block);
m_free.sorted_insert(block);
++block_count;
}
kassert(block_count, "Couldn't find existing mapped pages to unmap");
}
void
@@ -608,20 +513,17 @@ page_manager::page_out(page_table *pml4, addr_t virt_addr, size_t count)
size_t
page_manager::pop_pages(size_t count, addr_t *address)
{
kassert(m_free, "page_manager::pop_pages ran out of free pages!");
kassert(!m_free.empty(), "page_manager::pop_pages ran out of free pages!");
unsigned n = std::min(count, static_cast<size_t>(m_free->count));
*address = m_free->physical_address;
auto *first = m_free.front();
m_free->physical_address += n * page_size;
m_free->count -= n;
if (m_free->count == 0) {
page_block *block = m_free;
m_free = m_free->next;
unsigned n = std::min(count, static_cast<size_t>(first->count));
*address = first->physical_address;
block->zero(m_block_cache);
m_block_cache = block;
}
first->physical_address += n * page_size;
first->count -= n;
if (first->count == 0)
m_block_slab.push(m_free.pop_front());
return n;
}

View File

@@ -7,11 +7,15 @@
#include "kutil/memory.h"
#include "kutil/enum_bitfields.h"
#include "kutil/linked_list.h"
#include "kutil/slab_allocator.h"
struct page_block;
struct page_table;
struct free_page_header;
using page_block_list = kutil::linked_list<page_block>;
using page_block_slab = kutil::slab_allocator<page_block>;
/// Manager for allocation of physical pages.
class page_manager
@@ -124,9 +128,9 @@ public:
private:
/// Set up the memory manager from bootstrapped memory
void init(
page_block *free,
page_block *used,
page_block *block_cache);
page_block_list free,
page_block_list used,
page_block_list cache);
/// Initialize the virtual memory manager based on this object's state
void init_memory_manager();
@@ -192,10 +196,10 @@ private:
page_table *m_kernel_pml4; ///< The PML4 of just kernel pages
page_block *m_free; ///< Free pages list
page_block *m_used; ///< In-use pages list
page_block_list m_free; ///< Free pages list
page_block_list m_used; ///< In-use pages list
page_block_slab m_block_slab; ///< page_block slab allocator
page_block *m_block_cache; ///< Cache of unused page_block structs
free_page_header *m_page_cache; ///< Cache of free pages to use for tables
friend void memory_initialize(const void *, size_t, size_t);
@@ -235,7 +239,6 @@ struct page_block
addr_t virtual_address;
uint32_t count;
page_block_flags flags;
page_block *next;
inline bool has_flag(page_block_flags f) const { return bitfield_has(flags, f); }
inline addr_t physical_end() const { return physical_address + (count * page_manager::page_size); }
@@ -245,53 +248,27 @@ struct page_block
inline bool contains_physical(addr_t addr) const { return addr >= physical_address && addr < physical_end(); }
/// Helper to zero out a block and optionally set the next pointer.
/// \arg next [optional] The value for the `next` pointer
void zero(page_block *set_next = nullptr);
void zero();
/// Helper to copy a block from another block
/// \arg other The block to copy from
void copy(page_block *other);
/// \name Page block linked list functions
/// Functions to act on a `page_block *` as a linked list
/// @{
/// Count the items in the given linked list.
/// \arg list The list to count
/// \returns The number of entries in the list.
static size_t length(page_block *list);
/// Append a block or list to the given list.
/// \arg list The list to append to
/// \arg extra The list or block to be appended
/// \returns The new list head
static page_block * append(page_block *list, page_block *extra);
/// Sorted-insert of a block into the list by address.
/// \arg list The list to insert into
/// \arg block The single block to insert
/// \returns The new list head
static page_block * insert(page_block *list, page_block *block);
/// Compare two blocks by address.
/// \arg lhs The left-hand comparator
/// \arg rhs The right-hand comparator
/// \returns <0 if lhs sorts earlier, >0 if lhs sorts later, 0 for equal
static int compare(const page_block *lhs, const page_block *rhs);
/// \returns <0 if this sorts earlier, >0 if this sorts later, 0 for equal
int compare(const page_block *rhs) const;
/// Traverse the list, joining adjacent blocks where possible.
/// \arg list The list to consolidate
/// \returns A linked list of freed page_block structures.
static page_block * consolidate(page_block *list);
static page_block_list consolidate(page_block_list &list);
/// Traverse the list, printing debug info on this list.
/// \arg list The list to print
/// \arg name [optional] String to print as the name of this list
/// \arg show_unmapped [optional] If false, hide unmapped blocks
static void dump(page_block *list, const char *name = nullptr, bool show_unmapped = false);
/// @}
static void dump(const page_block_list &list, const char *name = nullptr, bool show_unmapped = false);
};

View File

@@ -48,6 +48,9 @@ public:
/// \returns The prev node in the list
inline const node_type * prev() const { return m_prev; }
private:
friend class linked_list<T>;
/// Insert an item after this one in the list.
/// \arg item The item to insert
void insert_after(node_type *item)
@@ -76,9 +79,6 @@ public:
m_next = m_prev = nullptr;
}
private:
friend class linked_list<T>;
node_type *m_next;
node_type *m_prev;
};
@@ -93,8 +93,8 @@ public:
list_iterator(item_type *item) : m_item(item) {}
inline T & operator*() { return *m_item; }
inline const T & operator*() const { return *m_item; }
inline item_type * operator*() { return m_item; }
inline const item_type * operator*() const { return m_item; }
inline list_iterator & operator++() { m_item = m_item ? m_item->next() : nullptr; return *this; }
inline list_iterator operator++(int) { return list_iterator<T>(m_item ? m_item->next() : nullptr); }
inline bool operator!=(const list_iterator<T> &other) { return m_item != other.m_item; }
@@ -118,9 +118,21 @@ public:
m_tail(nullptr)
{}
/// Move constructor. Takes ownership of list elements.
linked_list(linked_list<T> &&other) :
m_head(other.m_head),
m_tail(other.m_tail)
{
other.m_head = other.m_tail = nullptr;
}
/// Check if the list is empty.
/// \returns true if the list is empty
bool empty() const { return m_head == nullptr; }
/// Count the items in the list.
/// \returns The number of entries in the list.
size_t length()
size_t length() const
{
size_t len = 0;
for (item_type *cur = m_head; cur; cur = cur->m_next) ++len;
@@ -213,6 +225,66 @@ public:
list.m_head = list.m_tail = nullptr;
}
/// Append the contents of another list to the end of this list. The other
/// list is emptied, and this list takes ownership of its items.
/// \arg list The other list.
void append(linked_list<T> &&list)
{
if (!list.m_head) return;
if (!m_tail) {
m_head = list.m_head;
m_tail = list.m_tail;
} else {
m_tail->m_next = list.m_head;
m_tail = list.m_tail;
}
list.m_head = list.m_tail = nullptr;
}
/// Remove an item from the list.
/// \arg item The item to remove
void remove(item_type *item)
{
if (!item) return;
if (item == m_head)
m_head = item->m_next;
if (item == m_tail)
m_tail = item->m_prev;
item->remove();
}
/// Inserts an item into the list before another given item.
/// \arg existing The existing item to insert before
/// \arg item The new item to insert
void insert_before(item_type *existing, item_type *item)
{
if (!item) return;
if (!existing)
push_back(item);
else if (existing == m_head)
push_front(item);
else
existing->insert_before(item);
}
/// Inserts an item into the list after another given item.
/// \arg existing The existing item to insert after
/// \arg item The new item to insert
void insert_after(item_type *existing, item_type *item)
{
if (!item) return;
if (!existing)
push_front(item);
else if (existing == m_tail)
push_back(item);
else
existing->insert_after(item);
}
/// Insert an item into the list in a sorted position. Depends on T
/// having a method `int compare(const T *other)`.
/// \arg item The item to insert
@@ -224,12 +296,7 @@ public:
while (cur && item->compare(cur) > 0)
cur = cur->m_next;
if (!cur)
push_back(item);
else if (cur == m_head)
push_front(item);
else
cur->insert_before(item);
insert_before(cur, item);
}
/// Range-based for iterator generator.
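A short sketch of the list operations added in this file (remove, insert_before, insert_after, and the move-based append); the widget type and list contents are illustrative, and list_node/linked_list are assumed to live in namespace kutil:

#include <utility>
#include "kutil/linked_list.h"

struct widget { int value; };

void list_example()
{
    kutil::list_node<widget> a, b, c;
    kutil::linked_list<widget> one, two;

    one.push_back(&a);
    one.insert_after(&a, &c);    // a is the tail, so this becomes a push_back: a, c
    one.insert_before(&c, &b);   // splices between a and c:                    a, b, c

    two.append(std::move(one));  // two takes ownership; one is left empty
    two.remove(&b);              // fixes up head/tail if needed, then unlinks b
}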

View File

@@ -0,0 +1,60 @@
#pragma once
/// \file slab_allocator.h
/// A slab allocator and related definitions
#include "kutil/linked_list.h"
#include "kutil/memory.h"
namespace kutil {
/// A slab allocator for small structures kept in a linked list
template <typename T, typename Alloc = void * (*)(size_t)>
class slab_allocator :
public linked_list<T>
{
public:
using item_type = list_node<T>;
/// Default constructor.
/// \arg chunk_size The size of chunk to allocate, in bytes. 0 means default.
/// \arg alloc The allocator to use to allocate chunks. Defaults to malloc().
slab_allocator(size_t chunk_size = 0, Alloc alloc = malloc) :
m_chunk_size(chunk_size),
m_alloc(alloc)
{
}
/// Get an item from the cache. May allocate a new chunk if the cache is empty.
/// \returns An allocated element
inline item_type * pop()
{
if (this->empty()) allocate();
item_type *item = this->pop_front();
kutil::memset(item, 0, sizeof(item_type));
return item;
}
/// Return an item to the cache.
/// \arg item A previously allocated element
inline void push(item_type *item)
{
this->push_front(item);
}
/// Allocate a new chunk from the chunk allocator and split it into cache items.
void allocate()
{
size_t size = m_chunk_size ? m_chunk_size : 10 * sizeof(item_type);
void *memory = m_alloc(size);
size_t count = size / sizeof(item_type);
item_type *items = reinterpret_cast<item_type *>(memory);
for (size_t i = 0; i < count; ++i)
this->push_back(&items[i]);
}
private:
size_t m_chunk_size;
Alloc m_alloc;
};
} // namespace kutil
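A sketch of pairing the allocator with a caller-supplied chunk allocator, the same shape as the page_consumer functor in the memory bootstrap diff above; bump_alloc and item are placeholder names:

#include <stddef.h>
#include "kutil/slab_allocator.h"

// Hands out consecutive bytes from a caller-provided scratch buffer (illustrative).
struct bump_alloc
{
    char *next;
    void * operator()(size_t size) { void *p = next; next += size; return p; }
};

struct item { int id; };

void slab_example(char *scratch)
{
    bump_alloc chunks{scratch};

    // 0x1000-byte chunks carved from scratch; the functor is held by reference.
    kutil::slab_allocator<item, bump_alloc &> slab(0x1000, chunks);

    auto *i = slab.pop();   // the first pop() calls allocate() for a fresh chunk
    i->id = 42;
    slab.push(i);           // hands the node back to the front of the cache
}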

View File

@@ -0,0 +1,19 @@
#pragma once
#include <atomic>
namespace kutil {
class spinlock
{
public:
spinlock() : m_lock(false) {}
inline void enter() { while (m_lock.exchange(true)); } // spin while the lock is already held
inline void leave() { m_lock.store(false); }
private:
std::atomic<bool> m_lock;
};
} // namespace kutil
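A minimal usage sketch for the new lock; g_counter and bump_counter are illustrative:

#include "kutil/spinlock.h"

static kutil::spinlock g_lock;
static int g_counter = 0;

void bump_counter()
{
    g_lock.enter();   // spins until the flag is acquired
    ++g_counter;      // critical section
    g_lock.leave();   // releases the lock
}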

View File

@@ -35,8 +35,8 @@ public:
virtual bool match (vector const& vec) const override
{
size_t index = m_reverse ? vec.size() - 1 : 0;
for (const T &i : m_list) {
if (&i != &vec[index]) return false;
for (const T *i : m_list) {
if (i != &vec[index]) return false;
index += m_reverse ? -1 : 1;
}
return true;
@@ -65,9 +65,9 @@ public:
virtual bool match (list const& l) const override
{
int big = std::numeric_limits<int>::min();
for (const T &i : l) {
if (i.value < big) return false;
big = i.value;
for (const T *i : l) {
if (i->value < big) return false;
big = i->value;
}
return true;
}
@@ -78,24 +78,51 @@ public:
}
};
template <typename T>
class ListContainsMatcher :
public Catch::MatcherBase<linked_list<T>>
{
public:
using item = list_node<T>;
using list = linked_list<T>;
ListContainsMatcher(const item &needle) : m_needle(needle) {}
virtual bool match (list const& l) const override
{
for (const T *i : l)
if (i == &m_needle) return true;
return false;
}
virtual std::string describe() const override
{
return "contains the given item";
}
const item &m_needle;
};
template <typename T>
ListVectorCompare<T> IsSameAsList(const linked_list<T> &list, bool reversed = false)
{
return ListVectorCompare<T>(list, reversed);
}
template <typename T>
ListContainsMatcher<T> ListContains(const list_node<T> &item)
{
return ListContainsMatcher<T>(item);
}
TEST_CASE( "Linked list tests", "[containers list]" )
{
using clock = std::chrono::system_clock;
unsigned seed = clock::now().time_since_epoch().count();
std::default_random_engine rng(seed);
std::uniform_int_distribution<int> gen(1, 1000);
linked_list<unsortableT> ulist;
int value = 0;
std::vector<list_node<unsortableT>> unsortables(test_list_size);
for (auto &i : unsortables) {
i.value = gen(rng);
i.value = value++;
ulist.push_back(&i);
}
CHECK( ulist.length() == test_list_size );
@@ -104,13 +131,26 @@ TEST_CASE( "Linked list tests", "[containers list]" )
linked_list<unsortableT> ulist_reversed;
for (auto &i : unsortables) {
i.remove();
ulist.remove(&i);
ulist_reversed.push_front(&i);
}
CHECK( ulist_reversed.length() == test_list_size );
CHECK_THAT( unsortables, IsSameAsList(ulist_reversed, true) );
auto &removed = unsortables[test_list_size / 2];
ulist_reversed.remove(&removed);
CHECK( ulist_reversed.length() == test_list_size - 1 );
CHECK_THAT( ulist_reversed, !ListContains(removed) );
}
TEST_CASE( "Sorted list tests", "[containers list]" )
{
using clock = std::chrono::system_clock;
unsigned seed = clock::now().time_since_epoch().count();
std::default_random_engine rng(seed);
std::uniform_int_distribution<int> gen(1, 1000);
linked_list<sortableT> slist;
std::vector<list_node<sortableT>> sortables(test_list_size);

View File

@@ -21,6 +21,11 @@ def options(opt):
default='tamsyn8x16r.psf',
help='Font for the console')
opt.add_option('--debug',
action='store_true',
default=False,
help='Compile in debugging mode')
def common_configure(ctx):
from os import listdir
@@ -37,6 +42,7 @@ def common_configure(ctx):
ctx.env.POPCORN_ARCH = ctx.options.arch
ctx.env.KERNEL_FILENAME = ctx.options.kernel_filename
ctx.env.FONT_NAME = ctx.options.font
ctx.env.DEBUG = ctx.options.debug
ctx.env.ARCH_D = join(str(ctx.path), "src", "arch",
ctx.env.POPCORN_ARCH)
@@ -81,6 +87,9 @@ def common_configure(ctx):
"VERSION_GITSHA=0x{}{}".format({True:1}.get(dirty, 0), git_sha),
])
if ctx.env.DEBUG:
ctx.env.append_value('DEFINES', ['DEBUG=1'])
ctx.env.append_value('QEMUOPTS', [
'-smp', '1',
'-m', '512',