Overhaul memory allocation model

This commit makes several fundamental changes to memory handling:

- the frame allocator is now only an allocator for free frames, and does
  not track used frames.
- the frame allocator now stores its free list inside the free frames
  themselves, as a hybrid stack/span model.
  - This has the implication that all frames must currently fit within
    the offset area.
- kutil has a new allocator interface, which is the only allowed way for
  any code outside of src/kernel to allocate. Code under src/kernel
  _may_ use new/delete, but should prefer the allocator interface.
- the heap manager has become heap_allocator, which is merely an
  implementation of kutil::allocator which doles out sections of a given
  address range.
- the heap manager now only writes block headers when necessary,
  avoiding page faults until they're actually needed
- page_manager now has a page fault handler, which checks with the
  address_manager to see if the address is known, and provides a frame
  mapping if it is, allowing heap manager to work with its entire
  address size from the start. (Currently 32GiB.)
This commit is contained in:
Justin C. Miller
2019-04-16 01:13:09 -07:00
parent fd1adc0262
commit da2fc9aab1
33 changed files with 782 additions and 1010 deletions

View File

@@ -22,6 +22,7 @@ modules:
- src/kernel/debug.s
- src/kernel/device_manager.cpp
- src/kernel/font.cpp
- src/kernel/frame_allocator.cpp
- src/kernel/fs/gpt.cpp
- src/kernel/gdt.cpp
- src/kernel/gdt.s
@@ -101,8 +102,7 @@ modules:
source:
- src/libraries/kutil/assert.cpp
- src/libraries/kutil/bip_buffer.cpp
- src/libraries/kutil/frame_allocator.cpp
- src/libraries/kutil/heap_manager.cpp
- src/libraries/kutil/heap_allocator.cpp
- src/libraries/kutil/logger.cpp
- src/libraries/kutil/memory.cpp
- src/libraries/kutil/printf.c
@@ -126,8 +126,7 @@ modules:
source:
- src/tests/address_manager.cpp
- src/tests/constexpr_hash.cpp
- src/tests/frame_allocator.cpp
- src/tests/linked_list.cpp
- src/tests/logger.cpp
- src/tests/heap_manager.cpp
- src/tests/heap_allocator.cpp
- src/tests/main.cpp

View File

@@ -22,6 +22,9 @@ namespace memory {
/// Initial process thread's stack size, in pages
static const unsigned initial_stack_pages = 1;
/// Max size of the kernel heap
static const size_t kernel_max_heap = 0x800000000; // 32GiB
/// Helper to determine if a physical address can be accessed
/// through the page_offset area.
inline bool page_mappable(uintptr_t a) { return (a & page_offset) == 0; }

View File

@@ -14,7 +14,7 @@
static const char expected_signature[] = "RSD PTR ";
device_manager device_manager::s_instance(nullptr);
device_manager device_manager::s_instance(nullptr, kutil::allocator::invalid);
struct acpi1_rsdp
{
@@ -59,8 +59,13 @@ void irq4_callback(void *)
}
device_manager::device_manager(const void *root_table) :
m_lapic(nullptr)
device_manager::device_manager(const void *root_table, kutil::allocator &alloc) :
m_lapic(nullptr),
m_ioapics(alloc),
m_pci(alloc),
m_devices(alloc),
m_irqs(alloc),
m_blockdevs(alloc)
{
kassert(root_table != 0, "ACPI root table pointer is null.");
@@ -93,7 +98,7 @@ device_manager::device_manager(const void *root_table) :
ioapic *
device_manager::get_ioapic(int i)
{
return (i < m_ioapics.count()) ? m_ioapics[i] : nullptr;
return (i < m_ioapics.count()) ? &m_ioapics[i] : nullptr;
}
static void
@@ -148,19 +153,31 @@ device_manager::load_apic(const acpi_apic *apic)
uint8_t const *p = apic->controller_data;
uint8_t const *end = p + count;
// Pass one: set up IOAPIC objects
// Pass one: count IOAPIC objects
int num_ioapics = 0;
while (p < end) {
const uint8_t type = p[0];
const uint8_t length = p[1];
if (type == 1) num_ioapics++;
p += length;
}
m_ioapics.set_capacity(num_ioapics);
// Pass two: set up IOAPIC objects
p = apic->controller_data;
while (p < end) {
const uint8_t type = p[0];
const uint8_t length = p[1];
if (type == 1) {
uint32_t *base = reinterpret_cast<uint32_t *>(kutil::read_from<uint32_t>(p+4));
uint32_t base_gsr = kutil::read_from<uint32_t>(p+8);
m_ioapics.append(new ioapic(base, base_gsr));
m_ioapics.emplace(base, base_gsr);
}
p += length;
}
// Pass two: configure APIC objects
// Pass three: configure APIC objects
p = apic->controller_data;
while (p < end) {
const uint8_t type = p[0];
@@ -186,7 +203,7 @@ device_manager::load_apic(const acpi_apic *apic)
source, gsi, (flags & 0x3), ((flags >> 2) & 0x3));
// TODO: in a multiple-IOAPIC system this might be elsewhere
m_ioapics[0]->redirect(source, static_cast<isr>(gsi), flags, true);
m_ioapics[0].redirect(source, static_cast<isr>(gsi), flags, true);
}
break;
@@ -212,10 +229,10 @@ device_manager::load_apic(const acpi_apic *apic)
p += length;
}
for (uint8_t i = 0; i < m_ioapics[0]->get_num_gsi(); ++i) {
for (uint8_t i = 0; i < m_ioapics[0].get_num_gsi(); ++i) {
switch (i) {
case 2: break;
default: m_ioapics[0]->mask(i, false);
default: m_ioapics[0].mask(i, false);
}
}

View File

@@ -2,14 +2,13 @@
/// \file device_manager.h
/// The device manager definition
#include "kutil/vector.h"
#include "apic.h"
#include "pci.h"
struct acpi_xsdt;
struct acpi_apic;
struct acpi_mcfg;
class block_device;
class lapic;
class ioapic;
using irq_callback = void (*)(void *);
@@ -19,8 +18,9 @@ class device_manager
{
public:
/// Constructor.
/// \arg root_table Pointer to the ACPI RSDP
device_manager(const void *root_table);
/// \arg root_table Pointer to the ACPI RSDP
/// \arg alloc Allocator for device arrays
device_manager(const void *root_table, kutil::allocator &alloc);
/// Get the system global device manager.
/// \returns A reference to the system device manager
@@ -105,7 +105,7 @@ private:
void bad_irq(uint8_t irq);
lapic *m_lapic;
kutil::vector<ioapic *> m_ioapics;
kutil::vector<ioapic> m_ioapics;
kutil::vector<pci_group> m_pci;
kutil::vector<pci_device> m_devices;

View File

@@ -0,0 +1,93 @@
#include "kutil/assert.h"
#include "kutil/memory.h"
#include "frame_allocator.h"
using memory::frame_size;
using memory::page_offset;
using frame_block_node = kutil::list_node<frame_block>;
frame_allocator g_frame_allocator;
int
frame_block::compare(const frame_block *rhs) const
{
if (address < rhs->address)
return -1;
else if (address > rhs->address)
return 1;
return 0;
}
frame_allocator::raw_alloc::raw_alloc(frame_allocator &fa) : m_fa(fa) {}

/// Allocate a single frame, returning its address in the page_offset area.
/// \arg size  Requested allocation size; must fit within one frame
/// \returns   Virtual (page_offset-mapped) address of the frame, or
///            nullptr if the request is too large or no frames are free
void *
frame_allocator::raw_alloc::allocate(size_t size)
{
    kassert(size <= frame_size, "Raw allocator only allocates a single page");
    if (size > frame_size)
        return nullptr;

    uintptr_t addr = 0;
    if (m_fa.allocate(1, &addr) == 0)
        return nullptr; // out of free frames

    return reinterpret_cast<void*>(addr + page_offset);
}

/// Free a frame previously returned by allocate().
/// \arg p  Virtual (page_offset-mapped) address returned by allocate()
void
frame_allocator::raw_alloc::free(void *p)
{
    if (!p) return;

    // allocate() handed out addr + page_offset; convert the virtual
    // address back to the physical frame address before freeing, or
    // frame_allocator::free() would add page_offset a second time.
    m_fa.free(reinterpret_cast<uintptr_t>(p) - page_offset, 1);
}
// The embedded raw_alloc keeps a back-reference to this frame_allocator so
// it can satisfy kutil::allocator requests with single frames.
frame_allocator::frame_allocator() :
m_raw_alloc(*this)
{
}
/// Get free frames from the head of the free list. Only frames from the
/// first free block are returned, so fewer than `count` frames may be
/// handed out, but they are always contiguous.
/// \arg count    The maximum number of frames to get
/// \arg address  [out] The physical address of the first frame
/// \returns      The number of frames retrieved (0 if none are free)
size_t
frame_allocator::allocate(size_t count, uintptr_t *address)
{
    kassert(!m_free.empty(), "frame_allocator::allocate ran out of free frames!");
    if (m_free.empty())
        return 0;

    auto *first = m_free.front();
    if (count >= first->count) {
        // Hand out the entire block. Copy the fields before unlinking:
        // the node lives inside the frames being given to the caller.
        *address = first->address;
        size_t n = first->count;
        m_free.remove(first);
        return n;
    } else {
        // Take `count` frames from the tail of the block so the list
        // node (stored in the block's first frame) stays intact.
        first->count -= count;
        *address = first->address + (first->count * frame_size);
        return count;
    }
}
/// Physical address one past the last frame of a block.
inline uintptr_t end(frame_block *node) { return node->address + node->count * frame_size; }

/// Return frames to the free list. The list node describing the freed
/// range is stored inside the first freed frame itself, reached through
/// the page_offset mapping, and adjacent blocks are coalesced.
/// \arg address  Physical address of the first frame to free
/// \arg count    Number of frames being freed
void
frame_allocator::free(uintptr_t address, size_t count)
{
// Place the bookkeeping node in the first freed frame, via its
// page_offset-area mapping.
frame_block_node *node =
reinterpret_cast<frame_block_node*>(address + page_offset);
kutil::memset(node, 0, sizeof(frame_block_node));
node->address = address;
node->count = count;
// The free list is kept sorted by address (frame_block::compare), so
// mergeable neighbors are always adjacent in the list.
m_free.sorted_insert(node);
// Coalesce with the following block if it starts where this one ends.
frame_block_node *next = node->next();
if (next && end(node) == next->address) {
node->count += next->count;
m_free.remove(next);
}
// Coalesce with the preceding block if it ends at this block's start;
// the merged range is then described by prev's node and this node is
// dropped (its storage is just a free frame again).
frame_block_node *prev = node->prev();
if (prev && end(prev) == address) {
prev->count += node->count;
m_free.remove(node);
}
}

View File

@@ -0,0 +1,70 @@
#pragma once
/// \file frame_allocator.h
/// Allocator for physical memory frames
#include <stdint.h>
#include "kutil/allocator.h"
#include "kutil/linked_list.h"
struct frame_block;
using frame_block_list = kutil::linked_list<frame_block>;
/// Allocator for physical memory frames. Tracks only free frames; the
/// free list is stored inside the free frames themselves.
class frame_allocator
{
public:
/// Default constructor
frame_allocator();

/// Get free frames from the free list. Only frames from the first free block
/// are returned, so the number may be less than requested, but they will
/// be contiguous.
/// \arg count The maximum number of frames to get
/// \arg address [out] The physical address of the first frame
/// \returns The number of frames retrieved
size_t allocate(size_t count, uintptr_t *address);

/// Free previously allocated frames.
/// \arg address The physical address of the first frame to free
/// \arg count The number of frames to be freed
void free(uintptr_t address, size_t count);

/// Get a memory allocator that allocates raw pages
/// \returns The allocator object
kutil::allocator & raw_allocator() { return m_raw_alloc; }

private:
/// kutil::allocator adapter that hands out whole frames one at a time.
class raw_alloc :
public kutil::allocator
{
public:
raw_alloc(frame_allocator &fa);
virtual void * allocate(size_t size) override;
virtual void free(void *p) override;
private:
frame_allocator &m_fa; ///< The owning frame allocator
};

raw_alloc m_raw_alloc;   ///< Adapter returned by raw_allocator()
frame_block_list m_free; ///< Free frames list

// Non-copyable: there is one global frame allocator.
frame_allocator(const frame_allocator &) = delete;
};
/// A block of contiguous frames. Each `frame_block` represents contiguous
/// physical frames with the same attributes.
struct frame_block
{
uintptr_t address; ///< Physical address of the first frame in the block
uint32_t count;    ///< Number of contiguous frames in the block

/// Compare two blocks by address.
/// \arg rhs The right-hand comparator
/// \returns <0 if this sorts earlier, >0 if this sorts later, 0 for equal
int compare(const frame_block *rhs) const;
};
extern frame_allocator g_frame_allocator;

View File

@@ -181,21 +181,26 @@ isr_handler(cpu_state *regs)
break;
case isr::isrPageFault: {
cons->set_color(11);
cons->puts("\nPage Fault:\n");
cons->set_color();
uintptr_t cr2 = 0;
__asm__ __volatile__ ("mov %%cr2, %0" : "=r"(cr2));
cons->puts(" flags:");
if (regs->errorcode & 0x01) cons->puts(" present");
if (regs->errorcode & 0x02) cons->puts(" write");
if (regs->errorcode & 0x04) cons->puts(" user");
if (regs->errorcode & 0x08) cons->puts(" reserved");
if (regs->errorcode & 0x10) cons->puts(" ip");
cons->puts("\n");
print_regs(*regs);
print_stacktrace(2);
if (!page_manager::get()->fault_handler(cr2)) {
cons->set_color(11);
cons->puts("\nPage Fault:\n");
cons->set_color();
cons->puts(" flags:");
if (regs->errorcode & 0x01) cons->puts(" present");
if (regs->errorcode & 0x02) cons->puts(" write");
if (regs->errorcode & 0x04) cons->puts(" user");
if (regs->errorcode & 0x08) cons->puts(" reserved");
if (regs->errorcode & 0x10) cons->puts(" ip");
cons->puts("\n");
print_regs(*regs);
print_stacktrace(2);
_halt();
}
}
_halt();
break;
case isr::isrTimer:

View File

@@ -15,7 +15,6 @@
#include "log.h"
#include "page_manager.h"
#include "scheduler.h"
#include "screen.h"
#include "serial.h"
#include "syscall.h"
@@ -51,7 +50,7 @@ kernel_main(kernel_args *header)
gdt_init();
interrupts_init();
memory_initialize(
kutil::allocator &heap = memory_initialize(
header->scratch_pages,
header->memory_map,
header->memory_map_length,
@@ -72,7 +71,7 @@ kernel_main(kernel_args *header)
log::debug(logs::boot, "ACPI root table is at: %016lx", header->acpi_table);
log::debug(logs::boot, "Runtime service is at: %016lx", header->runtime);
initrd::disk ird(header->initrd);
initrd::disk ird(header->initrd, heap);
log::info(logs::boot, "initrd loaded with %d files.", ird.files().count());
for (auto &f : ird.files())
log::info(logs::boot, " %s%s (%d bytes).", f.executable() ? "*" : "", f.name(), f.size());
@@ -83,7 +82,7 @@ kernel_main(kernel_args *header)
*/
device_manager *devices =
new (&device_manager::get()) device_manager(header->acpi_table);
new (&device_manager::get()) device_manager(header->acpi_table, heap);
interrupts_enable();
@@ -130,7 +129,7 @@ kernel_main(kernel_args *header)
devices->get_lapic()->calibrate_timer();
syscall_enable();
scheduler *sched = new (&scheduler::get()) scheduler(devices->get_lapic());
scheduler *sched = new (&scheduler::get()) scheduler(devices->get_lapic(), heap);
sched->create_kernel_task(-1, logger_task);

View File

@@ -2,72 +2,26 @@
#include <utility>
#include "kutil/address_manager.h"
#include "kutil/assert.h"
#include "kutil/frame_allocator.h"
#include "kutil/heap_manager.h"
#include "kutil/heap_allocator.h"
#include "frame_allocator.h"
#include "io.h"
#include "log.h"
#include "page_manager.h"
using kutil::frame_block;
using kutil::frame_block_flags;
using kutil::frame_block_list;
using memory::frame_size;
using memory::kernel_max_heap;
using memory::kernel_offset;
using memory::page_offset;
static const unsigned ident_page_flags = 0xb;
kutil::frame_allocator g_frame_allocator;
kutil::address_manager g_kernel_address_manager;
kutil::heap_manager g_kernel_heap_manager;
kutil::heap_allocator g_kernel_heap;
void * mm_grow_callback(size_t length)
{
kassert(length % frame_size == 0,
"Heap manager requested a fractional page.");
size_t pages = length / frame_size;
log::info(logs::memory, "Heap manager growing heap by %d pages.", pages);
uintptr_t addr = g_kernel_address_manager.allocate(length);
g_page_manager.map_pages(addr, pages);
return reinterpret_cast<void *>(addr);
}
namespace {
// Page-by-page initial allocator for the initial frame_block allocator
struct page_consumer
{
page_consumer(uintptr_t start, unsigned count, unsigned used = 0) :
current(start + used * frame_size),
used(used),
max(count) {}
void * get_page() {
kassert(used++ < max, "page_consumer ran out of pages");
void *retval = reinterpret_cast<void *>(current);
current += frame_size;
return retval;
}
void * operator()(size_t size) {
kassert(size == frame_size, "page_consumer used with non-page size!");
return get_page();
}
unsigned left() const { return max - used; }
uintptr_t current;
unsigned used, max;
};
using block_allocator =
kutil::slab_allocator<kutil::frame_block, page_consumer &>;
using region_allocator =
kutil::slab_allocator<kutil::buddy_region, page_consumer &>;
}
void * operator new(size_t size) { return g_kernel_heap.allocate(size); }
void * operator new [] (size_t size) { return g_kernel_heap.allocate(size); }
void operator delete (void *p) noexcept { return g_kernel_heap.free(p); }
void operator delete [] (void *p) noexcept { return g_kernel_heap.free(p); }
enum class efi_memory_type : uint32_t
{
@@ -107,92 +61,103 @@ struct efi_memory_descriptor
uint64_t flags;
};
static const efi_memory_descriptor *
desc_incr(const efi_memory_descriptor *d, size_t desc_length)
struct memory_map
{
return reinterpret_cast<const efi_memory_descriptor *>(
reinterpret_cast<const uint8_t *>(d) + desc_length);
}
memory_map(const void *efi_map, size_t map_length, size_t desc_length) :
efi_map(efi_map), map_length(map_length), desc_length(desc_length) {}
void
gather_block_lists(
block_allocator &allocator,
frame_block_list &used,
frame_block_list &free,
const void *memory_map,
size_t map_length,
size_t desc_length)
{
efi_memory_descriptor const *desc = reinterpret_cast<efi_memory_descriptor const *>(memory_map);
efi_memory_descriptor const *end = desc_incr(desc, map_length);
class iterator
{
public:
iterator(const memory_map &map, efi_memory_descriptor const *item) :
map(map), item(item) {}
while (desc < end) {
auto *block = allocator.pop();
block->address = desc->physical_start;
block->count = desc->pages;
bool block_used;
switch (desc->type) {
case efi_memory_type::loader_code:
case efi_memory_type::loader_data:
block_used = true;
block->flags = frame_block_flags::pending_free;
break;
case efi_memory_type::boot_services_code:
case efi_memory_type::boot_services_data:
case efi_memory_type::available:
block_used = false;
break;
case efi_memory_type::acpi_reclaim:
block_used = true;
block->flags =
frame_block_flags::acpi_wait |
frame_block_flags::map_ident;
break;
case efi_memory_type::persistent:
block_used = false;
block->flags = frame_block_flags::nonvolatile;
break;
case efi_memory_type::popcorn_kernel:
block_used = true;
block->flags =
frame_block_flags::permanent |
frame_block_flags::map_kernel;
break;
case efi_memory_type::popcorn_data:
case efi_memory_type::popcorn_initrd:
block_used = true;
block->flags =
frame_block_flags::pending_free |
frame_block_flags::map_kernel;
break;
case efi_memory_type::popcorn_scratch:
block_used = true;
block->flags = frame_block_flags::map_offset;
break;
default:
block_used = true;
block->flags = frame_block_flags::permanent;
break;
inline efi_memory_descriptor const * operator*() const { return item; }
inline bool operator!=(const iterator &other) { return item != other.item; }
inline iterator & operator++() {
item = kutil::offset_pointer(item, map.desc_length);
return *this;
}
if (block_used)
used.push_back(block);
else
free.push_back(block);
private:
const memory_map &map;
efi_memory_descriptor const *item;
};
desc = desc_incr(desc, desc_length);
iterator begin() const {
return iterator(*this, reinterpret_cast<efi_memory_descriptor const *>(efi_map));
}
}
void
iterator end() const {
const void *end = kutil::offset_pointer(efi_map, map_length);
return iterator(*this, reinterpret_cast<efi_memory_descriptor const *>(end));
}
const void *efi_map;
size_t map_length;
size_t desc_length;
};
class memory_bootstrap
{
public:
memory_bootstrap(const void *memory_map, size_t map_length, size_t desc_length) :
map(memory_map, map_length, desc_length) {}
void add_free_frames(frame_allocator &fa) {
for (auto *desc : map) {
if (desc->type == efi_memory_type::loader_code ||
desc->type == efi_memory_type::loader_data ||
desc->type == efi_memory_type::boot_services_code ||
desc->type == efi_memory_type::boot_services_data ||
desc->type == efi_memory_type::available)
{
fa.free(desc->physical_start, desc->pages);
}
}
}
void add_used_frames(kutil::address_manager &am) {
for (auto *desc : map) {
if (desc->type == efi_memory_type::popcorn_data ||
desc->type == efi_memory_type::popcorn_initrd)
{
uintptr_t virt_addr = desc->physical_start + kernel_offset;
am.mark(virt_addr, desc->pages * frame_size);
}
else if (desc->type == efi_memory_type::popcorn_kernel)
{
uintptr_t virt_addr = desc->physical_start + kernel_offset;
am.mark_permanent(virt_addr, desc->pages * frame_size);
}
}
}
void page_in_kernel(page_manager &pm, page_table *pml4) {
for (auto *desc : map) {
if (desc->type == efi_memory_type::popcorn_kernel ||
desc->type == efi_memory_type::popcorn_data ||
desc->type == efi_memory_type::popcorn_initrd)
{
uintptr_t virt_addr = desc->physical_start + kernel_offset;
pm.page_in(pml4, desc->physical_start, virt_addr, desc->pages);
}
if (desc->type == efi_memory_type::acpi_reclaim) {
pm.page_in(pml4, desc->physical_start, desc->physical_start, desc->pages);
}
}
// Put our new PML4 into CR3 to start using it
page_manager::set_pml4(pml4);
pm.m_kernel_pml4 = pml4;
}
private:
const memory_map map;
};
kutil::allocator &
memory_initialize(uint16_t scratch_pages, const void *memory_map, size_t map_length, size_t desc_length)
{
// make sure the options we want in CR4 are set
@@ -227,81 +192,51 @@ memory_initialize(uint16_t scratch_pages, const void *memory_map, size_t map_len
__sync_synchronize();
io_wait();
// We now have pages starting at "scratch_virt" to bootstrap ourselves. Start by
// taking inventory of free pages.
uintptr_t scratch_virt = scratch_phys + page_offset;
uint64_t used_pages = 2; // starts with PML4 + offset PDP
page_consumer allocator(scratch_virt, scratch_pages, used_pages);
memory_bootstrap bootstrap {memory_map, map_length, desc_length};
block_allocator block_slab(frame_size, allocator);
frame_block_list used;
frame_block_list free;
// Now tell the frame allocator what's free
frame_allocator *fa = new (&g_frame_allocator) frame_allocator;
bootstrap.add_free_frames(*fa);
gather_block_lists(block_slab, used, free, memory_map, map_length, desc_length);
block_slab.allocate(); // Make sure we have extra
// Build an initial address manager that we'll copy into the real
// address manager later (so that we can use a raw allocator now)
kutil::allocator &alloc = fa->raw_allocator();
kutil::address_manager init_am(alloc);
init_am.add_regions(kernel_offset, page_offset - kernel_offset);
bootstrap.add_used_frames(init_am);
// Now go back through these lists and consolidate
block_slab.append(frame_block::consolidate(free));
// Add the heap into the address manager
uintptr_t heap_start = page_offset - kernel_max_heap;
init_am.mark(heap_start, kernel_max_heap);
region_allocator region_slab(frame_size, allocator);
region_slab.allocate(); // Allocate some buddy regions for the address_manager
kutil::allocator *heap_alloc =
new (&g_kernel_heap) kutil::heap_allocator(heap_start, kernel_max_heap);
// Copy everything into the real address manager
kutil::address_manager *am =
new (&g_kernel_address_manager) kutil::address_manager(std::move(region_slab));
new (&g_kernel_address_manager) kutil::address_manager(
std::move(init_am), *heap_alloc);
am->add_regions(kernel_offset, page_offset - kernel_offset);
// Create the page manager
page_manager *pm = new (&g_page_manager) page_manager(*fa, *am);
// Finally, build an actual set of kernel page tables that just contains
// Give the frame_allocator back the rest of the scratch pages
fa->free(scratch_phys + (3 * frame_size), scratch_pages - 3);
// Finally, build an actual set of kernel page tables where we'll only add
// what the kernel actually has mapped, but making everything writable
// (especially the page tables themselves)
page_table *pml4 = reinterpret_cast<page_table *>(allocator.get_page());
page_table *pml4 = &tables[2];
pml4 = kutil::offset_pointer(pml4, page_offset);
kutil::memset(pml4, 0, sizeof(page_table));
pml4->entries[511] = reinterpret_cast<uintptr_t>(id_pdp) | 0x10b;
kutil::frame_allocator *fa =
new (&g_frame_allocator) kutil::frame_allocator(std::move(block_slab));
page_manager *pm = new (&g_page_manager) page_manager(*fa, *am);
bootstrap.page_in_kernel(*pm, pml4);
// Give the rest to the page_manager's cache for use in page_in
pm->free_table_pages(
reinterpret_cast<void *>(allocator.current),
allocator.left());
// Reclaim the old PML4
fa->free(scratch_phys, 1);
for (auto *block : used) {
uintptr_t virt_addr = 0;
switch (block->flags & frame_block_flags::map_mask) {
case frame_block_flags::map_ident:
virt_addr = block->address;
break;
case frame_block_flags::map_kernel:
virt_addr = block->address + kernel_offset;
if (block->flags && frame_block_flags::permanent)
am->mark_permanent(virt_addr, block->count * frame_size);
else
am->mark(virt_addr, block->count * frame_size);
break;
default:
break;
}
block->flags -= frame_block_flags::map_mask;
if (virt_addr)
pm->page_in(pml4, block->address, virt_addr, block->count);
}
fa->init(std::move(free), std::move(used));
// Put our new PML4 into CR3 to start using it
page_manager::set_pml4(pml4);
pm->m_kernel_pml4 = pml4;
// Give the old pml4 back to the page_manager to recycle
pm->free_table_pages(reinterpret_cast<void *>(scratch_virt), 1);
// Set the heap manager
new (&g_kernel_heap_manager) kutil::heap_manager(mm_grow_callback);
kutil::setup::set_heap(&g_kernel_heap_manager);
return *heap_alloc;
}

View File

@@ -11,7 +11,6 @@ using memory::kernel_offset;
using memory::page_offset;
using memory::page_mappable;
extern kutil::frame_allocator g_frame_allocator;
extern kutil::address_manager g_kernel_address_manager;
page_manager g_page_manager(
g_frame_allocator,
@@ -40,7 +39,7 @@ struct free_page_header
page_manager::page_manager(
kutil::frame_allocator &frames,
frame_allocator &frames,
kutil::address_manager &addrs) :
m_page_cache(nullptr),
m_frames(frames),
@@ -341,13 +340,31 @@ page_manager::unmap_table(page_table *table, page_table::level lvl, bool free, p
void
page_manager::unmap_pages(void* address, size_t count, page_table *pml4)
{
if (!pml4) pml4 = get_pml4();
page_out(pml4, reinterpret_cast<uintptr_t>(address), count, true);
if (address >= kernel_offset) {
m_addrs.free(address, count);
if (!pml4)
pml4 = get_pml4();
uintptr_t iaddr = reinterpret_cast<uintptr_t>(address);
page_out(pml4, iaddr, count, true);
if (iaddr >= kernel_offset) {
// TODO
// m_addrs.free(address, count);
}
}
bool
page_manager::fault_handler(uintptr_t addr)
{
if (!m_addrs.contains(addr))
return false;
uintptr_t page = addr & ~0xfffull;
bool user = addr < kernel_offset;
map_pages(page, 1, user);
return true;
}
void
page_manager::check_needs_page(page_table *table, unsigned index, bool user)
{

View File

@@ -7,9 +7,9 @@
#include "kutil/address_manager.h"
#include "kutil/enum_bitfields.h"
#include "kutil/frame_allocator.h"
#include "kutil/linked_list.h"
#include "kutil/slab_allocator.h"
#include "frame_allocator.h"
#include "kernel_memory.h"
#include "page_table.h"
@@ -20,7 +20,7 @@ class page_manager
{
public:
page_manager(
kutil::frame_allocator &frames,
frame_allocator &frames,
kutil::address_manager &addrs);
/// Helper to get the number of pages needed for a given number of bytes.
@@ -44,8 +44,9 @@ public:
/// \arg pml4 A pointer to the PML4 table to install.
static inline void set_pml4(page_table *pml4)
{
uintptr_t p = reinterpret_cast<uintptr_t>(pml4) - memory::page_offset;
__asm__ __volatile__ ( "mov %0, %%cr3" :: "r" (p & ~0xfffull) );
constexpr uint64_t phys_mask = ~memory::page_offset & ~0xfffull;
uintptr_t p = reinterpret_cast<uintptr_t>(pml4) & phys_mask;
__asm__ __volatile__ ( "mov %0, %%cr3" :: "r" (p) );
}
/// Allocate but don't switch to a new PML4 table. This table
@@ -113,6 +114,11 @@ public:
/// Get a pointer to the kernel's PML4
inline page_table * get_kernel_pml4() { return m_kernel_pml4; }
/// Attempt to handle a page fault.
/// \arg addr Address that triggered the fault
/// \returns True if the fault was handled
bool fault_handler(uintptr_t addr);
private:
/// Copy a physical page
/// \arg orig Physical address of the page to copy
@@ -169,10 +175,10 @@ private:
page_table *m_kernel_pml4; ///< The PML4 of just kernel pages
free_page_header *m_page_cache; ///< Cache of free pages to use for tables
kutil::frame_allocator &m_frames;
frame_allocator &m_frames;
kutil::address_manager &m_addrs;
friend void memory_initialize(uint16_t, const void *, size_t, size_t);
friend class memory_bootstrap;
page_manager(const page_manager &) = delete;
};
@@ -205,4 +211,8 @@ page_table_align(T p)
/// Bootstrap the memory managers.
void memory_initialize(uint16_t scratch_pages, const void *memory_map, size_t map_length, size_t desc_length);
kutil::allocator & memory_initialize(
uint16_t scratch_pages,
const void *memory_map,
size_t map_length,
size_t desc_length);

View File

@@ -1,3 +1,4 @@
#include "kutil/heap_allocator.h"
#include "cpu.h"
#include "debug.h"
#include "log.h"
@@ -5,7 +6,7 @@
#include "scheduler.h"
extern "C" void task_fork_return_thunk();
extern kutil::heap_allocator g_kernel_heap; // TODO: this is a bad hack to get access to the heap
void
process::exit(uint32_t code)
@@ -67,7 +68,7 @@ process::setup_kernel_stack()
constexpr unsigned null_frame_entries = 2;
constexpr size_t null_frame_size = null_frame_entries * sizeof(uint64_t);
void *stack_bottom = kutil::malloc(initial_stack_size);
void *stack_bottom = g_kernel_heap.allocate(initial_stack_size);
kutil::memset(stack_bottom, 0, initial_stack_size);
log::debug(logs::memory, "Created kernel stack at %016lx size 0x%lx",

View File

@@ -16,7 +16,7 @@
using memory::initial_stack;
scheduler scheduler::s_instance(nullptr);
scheduler scheduler::s_instance(nullptr, kutil::allocator::invalid);
const uint64_t rflags_noint = 0x002;
const uint64_t rflags_int = 0x202;
@@ -28,9 +28,10 @@ extern "C" {
extern uint64_t idle_stack_end;
scheduler::scheduler(lapic *apic) :
scheduler::scheduler(lapic *apic, kutil::allocator &alloc) :
m_apic(apic),
m_next_pid(1)
m_next_pid(1),
m_process_allocator(alloc)
{
auto *idle = m_process_allocator.pop();
uint8_t last_pri = num_priorities - 1;

View File

@@ -3,6 +3,7 @@
/// The task scheduler and related definitions
#include <stdint.h>
#include "kutil/allocator.h"
#include "kutil/slab_allocator.h"
#include "process.h"
@@ -30,7 +31,8 @@ public:
/// Constructor.
/// \arg apic Pointer to the local APIC object
scheduler(lapic *apic);
/// \arg alloc Allocator to use for TCBs
scheduler(lapic *apic, kutil::allocator &alloc);
/// Create a new process from a program image in memory.
/// \arg name Name of the program image

View File

@@ -49,7 +49,7 @@ class disk
public:
/// Constructor.
/// \arg start The start of the initrd in memory
disk(const void *start);
disk(const void *start, kutil::allocator &alloc);
/// Get the vector of files on the disk
const kutil::vector<file> & files() const { return m_files; }

View File

@@ -23,7 +23,8 @@ file::executable() const {
}
disk::disk(const void *start)
disk::disk(const void *start, kutil::allocator &alloc) :
m_files(alloc)
{
auto *header = reinterpret_cast<const disk_header *>(start);
size_t length = header->length;

View File

@@ -1,190 +0,0 @@
#include "kutil/assert.h"
#include "kutil/frame_allocator.h"
#include "kutil/memory.h"
namespace kutil {
using memory::frame_size;
using memory::page_offset;
int
frame_block::compare(const frame_block *rhs) const
{
if (address < rhs->address)
return -1;
else if (address > rhs->address)
return 1;
return 0;
}
frame_block_list
frame_block::consolidate(frame_block_list &list)
{
frame_block_list freed;
for (auto *cur : list) {
auto *next = cur->next();
while ( next &&
cur->flags == next->flags &&
cur->end() == next->address) {
cur->count += next->count;
list.remove(next);
freed.push_back(next);
}
}
return freed;
}
void
frame_block::zero()
{
address = 0;
count = 0;
flags = frame_block_flags::none;
}
void
frame_block::copy(frame_block *other)
{
address = other->address;
count = other->count;
flags = other->flags;
}
frame_allocator::frame_allocator(
frame_block_list cache)
{
m_cache.append(cache);
}
void
frame_allocator::init(
frame_block_list free,
frame_block_list used)
{
m_free.append(free);
m_used.append(used);
}
list_node<frame_block> *
frame_allocator::get_block_node()
{
if (m_cache.empty()) {
auto *first = m_free.front();
frame_block_node * start =
reinterpret_cast<frame_block_node*>(first->address + page_offset);
frame_block_node * end = offset_pointer(start, frame_size);
if (first->count == 1) {
m_free.remove(first);
} else {
first->count--;
first->address += frame_size;
}
while (start < end) {
m_cache.push_back(start);
start++;
}
}
return m_cache.pop_front();
}
void
frame_allocator::consolidate_blocks()
{
m_cache.append(frame_block::consolidate(m_free));
m_cache.append(frame_block::consolidate(m_used));
}
size_t
frame_allocator::allocate(size_t count, uintptr_t *address)
{
kassert(!m_free.empty(), "frame_allocator::pop_frames ran out of free frames!");
auto *first = m_free.front();
unsigned n = count < first->count ? count : first->count;
*address = first->address;
if (count >= first->count) {
m_free.remove(first);
m_used.sorted_insert(first);
} else {
auto *used = get_block_node();
used->copy(first);
used->count = n;
m_used.sorted_insert(used);
first->address += n * frame_size;
first->count -= n;
}
consolidate_blocks();
return n;
}
void
frame_allocator::free(uintptr_t address, size_t count)
{
size_t block_count = 0;
for (auto *block : m_used) {
if (!block->contains(address)) continue;
size_t size = frame_size * count;
uintptr_t end = address + size;
size_t leading = address - block->address;
size_t trailing =
end > block->end() ?
0 : (block->end() - end);
if (leading) {
size_t frames = leading / frame_size;
auto *lead_block = get_block_node();
lead_block->copy(block);
lead_block->count = frames;
block->count -= frames;
block->address += leading;
m_used.insert_before(block, lead_block);
}
if (trailing) {
size_t frames = trailing / frame_size;
auto *trail_block = get_block_node();
trail_block->copy(block);
trail_block->count = frames;
trail_block->address += size;
block->count -= frames;
m_used.insert_before(block, trail_block);
}
m_used.remove(block);
m_free.sorted_insert(block);
++block_count;
address += block->count * frame_size;
count -= block->count;
if (!count)
break;
}
kassert(block_count, "Couldn't find existing allocated frames to free");
consolidate_blocks();
}
} // namespace kutil

View File

@@ -1,12 +1,11 @@
#include <stdint.h>
#include "kutil/assert.h"
#include "kutil/memory.h"
#include "kutil/heap_manager.h"
#include "kutil/heap_allocator.h"
namespace kutil {
struct heap_manager::mem_header
struct heap_allocator::mem_header
{
mem_header(mem_header *prev, mem_header *next, uint8_t size) :
m_prev(prev), m_next(next)
@@ -14,34 +13,29 @@ struct heap_manager::mem_header
set_size(size);
}
inline void set_size(uint8_t size)
{
inline void set_size(uint8_t size) {
m_prev = reinterpret_cast<mem_header *>(
reinterpret_cast<uintptr_t>(prev()) | (size & 0x3f));
}
inline void set_used(bool used)
{
inline void set_used(bool used) {
m_next = reinterpret_cast<mem_header *>(
reinterpret_cast<uintptr_t>(next()) | (used ? 1 : 0));
}
inline void set_next(mem_header *next)
{
inline void set_next(mem_header *next) {
bool u = used();
m_next = next;
set_used(u);
}
inline void set_prev(mem_header *prev)
{
inline void set_prev(mem_header *prev) {
uint8_t s = size();
m_prev = prev;
set_size(s);
}
void remove()
{
void remove() {
if (next()) next()->set_prev(prev());
if (prev()) prev()->set_next(next());
set_prev(nullptr);
@@ -67,25 +61,24 @@ private:
};
heap_manager::heap_manager() :
m_grow(nullptr)
{
}
heap_allocator::heap_allocator() : m_next(0), m_size(0) {}
heap_manager::heap_manager(grow_callback grow_cb) :
m_grow(grow_cb)
heap_allocator::heap_allocator(uintptr_t start, size_t size) :
m_next(start), m_size(size)
{
kutil::memset(m_free, 0, sizeof(m_free));
grow_memory();
}
void *
heap_manager::allocate(size_t length)
heap_allocator::allocate(size_t length)
{
size_t total = length + sizeof(mem_header);
unsigned size = min_size;
while (total > (1 << size)) size++;
kassert(size <= max_size, "Tried to allocate a block bigger than max_size");
if (size > max_size)
return nullptr;
mem_header *header = pop_free(size);
header->set_used(true);
@@ -93,18 +86,28 @@ heap_manager::allocate(size_t length)
}
void
heap_manager::free(void *p)
heap_allocator::free(void *p)
{
if (!p) return;
mem_header *header = reinterpret_cast<mem_header *>(p);
header -= 1; // p points after the header
header->set_used(false);
while (header->size() != max_size) {
auto size = header->size();
mem_header *buddy = header->buddy();
if (buddy->used() || buddy->size() != header->size()) break;
if (buddy->used() || buddy->size() != size)
break;
if (get_free(size) == buddy)
get_free(size) = buddy->next();
buddy->remove();
header = header->eldest() ? header : buddy;
header->set_size(header->size() + 1);
header->set_size(size + 1);
}
uint8_t size = header->size();
@@ -115,47 +118,60 @@ heap_manager::free(void *p)
}
void
heap_manager::grow_memory()
heap_allocator::ensure_block(unsigned size)
{
size_t length = (1 << max_size);
kassert(m_grow, "Tried to grow heap without a growth callback");
void *next = m_grow(length);
mem_header *block = new (next) mem_header(nullptr, get_free(max_size), max_size);
get_free(max_size) = block;
if (block->next())
block->next()->set_prev(block);
}
void
heap_manager::ensure_block(unsigned size)
{
if (get_free(size) != nullptr) return;
else if (size == max_size) {
grow_memory();
if (get_free(size) != nullptr)
return;
if (size == max_size) {
size_t bytes = (1 << max_size);
if (bytes <= m_size) {
mem_header *next = reinterpret_cast<mem_header *>(m_next);
new (next) mem_header(nullptr, nullptr, size);
get_free(size) = next;
m_next += bytes;
m_size -= bytes;
}
} else {
mem_header *orig = pop_free(size + 1);
if (orig) {
mem_header *next = kutil::offset_pointer(orig, 1 << size);
new (next) mem_header(orig, nullptr, size);
orig->set_next(next);
orig->set_size(size);
get_free(size) = orig;
}
}
mem_header *orig = pop_free(size + 1);
mem_header *next = kutil::offset_pointer(orig, 1 << size);
new (next) mem_header(orig, nullptr, size);
orig->set_next(next);
orig->set_size(size);
get_free(size) = orig;
}
heap_manager::mem_header *
heap_manager::pop_free(unsigned size)
heap_allocator::mem_header *
heap_allocator::pop_free(unsigned size)
{
ensure_block(size);
mem_header *block = get_free(size);
get_free(size) = block->next();
block->remove();
if (block) {
get_free(size) = block->next();
block->remove();
}
return block;
}
/// Sentinel allocator bound to allocator::invalid. Any attempt to
/// allocate or free through it asserts, catching code paths that were
/// never handed a real allocator.
class invalid_allocator :
public allocator
{
public:
virtual void * allocate(size_t) override {
kassert(false, "Attempting to allocate from allocator::invalid");
return nullptr;
}
virtual void free(void *) override {
kassert(false, "Attempting to free from allocator::invalid");
}
} _invalid_allocator;
// Definition of the static sentinel declared in kutil/allocator.h.
allocator &allocator::invalid = _invalid_allocator;
} // namespace kutil

View File

@@ -0,0 +1,31 @@
#pragma once
/// \file allocator.h
/// Allocator interface
#include <stdint.h>
#include "kernel_memory.h"
namespace kutil {
/// Abstract allocator interface. Memory providers implement this;
/// consumers hold an allocator& rather than calling new/delete.
class allocator
{
public:
/// Allocate memory.
/// \arg size The amount of memory to allocate, in bytes
/// \returns A pointer to the allocated memory, or nullptr if
/// allocation failed.
virtual void * allocate(size_t size) = 0;
/// Free a previous allocation.
/// \arg p A pointer previously returned by allocate()
virtual void free(void *p) = 0;
/// Typed convenience wrapper: allocate raw space for `count` objects
/// of type T. No constructors are run.
template <typename T>
inline T * allocate(unsigned count) {
return reinterpret_cast<T*>(allocate(count * sizeof(T)));
}
/// Sentinel allocator that asserts on any use.
static allocator &invalid;
};
} // namespace kutil

View File

@@ -3,6 +3,7 @@
/// Helper base class for buddy allocators with external node storage.
#include <stdint.h>
#include <utility>
#include "kutil/assert.h"
#include "kutil/linked_list.h"
#include "kutil/slab_allocator.h"
@@ -25,15 +26,21 @@ public:
static const size_t min_alloc = (1 << size_min);
static const size_t max_alloc = (1 << size_max);
/// Constructor.
buddy_allocator() {}
/// Default constructor creates an invalid object.
buddy_allocator() : m_alloc(allocator::invalid) {}
/// Constructor with an initial cache of region structs from bootstrapped
/// memory.
/// \arg cache List of pre-allocated ununused region_type structures
buddy_allocator(region_list cache)
/// Constructor.
/// \arg alloc Allocator to use for region nodes
buddy_allocator(allocator &alloc) : m_alloc(alloc) {}
/// Move-like constructor. Takes ownership of existing regions.
buddy_allocator(buddy_allocator &&other, allocator &alloc) :
m_alloc(alloc)
{
m_alloc.append(cache);
for (unsigned i = 0; i < buckets; ++i) {
m_free[i] = std::move(other.m_free[i]);
m_used[i] = std::move(other.m_used[i]);
}
}
/// Add address space to be managed.
@@ -173,6 +180,23 @@ public:
}
}
/// Check if an allocation exists
/// \arg addr Address within the managed space
/// \returns True if the address is in a region currently allocated
bool contains(uintptr_t addr)
{
// NOTE(review): for unsigned i, `i >= size_min` is always true when
// size_min == 0 and this loop would never terminate -- confirm
// size_min > 0 is an invariant of buddy_allocator.
for (unsigned i = size_max; i >= size_min; --i) {
for (auto *r : used_bucket(i)) {
if (r->contains(addr))
return true;
// NOTE(review): this early-out assumes that once a region starts
// below addr (without containing it), no later region in the
// bucket can contain addr -- verify this matches the bucket's
// actual sort direction.
else if (r->address < addr)
break;
}
}
return false;
}
protected:
/// Split a region of the given size into two smaller regions, returning
/// the new latter half
@@ -266,6 +290,8 @@ struct buddy_region
inline uintptr_t end() const { return address + (1ull << size); }
inline uintptr_t half() const { return address + (1ull << (size - 1)); }
inline bool contains(uintptr_t p) const { return p >= address && p < end(); }
inline uintptr_t buddy() const { return address ^ (1ull << size); }
inline bool elder() const { return address < buddy(); }

View File

@@ -1,122 +0,0 @@
#pragma once
/// \file frame_allocator.h
/// Allocator for physical memory frames
#include <stdint.h>
#include "kernel_memory.h"
#include "kutil/enum_bitfields.h"
#include "kutil/linked_list.h"
namespace kutil {
struct frame_block;
using frame_block_list = linked_list<frame_block>;
/// Allocator for physical memory frames
class frame_allocator
{
public:
/// Default constructor
frame_allocator() = default;
/// Constructor with a provided initial frame_block cache.
/// \arg cache List of pre-allocated but unused frame_block structures
frame_allocator(frame_block_list cache);
/// Initialize the frame allocator from bootstraped memory.
/// \arg free List of free blocks
/// \arg used List of currently used blocks
void init(
frame_block_list free,
frame_block_list used);
/// Get free frames from the free list. Only frames from the first free block
/// are returned, so the number may be less than requested, but they will
/// be contiguous.
/// \arg count The maximum number of frames to get
/// \arg address [out] The physical address of the first frame
/// \returns The number of frames retrieved
size_t allocate(size_t count, uintptr_t *address);
/// Free previously allocated frames.
/// \arg address The physical address of the first frame to free
/// \arg count The number of frames to be freed
void free(uintptr_t address, size_t count);
/// Consolidate the free and used block lists. Return freed blocks
/// to the cache.
void consolidate_blocks();
private:
using frame_block_node = list_node<frame_block>;
frame_block_list m_free; ///< Free frames list
frame_block_list m_used; ///< In-use frames list
frame_block_list m_cache; ///< Spare frame-block structs
frame_block_node *get_block_node();
frame_allocator(const frame_allocator &) = delete;
};
/// Flags used by `frame_block`.
enum class frame_block_flags : uint32_t
{
none = 0x0000,
mmio = 0x0001, ///< Memory is a MMIO region
nonvolatile = 0x0002, ///< Memory is non-volatile storage
pending_free = 0x0020, ///< Memory should be freed
acpi_wait = 0x0040, ///< Memory should be freed after ACPI init
permanent = 0x0080, ///< Memory is permanently unusable
// The following are used only during the memory bootstraping
// process, and tell the page manager where to initially map
// the given block.
map_ident = 0x0100, ///< Identity map
map_kernel = 0x0200, ///< Map into normal kernel space
map_offset = 0x0400, ///< Map into offset kernel space
map_mask = 0x0700, ///< Mask of all map_* values
};
} // namespace kutil
IS_BITFIELD(kutil::frame_block_flags);
namespace kutil {
/// A block of contiguous frames. Each `frame_block` represents contiguous
/// physical frames with the same attributes.
struct frame_block
{
uintptr_t address;
uint32_t count;
frame_block_flags flags;
inline bool has_flag(frame_block_flags f) const { return bitfield_has(flags, f); }
inline uintptr_t end() const { return address + (count * memory::frame_size); }
inline bool contains(uintptr_t addr) const { return addr >= address && addr < end(); }
/// Helper to zero out a block and optionally set the next pointer.
void zero();
/// Helper to copy a bock from another block
/// \arg other The block to copy from
void copy(frame_block *other);
/// Compare two blocks by address.
/// \arg rhs The right-hand comparator
/// \returns <0 if this is sorts earlier, >0 if this sorts later, 0 for equal
int compare(const frame_block *rhs) const;
/// Traverse the list, joining adjacent blocks where possible.
/// \arg list The list to consolidate
/// \returns A linked list of freed frame_block structures.
static frame_block_list consolidate(frame_block_list &list);
};
} // namespace kutil

View File

@@ -1,36 +1,35 @@
#pragma once
/// \file heap_manager.h
/// A buddy allocator and related definitions.
/// \file heap_allocator.h
/// A buddy allocator for a memory heap
#include <stddef.h>
#include "kutil/allocator.h"
namespace kutil {
/// Manager for allocation of heap memory.
class heap_manager
/// Allocator for a given heap range
class heap_allocator :
public allocator
{
public:
/// Callback signature for growth function. Memory returned does not need
/// to be contiguous, but needs to be alined to the length requested.
using grow_callback = void * (*)(size_t length);
/// Default constructor creates a valid but empty heap.
heap_allocator();
/// Default constructor. Creates an invalid manager.
heap_manager();
/// Constructor.
/// \arg grow_cb Function pointer to grow the heap size
heap_manager(grow_callback grow_cb);
/// Constructor. The given memory area must already have been reserved.
/// \arg start Starting address of the heap
/// \arg size Size of the heap in bytes
heap_allocator(uintptr_t start, size_t size);
/// Allocate memory from the area managed.
/// \arg length The amount of memory to allocate, in bytes
/// \returns A pointer to the allocated memory, or nullptr if
/// allocation failed.
void * allocate(size_t length);
virtual void * allocate(size_t length) override;
/// Free a previous allocation.
/// \arg p A pointer previously retuned by allocate()
void free(void *p);
virtual void free(void *p) override;
/// Minimum block size is (2^min_size). Must be at least 6.
static const unsigned min_size = 6;
@@ -41,9 +40,6 @@ public:
protected:
class mem_header;
/// Expand the size of memory
void grow_memory();
/// Ensure there is a block of a given size, recursively splitting
/// \arg size Size category of the block we want
void ensure_block(unsigned size);
@@ -58,11 +54,11 @@ protected:
/// \returns A detached block of the given size
mem_header * pop_free(unsigned size);
uintptr_t m_next;
size_t m_size;
mem_header *m_free[max_size - min_size + 1];
grow_callback m_grow;
heap_manager(const heap_manager &) = delete;
heap_allocator(const heap_allocator &) = delete;
};
} // namespace kutil

View File

@@ -130,6 +130,18 @@ public:
other.m_count = 0;
}
/// Move-assignment operator. Takes ownership of list elements.
/// Destructive towards current data! Existing nodes are not freed,
/// merely dropped (nodes are externally owned).
/// \arg other  List to take elements from; it is left empty.
/// \returns    *this
linked_list & operator=(linked_list &&other)
{
    // Guard against self-move-assignment: without this, clearing
    // `other` below would null out the very pointers we just copied
    // and silently lose the entire list.
    if (this != &other) {
        m_head = other.m_head;
        m_tail = other.m_tail;
        m_count = other.m_count;
        other.m_head = other.m_tail = nullptr;
        other.m_count = 0;
    }
    return *this;
}
/// Check if the list is empty.
/// \returns true if the list is empty
bool empty() const { return m_head == nullptr; }
@@ -140,7 +152,7 @@ public:
/// Count the items in the list.
/// \returns The number of entries in the list.
size_t count_length() const
size_t count_length()
{
size_t len = 0;
for (item_type *cur = m_head; cur; cur = cur->m_next) ++len;
@@ -274,14 +286,14 @@ public:
{
if (!item) return;
if (!existing)
if (!existing) {
push_back(item);
else if (existing == m_head)
} else if (existing == m_head) {
push_front(item);
else
} else {
existing->insert_before(item);
m_count += 1;
m_count += 1;
}
}
/// Inserts an item into the list after another given item.
@@ -291,14 +303,14 @@ public:
{
if (!item) return;
if (!existing)
if (!existing) {
push_front(item);
else if (existing == m_tail)
} else if (existing == m_tail) {
push_back(item);
else
} else {
existing->insert_after(item);
m_count += 1;
m_count += 1;
}
}
/// Insert an item into the list in a sorted position. Depends on T

View File

@@ -9,15 +9,6 @@ void * operator new (size_t, void *p) noexcept;
namespace kutil {
/// Allocate memory.
/// \arg n The number of bytes to allocate
/// \returns The allocated memory
void * malloc(size_t n);
/// Free memory allocated by malloc().
/// \arg p A pointer previously returned by malloc()
void free(void *p);
/// Fill memory with the given value.
/// \arg p The beginning of the memory area to fill
/// \arg v The byte value to fill memory with
@@ -67,14 +58,4 @@ inline T* mask_pointer(T *p, uintptr_t mask)
/// \arg off An optional offset into the region
uint8_t checksum(const void *p, size_t len, size_t off = 0);
class heap_manager;
namespace setup {
/// Set the heap that malloc() / free() will use.
/// \arg mm The heap manager for the heap to use.
void set_heap(heap_manager *mm);
} // namespace kutil::setup
} // namespace kutil

View File

@@ -1,6 +1,7 @@
#pragma once
/// \file slab_allocator.h
/// A slab allocator and related definitions
#include "kutil/allocator.h"
#include "kutil/assert.h"
#include "kutil/linked_list.h"
#include "kutil/memory.h"
@@ -9,19 +10,17 @@ namespace kutil {
/// A slab allocator for small structures kept in a linked list
template <typename T, typename Alloc = void * (*)(size_t)>
template <typename T, size_t N = memory::frame_size>
class slab_allocator :
public linked_list<T>
{
public:
using item_type = list_node<T>;
using alloc_type = Alloc;
/// Default constructor.
/// \arg chunk_size The size of chunk to allocate, in bytes. 0 means default.
/// \arg alloc The allocator to use to allocate chunks. Defaults to malloc().
slab_allocator(size_t chunk_size = 0, Alloc alloc = malloc) :
m_chunk_size(chunk_size),
slab_allocator(allocator &alloc) :
m_alloc(alloc)
{
}
@@ -46,18 +45,16 @@ public:
void allocate()
{
size_t size = m_chunk_size ? m_chunk_size : 10 * sizeof(item_type);
void *memory = m_alloc(size);
size_t count = size / sizeof(item_type);
constexpr unsigned count = N / sizeof(item_type);
void *memory = m_alloc.allocate(N);
item_type *items = reinterpret_cast<item_type *>(memory);
for (size_t i = 0; i < count; ++i)
this->push_back(&items[i]);
}
private:
size_t m_chunk_size;
Alloc m_alloc;
allocator& m_alloc;
};
} // namespace kutil

View File

@@ -4,6 +4,7 @@
#include <algorithm>
#include <utility>
#include "kutil/allocator.h"
#include "kutil/memory.h"
namespace kutil {
@@ -14,18 +15,20 @@ class vector
{
public:
/// Default constructor. Creates an empty vector with no capacity.
vector() :
vector(kutil::allocator &alloc = allocator::invalid) :
m_size(0),
m_capacity(0),
m_elements(nullptr)
m_elements(nullptr),
m_alloc(alloc)
{}
/// Constructor. Creates an empty array with capacity.
/// \arg capacity Initial capacity to allocate
vector(size_t capacity) :
vector(size_t capacity, allocator &alloc) :
m_size(0),
m_capacity(0),
m_elements(nullptr)
m_elements(nullptr),
m_alloc(alloc)
{
set_capacity(capacity);
}
@@ -34,7 +37,8 @@ public:
vector(const vector& other) :
m_size(0),
m_capacity(0),
m_elements(nullptr)
m_elements(nullptr),
m_alloc(other.m_alloc)
{
set_capacity(other.m_capacity);
kutil::memcpy(m_elements, other.m_elements, other.m_size * sizeof(T));
@@ -45,7 +49,8 @@ public:
vector(vector&& other) :
m_size(other.m_size),
m_capacity(other.m_capacity),
m_elements(other.m_elements)
m_elements(other.m_elements),
m_alloc(other.m_alloc)
{
other.m_size = 0;
other.m_capacity = 0;
@@ -142,7 +147,7 @@ public:
/// \arg capacity Number of elements to allocate
void set_capacity(size_t capacity)
{
T *new_array = reinterpret_cast<T *>(malloc(capacity * sizeof(T)));
T *new_array = m_alloc.allocate<T>(capacity);
size_t size = std::min(capacity, m_size);
kutil::memcpy(new_array, m_elements, size * sizeof(T));
@@ -151,7 +156,7 @@ public:
m_size = size;
m_capacity = capacity;
delete [] m_elements;
m_alloc.free(m_elements);
m_elements = new_array;
}
@@ -159,6 +164,7 @@ private:
size_t m_size;
size_t m_capacity;
T *m_elements;
allocator &m_alloc;
};
} // namespace kutil

View File

@@ -1,46 +1,11 @@
#include "kutil/memory.h"
#include "kutil/heap_manager.h"
namespace std {
enum class __attribute__ ((__type_visibility("default"))) align_val_t : size_t { };
}
#ifdef __POPCORN__
void * operator new(size_t n, std::align_val_t) { return kutil::malloc(n); }
void * operator new (size_t n) { return kutil::malloc(n); }
void * operator new[] (size_t n) { return kutil::malloc(n); }
void operator delete (void *p) noexcept { return kutil::free(p); }
void operator delete[] (void *p) noexcept { return kutil::free(p); }
#endif
namespace kutil {
namespace setup {
static heap_manager *heap_memory_manager;
void
set_heap(heap_manager *mm)
{
setup::heap_memory_manager = mm;
}
} // namespace kutil::setup
void *
malloc(size_t n)
{
return setup::heap_memory_manager->allocate(n);
}
void
free(void *p)
{
setup::heap_memory_manager->free(p);
}
void *
memset(void *s, uint8_t v, size_t n)
{

View File

@@ -6,6 +6,7 @@
#include <stdint.h>
#include "kutil/address_manager.h"
#include "kutil/allocator.h"
#include "catch.hpp"
using namespace kutil;
@@ -14,9 +15,18 @@ static const size_t max_block = 1ull << 36;
static const size_t start = max_block;
static const size_t GB = 1ull << 30;
class malloc_allocator :
public kutil::allocator
{
public:
virtual void * allocate(size_t n) override { return malloc(n); }
virtual void free(void *p) override { free(p); }
};
TEST_CASE( "Buddy addresses tests", "[address buddy]" )
{
address_manager am;
malloc_allocator alloc;
address_manager am(alloc);
am.add_regions(start, max_block * 2);
// Blocks should be:

View File

@@ -1,45 +0,0 @@
#include "kutil/frame_allocator.h"
#include "catch.hpp"
using namespace kutil;
TEST_CASE( "Frame allocator tests", "[memory frame]" )
{
frame_block_list free;
frame_block_list used;
frame_block_list cache;
// Seed the free list with two adjacent single-frame blocks at 0x1000
// and 0x2000; consolidation should merge them into one 2-frame block.
auto *f = new frame_block_list::item_type;
f->address = 0x1000;
f->count = 1;
f->flags = kutil::frame_block_flags::none;
free.sorted_insert(f);
auto *g = new frame_block_list::item_type;
g->address = 0x2000;
g->count = 1;
g->flags = kutil::frame_block_flags::none;
free.sorted_insert(g);
frame_allocator fa(std::move(cache));
fa.init(std::move(free), std::move(used));
fa.consolidate_blocks();
// A contiguous 2-frame allocation only succeeds if the two seeded
// blocks were merged.
uintptr_t a = 0;
size_t c = fa.allocate(2, &a);
CHECK( a == 0x1000 );
CHECK( c == 2 );
// Free, consolidate, and re-allocate: the same span should come back.
fa.free(a, 2);
a = 0;
fa.consolidate_blocks();
c = fa.allocate(2, &a);
CHECK( a == 0x1000 );
CHECK( c == 2 );
delete f;
delete g;
}

View File

@@ -0,0 +1,170 @@
#include <chrono>
#include <random>
#include <vector>
#include <signal.h>
#include <stddef.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/mman.h>
#include "kutil/memory.h"
#include "kutil/heap_allocator.h"
#include "catch.hpp"
using namespace kutil;
const size_t hs = 0x10; // header size
const size_t max_block = 1 << 22;
int signalled = 0;
void *signalled_at = nullptr;
void *mem_base = nullptr;
std::vector<size_t> sizes = {
16000, 8000, 4000, 4000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 150,
150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 48, 48, 48, 13 };
/// SIGSEGV handler for the test arena: record that a fault happened and
/// where, then unprotect the touched region so the faulting instruction
/// can be retried (SA_NODEFER allows re-entry).
void segfault_handler(int signum, siginfo_t *info, void *ctxp)
{
signalled += 1;
signalled_at = info->si_addr;
// NOTE(review): mprotect requires a page-aligned address, but si_addr
// is the raw faulting address. This only works if every fault lands
// exactly on a block/page boundary (as the heap's header writes do
// here) -- confirm, or align si_addr down before the call.
mprotect(signalled_at, max_block, PROT_READ|PROT_WRITE);
}
TEST_CASE( "Buddy blocks tests", "[memory buddy]" )
{
using clock = std::chrono::system_clock;
unsigned seed = clock::now().time_since_epoch().count();
std::default_random_engine rng(seed);
mem_base = aligned_alloc(max_block, max_block * 4);
// Catch segfaults so we can track memory access
struct sigaction sigact;
memset(&sigact, 0, sizeof(sigact));
sigemptyset(&sigact.sa_mask);
sigact.sa_flags = SA_NODEFER|SA_SIGINFO;
sigact.sa_sigaction = segfault_handler;
sigaction(SIGSEGV, &sigact, nullptr);
// Protect our memory arena so we trigger out fault handler
REQUIRE( mprotect(mem_base, max_block*4, PROT_NONE) == 0 );
heap_allocator mm(
reinterpret_cast<uintptr_t>(mem_base),
max_block * 4);
// Initial creation should not have allocated
CHECK( signalled == 0 );
signalled = 0;
// Allocating should signal just at the first page.
void *p = mm.allocate(max_block - hs);
CHECK( p == offset_pointer(mem_base, hs) );
CHECK( signalled == 1 );
CHECK( signalled_at == mem_base );
signalled = 0;
// Freeing and allocating should not allocate
mm.free(p);
p = mm.allocate(max_block - hs);
CHECK( p == offset_pointer(mem_base, hs) );
CHECK( signalled == 0 );
signalled = 0;
mm.free(p);
CHECK( signalled == 0 );
signalled = 0;
// Blocks should be:
// 22: 0-4M
std::vector<void *> allocs(6);
for (int i = 0; i < 6; ++i)
allocs[i] = mm.allocate(150); // size 8
// Should not have grown
CHECK( signalled == 0 );
signalled = 0;
// Blocks should be:
// 22: [0-4M]
// 21: [0-2M], 2-4M
// 20: [0-1M], 1-2M
// 19: [0-512K], 512K-1M
// 18: [0-256K], 256-512K
// 17: [0-128K], 128-256K
// 16: [0-64K], 64-128K
// 15: [0-32K], 32K-64K
// 14: [0-16K], 16K-32K
// 13: [0-8K], 8K-16K
// 12: [0-4K], 4K-8K
// 11: [0-2K], 2K-4K
// 10: [0-1K, 1-2K]
// 9: [0, 512, 1024], 1536
// 8: [0, 256, 512, 768, 1024, 1280]
// We have free memory at 1526 and 2K, but we should get 4K
void *big = mm.allocate(4000); // size 12
CHECK( signalled == 0 );
signalled = 0;
REQUIRE( big == offset_pointer(mem_base, 4096 + hs) );
mm.free(big);
// free up 512
mm.free(allocs[3]);
mm.free(allocs[4]);
// Blocks should be:
// ...
// 9: [0, 512, 1024], 1536
// 8: [0, 256, 512], 768, 1024, [1280]
// A request for a 512-block should not cross the buddy divide
big = mm.allocate(500); // size 9
REQUIRE( big >= offset_pointer(mem_base, 1536 + hs) );
mm.free(big);
mm.free(allocs[0]);
mm.free(allocs[1]);
mm.free(allocs[2]);
mm.free(allocs[5]);
allocs.clear();
std::shuffle(sizes.begin(), sizes.end(), rng);
allocs.reserve(sizes.size());
for (size_t size : sizes)
allocs.push_back(mm.allocate(size));
std::shuffle(allocs.begin(), allocs.end(), rng);
for (void *p: allocs)
mm.free(p);
allocs.clear();
big = mm.allocate(max_block / 2 + 1);
// If everything was freed / joined correctly, that should not have allocated
CHECK( signalled == 0 );
signalled = 0;
// And we should have gotten back the start of memory
CHECK( big == offset_pointer(mem_base, hs) );
// Allocating again should signal at the next page.
void *p2 = mm.allocate(max_block - hs);
CHECK( p2 == offset_pointer(mem_base, max_block + hs) );
CHECK( signalled == 1 );
CHECK( signalled_at == offset_pointer(mem_base, max_block) );
signalled = 0;
mm.free(p2);
CHECK( signalled == 0 );
signalled = 0;
free(mem_base);
}

View File

@@ -1,191 +0,0 @@
#include <chrono>
#include <random>
#include <vector>
#include <stddef.h>
#include <stdlib.h>
#include <stdint.h>
#include "kutil/memory.h"
#include "kutil/heap_manager.h"
#include "catch.hpp"
using namespace kutil;
static std::vector<void *> memory;
static size_t total_alloc_size = 0;
static size_t total_alloc_calls = 0;
const size_t hs = 0x10; // header size
const size_t max_block = 1 << 16;
std::vector<size_t> sizes = {
16000, 8000, 4000, 4000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 150,
150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 48, 48, 48, 13 };
/// Growth callback handed to heap_manager: tracks the total number of
/// grow calls and bytes requested, and hands back a max_block-aligned
/// chunk which is remembered for later cleanup by free_memory().
void * grow_callback(size_t length)
{
    ++total_alloc_calls;
    total_alloc_size += length;
    void *chunk = aligned_alloc(max_block, length * 2);
    memory.push_back(chunk);
    return chunk;
}
/// Release every chunk handed out by grow_callback and reset the
/// allocation-tracking counters for the next test case.
void free_memory()
{
    for (size_t i = 0; i < memory.size(); ++i)
        ::free(memory[i]);
    memory.clear();
    total_alloc_size = 0;
    total_alloc_calls = 0;
}
TEST_CASE( "Buddy blocks tests", "[memory buddy]" )
{
using clock = std::chrono::system_clock;
unsigned seed = clock::now().time_since_epoch().count();
std::default_random_engine rng(seed);
heap_manager mm(grow_callback);
// The ctor should have allocated an initial block
CHECK( total_alloc_size == max_block );
CHECK( total_alloc_calls == 1 );
// Blocks should be:
// 16: 0-64K
std::vector<void *> allocs(6);
for (int i = 0; i < 6; ++i)
allocs[i] = mm.allocate(150); // size 8
// Should not have grown
CHECK( total_alloc_size == max_block );
CHECK( total_alloc_calls == 1 );
CHECK( memory[0] != nullptr );
// Blocks should be:
// 16: [0-64K]
// 15: [0-32K], 32K-64K
// 14: [0-16K], 16K-32K
// 13: [0-8K], 8K-16K
// 12: [0-4K], 4K-8K
// 11: [0-2K], 2K-4K
// 10: [0-1K, 1-2K]
// 9: [0, 512, 1024], 1536
// 8: [0, 256, 512, 768, 1024, 1280]
// We have free memory at 1526 and 2K, but we should get 4K
void *big = mm.allocate(4000); // size 12
REQUIRE( big == offset_pointer(memory[0], 4096 + hs) );
mm.free(big);
// free up 512
mm.free(allocs[3]);
mm.free(allocs[4]);
// Blocks should be:
// ...
// 9: [0, 512, 1024], 1536
// 8: [0, 256, 512], 768, 1024, [1280]
// A request for a 512-block should not cross the buddy divide
big = mm.allocate(500); // size 9
REQUIRE( big >= offset_pointer(memory[0], 1536 + hs) );
mm.free(big);
mm.free(allocs[0]);
mm.free(allocs[1]);
mm.free(allocs[2]);
mm.free(allocs[5]);
allocs.clear();
std::shuffle(sizes.begin(), sizes.end(), rng);
allocs.reserve(sizes.size());
for (size_t size : sizes)
allocs.push_back(mm.allocate(size));
std::shuffle(allocs.begin(), allocs.end(), rng);
for (void *p: allocs)
mm.free(p);
allocs.clear();
big = mm.allocate(64000);
// If everything was freed / joined correctly, that should not have allocated
CHECK( total_alloc_size == max_block );
CHECK( total_alloc_calls == 1 );
// And we should have gotten back the start of memory
CHECK( big == offset_pointer(memory[0], hs) );
free_memory();
}
/// Report whether p falls inside any chunk recorded by grow_callback
/// (the chunk's end address is treated as inclusive, matching the
/// original check).
bool check_in_memory(void *p)
{
    for (void *base : memory) {
        void *limit = offset_pointer(base, max_block);
        if (p >= base && p <= limit)
            return true;
    }
    return false;
}
TEST_CASE( "Non-contiguous blocks tests", "[memory buddy]" )
{
using clock = std::chrono::system_clock;
unsigned seed = clock::now().time_since_epoch().count();
std::default_random_engine rng(seed);
heap_manager mm(grow_callback);
std::vector<void *> allocs;
const int blocks = 3;
for (int i = 0; i < blocks; ++i) {
void *p = mm.allocate(64000);
REQUIRE( memory[i] != nullptr );
REQUIRE( p == offset_pointer(memory[i], hs) );
allocs.push_back(p);
}
CHECK( total_alloc_size == max_block * blocks );
CHECK( total_alloc_calls == blocks );
for (void *p : allocs)
mm.free(p);
allocs.clear();
allocs.reserve(sizes.size() * blocks);
for (int i = 0; i < blocks; ++i) {
std::shuffle(sizes.begin(), sizes.end(), rng);
for (size_t size : sizes)
allocs.push_back(mm.allocate(size));
}
for (void *p : allocs)
CHECK( check_in_memory(p) );
std::shuffle(allocs.begin(), allocs.end(), rng);
for (void *p: allocs)
mm.free(p);
allocs.clear();
CHECK( total_alloc_size == max_block * blocks );
CHECK( total_alloc_calls == blocks );
for (int i = 0; i < blocks; ++i)
allocs.push_back(mm.allocate(64000));
// If everything was freed / joined correctly, that should not have allocated
CHECK( total_alloc_size == max_block * blocks );
CHECK( total_alloc_calls == blocks );
for (void *p : allocs)
CHECK( check_in_memory(p) );
free_memory();
}

View File

@@ -152,6 +152,7 @@ TEST_CASE( "Sorted list tests", "[containers list]" )
std::uniform_int_distribution<int> gen(1, 1000);
linked_list<sortableT> slist;
CHECK( slist.length() == 0 );
std::vector<list_node<sortableT>> sortables(test_list_size);
for (auto &i : sortables) {

View File

@@ -1,46 +1,2 @@
#define CATCH_CONFIG_MAIN
#include "catch.hpp"
#include <malloc.h>
void * operator new (size_t n) { return ::malloc(n); }
void * operator new[] (size_t n) { return ::malloc(n); }
void operator delete (void *p) noexcept { return ::free(p); }
void operator delete[] (void *p) noexcept { return ::free(p); }
#include "kutil/heap_manager.h"
#include "kutil/memory.h"
struct default_heap_listener :
public Catch::TestEventListenerBase
{
using TestEventListenerBase::TestEventListenerBase;
virtual void testCaseStarting(Catch::TestCaseInfo const& info) override
{
heap = new kutil::heap_manager(heap_grow_callback);
kutil::setup::set_heap(heap);
}
virtual void testCaseEnded(Catch::TestCaseStats const& stats) override
{
kutil::setup::set_heap(nullptr);
delete heap;
for (void *p : memory) ::free(p);
memory.clear();
}
static std::vector<void *> memory;
static kutil::heap_manager *heap;
static void * heap_grow_callback(size_t length) {
void *p = aligned_alloc(length, length);
memory.push_back(p);
return p;
}
};
std::vector<void *> default_heap_listener::memory;
kutil::heap_manager *default_heap_listener::heap;
CATCH_REGISTER_LISTENER( default_heap_listener );