Use the address_manager to place allocations

This commit is contained in:
Justin C. Miller
2019-02-28 00:35:43 -08:00
parent 8cdc39fdee
commit 28cf5562ac
7 changed files with 122 additions and 61 deletions

View File

@@ -1,5 +1,6 @@
#include <algorithm>
#include <utility>
#include "kutil/address_manager.h"
#include "kutil/assert.h"
#include "kutil/frame_allocator.h"
#include "kutil/heap_manager.h"
@@ -16,17 +17,21 @@ static const size_t page_size = page_manager::page_size;
extern kutil::frame_allocator g_frame_allocator;
kutil::address_manager g_kernel_address_manager;
kutil::heap_manager g_kernel_heap_manager;
void * mm_grow_callback(void *next, size_t length)
void * mm_grow_callback(size_t length)
{
kassert(length % page_manager::page_size == 0,
"Heap manager requested a fractional page.");
size_t pages = length / page_manager::page_size;
log::info(logs::memory, "Heap manager growing heap by %d pages.", pages);
g_page_manager.map_pages(reinterpret_cast<uintptr_t>(next), pages);
return next;
uintptr_t addr = g_kernel_address_manager.allocate(length);
g_page_manager.map_pages(addr, pages);
return reinterpret_cast<void *>(addr);
}
@@ -59,6 +64,8 @@ namespace {
using block_allocator =
kutil::slab_allocator<kutil::frame_block, page_consumer &>;
using region_allocator =
kutil::slab_allocator<kutil::buddy_region, page_consumer &>;
}
enum class efi_memory_type : uint32_t
@@ -151,7 +158,9 @@ gather_block_lists(
case efi_memory_type::popcorn_kernel:
block_used = true;
block->flags = frame_block_flags::map_kernel;
block->flags =
frame_block_flags::permanent |
frame_block_flags::map_kernel;
break;
case efi_memory_type::popcorn_data:
@@ -297,6 +306,16 @@ memory_initialize(uint16_t scratch_pages, const void *memory_map, size_t map_len
block_slab.append(frame_block::consolidate(free));
block_slab.append(frame_block::consolidate(used));
region_allocator region_slab(page_size, allocator);
region_slab.allocate(); // Allocate some buddy regions for the address_manager
kutil::address_manager *am =
new (&g_kernel_address_manager) kutil::address_manager(std::move(region_slab));
am->add_regions(
page_manager::high_offset,
page_manager::page_offset - page_manager::high_offset);
// Finally, build an actual set of kernel page tables that just contains
// what the kernel actually has mapped, but making everything writable
// (especially the page tables themselves)
@@ -312,8 +331,6 @@ memory_initialize(uint16_t scratch_pages, const void *memory_map, size_t map_len
reinterpret_cast<void *>(allocator.current),
allocator.left());
uintptr_t heap_start = page_manager::high_offset;
for (auto *block : used) {
uintptr_t virt_addr = 0;
@@ -324,8 +341,10 @@ memory_initialize(uint16_t scratch_pages, const void *memory_map, size_t map_len
case frame_block_flags::map_kernel:
virt_addr = block->address + page_manager::high_offset;
heap_start = std::max(heap_start,
virt_addr + block->count * page_size);
if (block->flags && frame_block_flags::permanent)
am->mark_permanent(virt_addr, block->count * page_size);
else
am->mark(virt_addr, block->count * page_size);
break;
case frame_block_flags::map_offset:
@@ -348,8 +367,6 @@ memory_initialize(uint16_t scratch_pages, const void *memory_map, size_t map_len
pm->m_kernel_pml4 = pml4;
// Set the heap manager
new (&g_kernel_heap_manager) kutil::heap_manager(
reinterpret_cast<void *>(heap_start),
mm_grow_callback);
new (&g_kernel_heap_manager) kutil::heap_manager(mm_grow_callback);
kutil::setup::set_heap(&g_kernel_heap_manager);
}

View File

@@ -68,16 +68,11 @@ private:
heap_manager::heap_manager() :
m_start(nullptr),
m_length(0),
m_grow(nullptr)
{
kutil::memset(m_free, 0, sizeof(m_free));
}
heap_manager::heap_manager(void *start, grow_callback grow_cb) :
m_start(start),
m_length(0),
heap_manager::heap_manager(grow_callback grow_cb) :
m_grow(grow_cb)
{
kutil::memset(m_free, 0, sizeof(m_free));
@@ -125,13 +120,12 @@ heap_manager::grow_memory()
size_t length = (1 << max_size);
kassert(m_grow, "Tried to grow heap without a growth callback");
void *next = m_grow(kutil::offset_pointer(m_start, m_length), length);
void *next = m_grow(length);
mem_header *block = new (next) mem_header(nullptr, get_free(max_size), max_size);
get_free(max_size) = block;
if (block->next())
block->next()->set_prev(block);
m_length += length;
}
void

View File

@@ -19,10 +19,24 @@ template<
class buddy_allocator
{
public:
using region_node = list_node<region_type>;
using region_list = linked_list<region_type>;
/// Constructor.
buddy_allocator() {}
/// Constructor with an initial cache of region structs from bootstrapped
/// memory.
/// \arg cache List of pre-allocated, unused region_type structures
buddy_allocator(region_list cache)
{
m_alloc.append(cache);
}
/// Add address space to be managed.
/// \arg start Initial address in the managed range
/// \arg length Size of the managed range, in bytes
buddy_allocator(uintptr_t start, size_t length)
void add_regions(uintptr_t start, size_t length)
{
uintptr_t p = start;
unsigned size = size_max;
@@ -89,6 +103,7 @@ public:
for (unsigned i = size_max; i >= size_min && !found; --i) {
for (auto *r : free_bucket(i)) {
if (start >= r->address && end <= r->end()) {
free_bucket(i).remove(r);
found = r;
break;
}
@@ -99,29 +114,33 @@ public:
if (!found)
return 0;
while (found->size > size_min) {
// Split if the request fits in the second half
if (start >= found->half()) {
region_node *other = split(found);
free_bucket(found->size).sorted_insert(found);
found = other;
}
// Split if the request fits in the first half
else if (start + length < found->half()) {
region_node *other = split(found);
free_bucket(other->size).sorted_insert(other);
}
// If neither, we've split as much as possible
else
break;
}
found = maybe_split(found, start, end);
used_bucket(found->size).sorted_insert(found);
return found->address;
}
/// Mark a region as permanently allocated. Nothing is handed back to the
/// caller, as the block can never be freed. This may remove several smaller
/// regions in order to more closely fit the region described, so the area
/// actually reserved may begin before `start`.
/// \arg start   The start of the region
/// \arg length  The size of the region, in bytes
void mark_permanent(uintptr_t start, size_t length)
{
    uintptr_t end = start + length;
    for (unsigned i = size_max; i >= size_min; --i) {
        for (auto *r : free_bucket(i)) {
            if (start >= r->address && end <= r->end()) {
                // Detach the region from its free list before handing it
                // to delete_region: delete_region may split it and will
                // recycle the node via m_alloc, and pushing a still-linked
                // node would corrupt the free list. This mirrors the
                // remove() done on the allocate() path.
                free_bucket(i).remove(r);
                delete_region(r, start, end);
                return;
            }
        }
    }
    kassert(false, "buddy_allocator::mark_permanent called for unknown region");
}
/// Free a previous allocation.
/// \arg p An address previously retuned by allocate()
void free(uintptr_t p)
@@ -151,10 +170,6 @@ public:
}
protected:
using region_node = list_node<region_type>;
using region_list = linked_list<region_type>;
/// Split a region of the given size into two smaller regions, returning
/// the new latter half
region_node * split(region_node *reg)
@@ -189,10 +204,48 @@ protected:
return nullptr;
}
/// Repeatedly split a region until it is the smallest power-of-two region
/// that still contains [start, end). Split-off halves that do not contain
/// the request are returned to their free buckets.
/// \arg reg   Region currently containing [start, end)
/// \arg start The start of the requested range
/// \arg end   The end (exclusive) of the requested range
/// \returns   The (possibly smaller) region containing the range
region_node * maybe_split(region_node *reg, uintptr_t start, uintptr_t end)
{
    while (reg->size > size_min) {
        const uintptr_t mid = reg->half();

        if (start >= mid) {
            // Request lies entirely in the upper half: keep splitting
            // there, and put the lower half back on its free list
            region_node *upper = split(reg);
            free_bucket(reg->size).sorted_insert(reg);
            reg = upper;
        } else if (end <= mid) {
            // Request lies entirely in the lower half: free the upper half
            region_node *upper = split(reg);
            free_bucket(upper->size).sorted_insert(upper);
        } else {
            // Request straddles the midpoint; we've split as far as we can
            break;
        }
    }
    return reg;
}
/// Permanently remove a region from the allocator. The covering region is
/// first split down as tightly as possible around [start, end); if the
/// slack at either edge is still larger than a minimum-size block, the
/// region is halved and each side is trimmed recursively so less usable
/// space is lost. The region node itself is recycled onto the node cache.
/// \arg reg   Region covering [start, end), already detached from any list
/// \arg start The start of the area being deleted
/// \arg end   The end (exclusive) of the area being deleted
void delete_region(region_node *reg, uintptr_t start, uintptr_t end)
{
    reg = maybe_split(reg, start, end);

    size_t leading = start - reg->address;
    size_t trailing = reg->end() - end;

    // Use a size_t-width shift: region sizes in this allocator can exceed
    // 31 bits, where a plain `1 << n` (int) would be undefined behavior.
    const size_t min_block = size_t(1) << size_min;

    if (leading > min_block || trailing > min_block) {
        // Too much slack at the edges: halve the region and trim each
        // side separately. After split(), reg is the lower half and
        // tail the upper, so the deleted span divides at reg->end().
        region_node *tail = split(reg);
        delete_region(reg, start, reg->end());
        delete_region(tail, tail->address, end);
    } else {
        // Close enough a fit: swallow the whole region and recycle its node.
        m_alloc.push(reg);
    }
}
region_list & used_bucket(unsigned size) { return m_used[size - size_min]; }
region_list & free_bucket(unsigned size) { return m_free[size - size_min]; }
static const unsigned buckets = (size_max - size_min);
static const unsigned buckets = (size_max - size_min + 1);
region_list m_free[buckets];
region_list m_used[buckets];

View File

@@ -11,18 +11,16 @@ namespace kutil {
class heap_manager
{
public:
/// Callback signature for growth function. The next pointer is just a
/// hint; memory returned does not need to be contiguous, but needs to be
/// alined to the length requested.
using grow_callback = void * (*)(void *next, size_t length);
/// Callback signature for growth function. Memory returned does not need
/// to be contiguous, but needs to be aligned to the length requested.
using grow_callback = void * (*)(size_t length);
/// Default constructor. Creates an invalid manager.
heap_manager();
/// Constructor.
/// \arg start Pointer to the start of the heap to be managed
/// \arg grow_cb Function pointer to grow the heap size
heap_manager(void *start, grow_callback grow_cb);
heap_manager(grow_callback grow_cb);
/// Allocate memory from the area managed.
/// \arg length The amount of memory to allocate, in bytes
@@ -60,9 +58,7 @@ protected:
/// \returns A detached block of the given size
mem_header * pop_free(unsigned size);
mem_header *m_free[max_size - min_size];
void *m_start;
size_t m_length;
mem_header *m_free[max_size - min_size + 1];
grow_callback m_grow;

View File

@@ -12,7 +12,7 @@
using namespace kutil;
extern void * grow_callback(void*, size_t);
extern void * grow_callback(size_t);
extern void free_memory();
const size_t max_block = 1ull << 36;
@@ -21,14 +21,15 @@ const size_t GB = 1ull << 30;
TEST_CASE( "Buddy addresses tests", "[address buddy]" )
{
heap_manager mm(nullptr, grow_callback);
heap_manager mm(grow_callback);
kutil::setup::set_heap(&mm);
using clock = std::chrono::system_clock;
unsigned seed = clock::now().time_since_epoch().count();
std::default_random_engine rng(seed);
address_manager am(start, max_block * 2);
address_manager am;
am.add_regions(start, max_block * 2);
// Blocks should be:
// 36: 0-64G, 64-128G

View File

@@ -5,7 +5,7 @@
using namespace kutil;
extern void * grow_callback(void*, size_t);
extern void * grow_callback(size_t);
extern void free_memory();
const size_t max_block = 1ull << 36;
@@ -14,7 +14,7 @@ const size_t GB = 1ull << 30;
TEST_CASE( "Frame allocator tests", "[memory frame]" )
{
heap_manager mm(nullptr, grow_callback);
heap_manager mm(grow_callback);
kutil::setup::set_heap(&mm);
frame_block_list free;

View File

@@ -23,7 +23,7 @@ std::vector<size_t> sizes = {
16000, 8000, 4000, 4000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 150,
150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 48, 48, 48, 13 };
void * grow_callback(void *start, size_t length)
void * grow_callback(size_t length)
{
total_alloc_calls += 1;
total_alloc_size += length;
@@ -47,7 +47,7 @@ TEST_CASE( "Buddy blocks tests", "[memory buddy]" )
unsigned seed = clock::now().time_since_epoch().count();
std::default_random_engine rng(seed);
heap_manager mm(nullptr, grow_callback);
heap_manager mm(grow_callback);
// The ctor should have allocated an initial block
CHECK( total_alloc_size == max_block );
@@ -138,7 +138,7 @@ TEST_CASE( "Non-contiguous blocks tests", "[memory buddy]" )
unsigned seed = clock::now().time_since_epoch().count();
std::default_random_engine rng(seed);
heap_manager mm(nullptr, grow_callback);
heap_manager mm(grow_callback);
std::vector<void *> allocs;
const int blocks = 3;