[kernel] Remove explicit allocator passing

Many kernel objects had to hold references to allocators in order to
pass them on down the call chain. Remove those explicit references in
favor of `operator new` and `operator delete`, and define new `kalloc`
and `kfree` functions.

Also remove `slab_allocator` and replace it with a new mixin for slab
allocation, `slab_allocated`, that overrides `operator new` and
`operator delete` for its subclasses.

Finally, remove the now-unused headers `buddy_allocator.h` and
`address_manager.h`.
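
For illustration, a minimal sketch of how the new mixin is used (the
`foo` type here is hypothetical; `slab_allocated` and
`DEFINE_SLAB_ALLOCATOR` are the real names from this commit's
slab_allocated.h):

    #include "kutil/slab_allocated.h"

    // Hypothetical example type: inheriting the mixin gives foo a
    // per-type operator new/delete backed by a slab free list.
    struct foo : public kutil::slab_allocated<foo>
    {
        int value;
    };

    // Instantiate the static free list for foo, one frame per chunk.
    DEFINE_SLAB_ALLOCATOR(foo, 1);

    void example()
    {
        foo *f = new foo;   // pops a zero-filled slot from the slab
        f->value = 42;
        delete f;           // pushes the slot back onto the free list
    }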

Tags: memory
Justin C. Miller
2020-05-31 18:22:23 -07:00
parent 67b5f33d46
commit c6c3a556b3
19 changed files with 125 additions and 537 deletions

View File

@@ -49,7 +49,7 @@ class disk
public:
/// Constructor.
/// \arg start The start of the initrd in memory
disk(const void *start, kutil::allocator &alloc);
disk(const void *start);
/// Get the vector of files on the disk
const kutil::vector<file> & files() const { return m_files; }

View File

@@ -23,8 +23,7 @@ file::executable() const {
}
disk::disk(const void *start, kutil::allocator &alloc) :
m_files(alloc)
disk::disk(const void *start)
{
auto *header = reinterpret_cast<const disk_header *>(start);
size_t length = header->length;

View File

@@ -1,15 +0,0 @@
#pragma once
/// \file address_manager.h
/// The virtual memory address space manager
#include "kutil/buddy_allocator.h"
namespace kutil {
using address_manager =
buddy_allocator<
12, // Min allocation: 4KiB
36 // Max allocation: 64GiB
>;
} // namespace kutil

View File

@@ -5,6 +5,7 @@
#include <algorithm>
#include <stdint.h>
#include "kutil/assert.h"
#include "kutil/slab_allocated.h"
namespace kutil {
@@ -14,7 +15,8 @@ template <typename T> class avl_tree;
/// A node in a `avl_tree<T>`
template <typename T>
class avl_node :
public T
public T,
public slab_allocated<avl_node<T>>
{
public:
using item_type = T;
@@ -151,7 +153,7 @@ private:
return existing;
}
static node_type * remove(node_type *existing, node_type *subtrahend, allocator &alloc)
static node_type * remove(node_type *existing, node_type *subtrahend)
{
if (existing == nullptr)
return existing;
@@ -170,7 +172,7 @@ private:
*existing = *temp;
}
alloc.free(temp);
delete temp;
} else {
// Both children exist, find next node
node_type *temp = existing->m_right;
@@ -178,12 +180,12 @@ private:
temp = temp->m_left;
*existing = *temp;
existing->m_right = remove(existing->m_right, temp, alloc);
existing->m_right = remove(existing->m_right, temp);
}
} else if (existing->compare(subtrahend) < 0) {
existing->m_left = remove(existing->m_left, subtrahend, alloc);
existing->m_left = remove(existing->m_left, subtrahend);
} else {
existing->m_right = remove(existing->m_right, subtrahend, alloc);
existing->m_right = remove(existing->m_right, subtrahend);
}
if (!existing)
@@ -232,8 +234,8 @@ public:
inline node_type * root() { return m_root; }
inline unsigned count() const { return m_count; }
inline void remove(node_type *subtrahend, allocator &alloc) {
m_root = node_type::remove(m_root, subtrahend, alloc);
inline void remove(node_type *subtrahend) {
m_root = node_type::remove(m_root, subtrahend);
m_count--;
}

View File

@@ -1,302 +0,0 @@
#pragma once
/// \file buddy_allocator.h
/// Helper base class for buddy allocators with external node storage.
#include <stdint.h>
#include <utility>
#include "kutil/assert.h"
#include "kutil/linked_list.h"
#include "kutil/slab_allocator.h"
namespace kutil {
struct buddy_region;
template<
unsigned size_min,
unsigned size_max,
typename region_type = buddy_region>
class buddy_allocator
{
public:
using region_node = list_node<region_type>;
using region_list = linked_list<region_type>;
static const size_t min_alloc = (1 << size_min);
static const size_t max_alloc = (1 << size_max);
/// Default constructor creates an invalid object.
buddy_allocator() : m_alloc(allocator::invalid) {}
/// Constructor.
/// \arg alloc Allocator to use for region nodes
buddy_allocator(allocator &alloc) : m_alloc(alloc) {}
/// Move-like constructor. Takes ownership of existing regions.
buddy_allocator(buddy_allocator &&other, allocator &alloc) :
m_alloc(alloc)
{
for (unsigned i = 0; i < buckets; ++i) {
m_free[i] = std::move(other.m_free[i]);
m_used[i] = std::move(other.m_used[i]);
}
}
/// Add address space to be managed.
/// \arg start Initial address in the managed range
/// \arg length Size of the managed range, in bytes
void add_regions(uintptr_t start, size_t length)
{
uintptr_t p = start;
unsigned size = size_max;
while (size >= size_min) {
size_t chunk_size = 1ull << size;
while (p + chunk_size <= start + length) {
region_node *r = m_alloc.pop();
r->size = size_max;
r->address = p;
free_bucket(size).sorted_insert(r);
p += chunk_size;
}
size--;
}
}
/// Allocate address space from the managed area.
/// \arg length The amount of memory to allocate, in bytes
/// \returns The address of the start of the allocated area, or 0 on
/// failure
uintptr_t allocate(size_t length)
{
unsigned size = size_min;
while ((1ull << size) < length)
if (size++ == size_max)
return 0;
unsigned request = size;
while (free_bucket(request).empty())
if (request++ == size_max)
return 0;
region_node *r = nullptr;
while (request > size) {
r = free_bucket(request).pop_front();
region_node *n = split(r);
request--;
free_bucket(request).sorted_insert(n);
if (request != size)
free_bucket(request).sorted_insert(r);
}
if (r == nullptr) r = free_bucket(size).pop_front();
used_bucket(size).sorted_insert(r);
return r->address;
}
/// Mark a region as allocated.
/// \arg start The start of the region
/// \arg length The size of the region, in bytes
/// \returns The address of the start of the allocated area, or 0 on
/// failure. This may be less than `start`.
uintptr_t mark(uintptr_t start, size_t length)
{
uintptr_t end = start + length;
region_node *found = nullptr;
for (unsigned i = size_max; i >= size_min && !found; --i) {
for (auto *r : free_bucket(i)) {
if (start >= r->address && end <= r->end()) {
free_bucket(i).remove(r);
found = r;
break;
}
}
}
kassert(found, "buddy_allocator::mark called for unknown region");
if (!found)
return 0;
found = maybe_split(found, start, end);
used_bucket(found->size).sorted_insert(found);
return found->address;
}
/// Mark a region as permanently allocated. The region is not returned,
/// as the block can never be freed. This may remove several smaller
/// regions in order to more closely fit the region described.
/// \arg start The start of the region
/// \arg length The size of the region, in bytes
void mark_permanent(uintptr_t start, size_t length)
{
uintptr_t end = start + length;
for (unsigned i = size_max; i >= size_min; --i) {
for (auto *r : free_bucket(i)) {
if (start >= r->address && end <= r->end()) {
free_bucket(i).remove(r);
delete_region(r, start, end);
return;
}
}
}
kassert(false, "buddy_allocator::mark_permanent called for unknown region");
}
/// Free a previous allocation.
/// \arg p An address previously returned by allocate()
void free(uintptr_t p)
{
region_node *found = find(p, true);
kassert(found, "buddy_allocator::free called for unknown region");
if (!found)
return;
used_bucket(found->size).remove(found);
free_bucket(found->size).sorted_insert(found);
while (auto *bud = get_buddy(found)) {
region_node *eld = found->elder() ? found : bud;
region_node *other = found->elder() ? bud : found;
free_bucket(other->size).remove(other);
m_alloc.push(other);
free_bucket(eld->size).remove(eld);
eld->size++;
free_bucket(eld->size).sorted_insert(eld);
found = eld;
}
}
/// Check if an allocation exists
/// \arg addr Address within the managed space
/// \returns True if the address is in a region currently allocated
bool contains(uintptr_t addr)
{
for (unsigned i = size_max; i >= size_min; --i) {
for (auto *r : used_bucket(i)) {
if (r->contains(addr))
return true;
else if (r->address < addr)
break;
}
}
return false;
}
protected:
/// Split a region of the given size into two smaller regions, returning
/// the new latter half
region_node * split(region_node *reg)
{
region_node *other = m_alloc.pop();
other->size = --reg->size;
other->address = reg->address + (1ull << reg->size);
return other;
}
/// Find a node with the given address
region_node * find(uintptr_t p, bool used = true)
{
for (unsigned size = size_max; size >= size_min; --size) {
auto &list = used ? used_bucket(size) : free_bucket(size);
for (auto *f : list) {
if (f->address < p) continue;
if (f->address == p) return f;
break;
}
}
return nullptr;
}
/// Helper to get the buddy for a node, if it's adjacent
region_node * get_buddy(region_node *r)
{
region_node *b = r->elder() ? r->next() : r->prev();
if (b && b->address == r->buddy())
return b;
return nullptr;
}
region_node * maybe_split(region_node *reg, uintptr_t start, uintptr_t end)
{
while (reg->size > size_min) {
// Split if the request fits in the second half
if (start >= reg->half()) {
region_node *other = split(reg);
free_bucket(reg->size).sorted_insert(reg);
reg = other;
}
// Split if the request fits in the first half
else if (end <= reg->half()) {
region_node *other = split(reg);
free_bucket(other->size).sorted_insert(other);
}
// If neither, we've split as much as possible
else break;
}
return reg;
}
void delete_region(region_node *reg, uintptr_t start, uintptr_t end)
{
reg = maybe_split(reg, start, end);
size_t leading = start - reg->address;
size_t trailing = reg->end() - end;
if (leading > (1<<size_min) || trailing > (1<<size_min)) {
region_node *tail = split(reg);
delete_region(reg, start, reg->end());
delete_region(tail, tail->address, end);
} else {
m_alloc.push(reg);
}
}
region_list & used_bucket(unsigned size) { return m_used[size - size_min]; }
region_list & free_bucket(unsigned size) { return m_free[size - size_min]; }
static const unsigned buckets = (size_max - size_min + 1);
region_list m_free[buckets];
region_list m_used[buckets];
slab_allocator<region_type> m_alloc;
};
struct buddy_region
{
inline int compare(const buddy_region *o) const {
return address > o->address ? 1 : \
address < o->address ? -1 : 0;
}
inline uintptr_t end() const { return address + (1ull << size); }
inline uintptr_t half() const { return address + (1ull << (size - 1)); }
inline bool contains(uintptr_t p) const { return p >= address && p < end(); }
inline uintptr_t buddy() const { return address ^ (1ull << size); }
inline bool elder() const { return address < buddy(); }
uint16_t size;
uintptr_t address;
};
} // namespace kutil

View File

@@ -6,9 +6,20 @@
void * operator new (size_t, void *p) noexcept;
namespace kutil {
/// Allocate from the default allocator. Note this needs to be
/// implemented by users of the kutil library.
/// \arg size The size in bytes requested
/// \returns A pointer to the newly allocated memory,
/// or nullptr on error
void * kalloc(size_t size);
/// Free memory allocated by `kalloc`. Note this needs to be
/// implemented by users of the kutil library.
/// \arg p Pointer that was returned from a `kalloc` call
void kfree(void *p);
/// Fill memory with the given value.
/// \arg p The beginning of the memory area to fill
/// \arg v The byte value to fill memory with
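
As the comments above note, `kalloc` and `kfree` are only declared here;
each user of the kutil library must supply the definitions. A minimal
sketch for a hosted build such as unit tests (forwarding to the C
library heap is an assumption, not the kernel's actual implementation):

    #include <cstdlib>
    #include "kutil/memory.h"

    // Host-side stand-ins for the kernel's allocator hooks.
    void * kutil::kalloc(size_t size) { return std::malloc(size); }
    void kutil::kfree(void *p) { std::free(p); }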

View File

@@ -0,0 +1,52 @@
#pragma once
/// \file slab_allocated.h
/// A parent template class for slab-allocated objects
#include "kernel_memory.h"
#include "kutil/memory.h"
#include "kutil/vector.h"
namespace kutil {
/// Mixin that gives its subclass `T` slab-backed `operator new` and
/// `operator delete`, allocating backing memory `N` frames at a time.
template <typename T, unsigned N = 1>
class slab_allocated
{
public:
/// Allocate a single T from the per-type slab, refilling the free list
/// with a fresh chunk when it is empty. The returned memory is zeroed.
void * operator new(size_t size)
{
kassert(size == sizeof(T), "Slab allocator got wrong size allocation");
if (s_free.count() == 0)
allocate_chunk();
T *item = s_free.pop();
kutil::memset(item, 0, sizeof(T));
return item;
}
/// Return an object's slot to the slab's free list.
void operator delete(void *p) { s_free.append(reinterpret_cast<T*>(p)); }
// TODO: get rid of this terribleness
static void hacky_init_remove_me() {
memset(&s_free, 0, sizeof(s_free));
}
private:
/// Allocate `N` frames of memory with `kalloc` and carve them into
/// T-sized slots on the free list.
static void allocate_chunk()
{
size_t size = N * ::memory::frame_size;
s_free.ensure_capacity(size / sizeof(T));
void *memory = kalloc(size);
T *current = reinterpret_cast<T *>(memory);
T *end = offset_pointer(current, size);
while (current < end)
s_free.append(current++);
}
static vector<T*> s_free;
};
/// Define the static free list for a slab_allocated type; must appear
/// in exactly one translation unit per type.
#define DEFINE_SLAB_ALLOCATOR(type, N) \
template<> ::kutil::vector<type*> kutil::slab_allocated<type, N>::s_free {};
} // namespace kutil

View File

@@ -1,72 +0,0 @@
#pragma once
/// \file slab_allocator.h
/// A slab allocator and related definitions
#include "kutil/allocator.h"
#include "kutil/assert.h"
#include "kutil/linked_list.h"
#include "kutil/memory.h"
namespace kutil {
/// A slab allocator for small structures kept in a linked list
template <typename T, size_t N = memory::frame_size>
class slab_allocator :
public linked_list<T>,
public allocator
{
public:
using item_type = list_node<T>;
/// Constructor.
/// \arg alloc The allocator to use to allocate chunks
slab_allocator(allocator &alloc) :
m_alloc(alloc) {}
/// Allocator interface implementation
virtual void * allocate(size_t size) override
{
kassert(size == sizeof(T), "Asked slab allocator for wrong size");
if (size != sizeof(T)) return 0;
return pop();
}
/// Allocator interface implementation
virtual void free(void *p) override
{
push(static_cast<item_type*>(p));
}
/// Get an item from the cache. May allocate a new chunk if the cache is empty.
/// \returns An allocated element
inline item_type * pop()
{
if (this->empty()) this->allocate_chunk();
kassert(!this->empty(), "Slab allocator is empty after allocate()");
item_type *item = this->pop_front();
kutil::memset(item, 0, sizeof(item_type));
return item;
}
/// Return an item to the cache.
/// \arg item A previously allocated element
inline void push(item_type *item)
{
this->push_front(item);
}
private:
void allocate_chunk()
{
constexpr unsigned count = N / sizeof(item_type);
void *memory = m_alloc.allocate(N);
item_type *items = reinterpret_cast<item_type *>(memory);
for (size_t i = 0; i < count; ++i)
this->push_back(&items[i]);
}
allocator& m_alloc;
};
} // namespace kutil

View File

@@ -15,20 +15,18 @@ class vector
{
public:
/// Default constructor. Creates an empty vector with no capacity.
vector(kutil::allocator &alloc = allocator::invalid) :
vector() :
m_size(0),
m_capacity(0),
m_elements(nullptr),
m_alloc(alloc)
m_elements(nullptr)
{}
/// Constructor. Creates an empty array with capacity.
/// \arg capacity Initial capacity to allocate
vector(size_t capacity, allocator &alloc) :
vector(size_t capacity) :
m_size(0),
m_capacity(0),
m_elements(nullptr),
m_alloc(alloc)
m_elements(nullptr)
{
set_capacity(capacity);
}
@@ -37,8 +35,7 @@ public:
vector(const vector& other) :
m_size(0),
m_capacity(0),
m_elements(nullptr),
m_alloc(other.m_alloc)
m_elements(nullptr)
{
set_capacity(other.m_capacity);
kutil::memcpy(m_elements, other.m_elements, other.m_size * sizeof(T));
@@ -49,8 +46,7 @@ public:
vector(vector&& other) :
m_size(other.m_size),
m_capacity(other.m_capacity),
m_elements(other.m_elements),
m_alloc(other.m_alloc)
m_elements(other.m_elements)
{
other.m_size = 0;
other.m_capacity = 0;
@@ -61,7 +57,7 @@ public:
~vector()
{
while (m_size) remove();
m_alloc.free(m_elements);
kfree(m_elements);
}
/// Get the size of the array.
@@ -117,6 +113,14 @@ public:
m_elements[m_size].~T();
}
/// Remove an item from the end of the array and return it.
T pop()
{
T temp = m_elements[m_size - 1];
remove();
return temp;
}
/// Set the size of the array. Any new items are default constructed.
/// Any items past the end are deleted. The array is reallocated if needed.
/// \arg size The new size
@@ -149,7 +153,7 @@ public:
/// \arg capacity Number of elements to allocate
void set_capacity(size_t capacity)
{
T *new_array = m_alloc.allocate<T>(capacity);
T *new_array = reinterpret_cast<T*>(kalloc(capacity * sizeof(T)));
size_t size = std::min(capacity, m_size);
kutil::memcpy(new_array, m_elements, size * sizeof(T));
@@ -158,7 +162,7 @@ public:
m_size = size;
m_capacity = capacity;
m_alloc.free(m_elements);
kfree(m_elements);
m_elements = new_array;
}
@@ -166,7 +170,6 @@ private:
size_t m_size;
size_t m_capacity;
T *m_elements;
allocator &m_alloc;
};
} // namespace kutil
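
Taken together, these changes mean callers no longer thread an allocator
through vector construction, and the new `pop()` complements the
existing `append()`. A small usage sketch (all names as they appear in
this diff):

    kutil::vector<int> v(8);  // capacity constructor, no allocator argument
    v.append(42);             // backing storage now comes from kalloc
    int last = v.pop();       // new in this commit: remove and return the last item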

View File

@@ -5,7 +5,6 @@
#include <stdint.h>
#include "kutil/allocator.h"
#include "kutil/avl_tree.h"
#include "kutil/slab_allocator.h"
namespace kutil {
@@ -39,8 +38,7 @@ public:
/// Constructor. Define a range of managed VM space.
/// \arg start Starting address of the managed space
/// \arg size Size of the managed space, in bytes
/// \arg alloc Allocator to use for tracking objects
vm_space(uintptr_t start, size_t size, kutil::allocator &alloc);
vm_space(uintptr_t start, size_t size);
/// Reserve a section of address space.
/// \arg start Starting address of reservation, or 0 for any address
@@ -76,8 +74,6 @@ private:
node_type * split_out(node_type* node, uintptr_t start, size_t size, vm_state state);
node_type * consolidate(node_type* needle);
slab_allocator<node_type> m_slab;
allocator &m_alloc;
tree_type m_ranges;
};

View File

@@ -8,11 +8,14 @@ namespace kutil {
using node_type = kutil::avl_node<vm_range>;
using node_vec = kutil::vector<node_type*>;
vm_space::vm_space(uintptr_t start, size_t size, allocator &alloc) :
m_slab(alloc),
m_alloc(alloc)
DEFINE_SLAB_ALLOCATOR(node_type, 1);
vm_space::vm_space(uintptr_t start, size_t size)
{
node_type *node = m_slab.pop();
// TODO: replace this with real global ctor
slab_allocated<node_type>::hacky_init_remove_me();
node_type *node = new node_type;
node->address = start;
node->size = size;
node->state = vm_state::none;
@@ -22,9 +25,7 @@ vm_space::vm_space(uintptr_t start, size_t size, allocator &alloc) :
start, start+size);
}
vm_space::vm_space() :
m_slab(allocator::invalid),
m_alloc(allocator::invalid)
vm_space::vm_space()
{
}
@@ -77,7 +78,7 @@ vm_space::split_out(node_type *node, uintptr_t start, size_t size, vm_state stat
// Split off rest into new node
size_t leading = start - node->address;
node_type *next = m_slab.pop();
node_type *next = new node_type;
next->state = state;
next->address = start;
next->size = node->size - leading;
@@ -100,7 +101,7 @@ vm_space::split_out(node_type *node, uintptr_t start, size_t size, vm_state stat
size_t trailing = node->size - size;
node->size -= trailing;
node_type *next = m_slab.pop();
node_type *next = new node_type;
next->state = old_state;
next->address = node->end();
next->size = trailing;
@@ -132,7 +133,7 @@ inline void gather(node_type *node, node_vec &vec)
node_type *
vm_space::consolidate(node_type *needle)
{
node_vec nodes(m_ranges.count(), m_alloc);
node_vec nodes(m_ranges.count());
gather(m_ranges.root(), nodes);
node_type *prev = nullptr;
@@ -150,7 +151,7 @@ vm_space::consolidate(node_type *needle)
prev->size += node->size;
if (needle == node)
needle = prev;
m_ranges.remove(node, m_slab);
m_ranges.remove(node);
} else {
prev = node;
}