[kernel] Remove explicit allocator passing

Many kernel objects had to keep hold of references to allocators in
order to pass them on down the call chain. Remove those explicit
references and use `operator new`, `operator delete`, and define new
`kalloc` and `kfree` functions.

Also remove `slab_allocator` and replace it with a new mixin for slab
allocation, `slab_allocated`, that overrides `operator new` and
`operator delete` for its subclass.

Remove some no-longer-used related headers: `buddy_allocator.h` and
`address_manager.h`.

Tags: memory
This commit is contained in:
Justin C. Miller
2020-05-31 18:22:23 -07:00
parent 67b5f33d46
commit c6c3a556b3
19 changed files with 125 additions and 537 deletions

View File

@@ -60,12 +60,7 @@ void irq4_callback(void *)
device_manager::device_manager(const void *root_table, kutil::allocator &alloc) : device_manager::device_manager(const void *root_table, kutil::allocator &alloc) :
m_lapic(nullptr), m_lapic(nullptr)
m_ioapics(alloc),
m_pci(alloc),
m_devices(alloc),
m_irqs(alloc),
m_blockdevs(alloc)
{ {
kassert(root_table != 0, "ACPI root table pointer is null."); kassert(root_table != 0, "ACPI root table pointer is null.");

View File

@@ -99,13 +99,13 @@ kernel_main(args::header *header)
log::debug(logs::boot, "Runtime service is at: %016lx", header->runtime_services); log::debug(logs::boot, "Runtime service is at: %016lx", header->runtime_services);
// Load the module tagged as initrd // Load the module tagged as initrd
kutil::vector<initrd::disk> initrds(heap); kutil::vector<initrd::disk> initrds;
for (unsigned i = 0; i < header->num_modules; ++i) { for (unsigned i = 0; i < header->num_modules; ++i) {
args::module &mod = header->modules[i]; args::module &mod = header->modules[i];
if (mod.type != args::mod_type::initrd) if (mod.type != args::mod_type::initrd)
continue; continue;
initrd::disk &ird = initrds.emplace(mod.location, heap); initrd::disk &ird = initrds.emplace(mod.location);
log::info(logs::boot, "initrd loaded with %d files.", ird.files().count()); log::info(logs::boot, "initrd loaded with %d files.", ird.files().count());
for (auto &f : ird.files()) for (auto &f : ird.files())
log::info(logs::boot, " %s%s (%d bytes).", f.executable() ? "*" : "", f.name(), f.size()); log::info(logs::boot, " %s%s (%d bytes).", f.executable() ? "*" : "", f.name(), f.size());
@@ -159,7 +159,7 @@ kernel_main(args::header *header)
devices->get_lapic()->calibrate_timer(); devices->get_lapic()->calibrate_timer();
syscall_enable(); syscall_enable();
scheduler *sched = new (&scheduler::get()) scheduler(devices->get_lapic(), heap); scheduler *sched = new (&scheduler::get()) scheduler(devices->get_lapic());
sched->create_kernel_task(-1, logger_task); sched->create_kernel_task(-1, logger_task);

View File

@@ -31,6 +31,11 @@ void * operator new [] (size_t size) { return g_kernel_heap.allocate(size)
void operator delete (void *p) noexcept { return g_kernel_heap.free(p); } void operator delete (void *p) noexcept { return g_kernel_heap.free(p); }
void operator delete [] (void *p) noexcept { return g_kernel_heap.free(p); } void operator delete [] (void *p) noexcept { return g_kernel_heap.free(p); }
namespace kutil {
void * kalloc(size_t size) { return g_kernel_heap.allocate(size); }
void kfree(void *p) { return g_kernel_heap.free(p); }
}
void walk_page_table( void walk_page_table(
page_table *table, page_table *table,
page_table::level level, page_table::level level,
@@ -90,8 +95,7 @@ memory_initialize(args::header *kargs)
new (&g_kernel_space) kutil::vm_space { new (&g_kernel_space) kutil::vm_space {
kernel_offset, kernel_offset,
(page_offset-kernel_offset), (page_offset-kernel_offset)};
g_kernel_heap};
uintptr_t current_start = 0; uintptr_t current_start = 0;

View File

@@ -12,7 +12,6 @@ static j6_koid_t next_koid;
kobject::kobject(type t, j6_signal_t signals) : kobject::kobject(type t, j6_signal_t signals) :
m_koid(koid_generate(t)), m_koid(koid_generate(t)),
m_signals(signals), m_signals(signals),
m_observers(g_kernel_heap),
m_handle_count(0) m_handle_count(0)
{} {}

View File

@@ -1,4 +1,4 @@
#include "kutil/heap_allocator.h" #include "kutil/assert.h"
#include "cpu.h" #include "cpu.h"
#include "debug.h" #include "debug.h"
#include "log.h" #include "log.h"
@@ -6,7 +6,6 @@
#include "scheduler.h" #include "scheduler.h"
extern "C" void task_fork_return_thunk(); extern "C" void task_fork_return_thunk();
extern kutil::heap_allocator g_kernel_heap; // TODO: this is a bad hack to get access to the heap
void void
process::exit(uint32_t code) process::exit(uint32_t code)
@@ -68,7 +67,7 @@ process::setup_kernel_stack()
constexpr unsigned null_frame_entries = 2; constexpr unsigned null_frame_entries = 2;
constexpr size_t null_frame_size = null_frame_entries * sizeof(uint64_t); constexpr size_t null_frame_size = null_frame_entries * sizeof(uint64_t);
void *stack_bottom = g_kernel_heap.allocate(initial_stack_size); void *stack_bottom = kutil::kalloc(initial_stack_size);
kutil::memset(stack_bottom, 0, initial_stack_size); kutil::memset(stack_bottom, 0, initial_stack_size);
log::debug(logs::memory, "Created kernel stack at %016lx size 0x%lx", log::debug(logs::memory, "Created kernel stack at %016lx size 0x%lx",

View File

@@ -16,7 +16,7 @@
using memory::initial_stack; using memory::initial_stack;
scheduler scheduler::s_instance(nullptr, kutil::allocator::invalid); scheduler scheduler::s_instance(nullptr);
const uint64_t rflags_noint = 0x002; const uint64_t rflags_noint = 0x002;
const uint64_t rflags_int = 0x202; const uint64_t rflags_int = 0x202;
@@ -28,12 +28,11 @@ extern "C" {
extern uint64_t idle_stack_end; extern uint64_t idle_stack_end;
scheduler::scheduler(lapic *apic, kutil::allocator &alloc) : scheduler::scheduler(lapic *apic) :
m_apic(apic), m_apic(apic),
m_next_pid(1), m_next_pid(1)
m_process_allocator(alloc)
{ {
auto *idle = m_process_allocator.pop(); auto *idle = new process_node;
uint8_t last_pri = num_priorities - 1; uint8_t last_pri = num_priorities - 1;
// The kernel idle task, also the thread we're in now // The kernel idle task, also the thread we're in now
@@ -121,7 +120,7 @@ scheduler::create_process(pid_t pid)
{ {
kassert(pid <= 0, "Cannot specify a positive pid in create_process"); kassert(pid <= 0, "Cannot specify a positive pid in create_process");
auto *proc = m_process_allocator.pop(); auto *proc = new process_node;
proc->pid = pid ? pid : m_next_pid++; proc->pid = pid ? pid : m_next_pid++;
proc->priority = default_priority; proc->priority = default_priority;
return proc; return proc;
@@ -233,7 +232,7 @@ void scheduler::prune(uint64_t now)
if (parent && parent->wake_on_child(remove)) { if (parent && parent->wake_on_child(remove)) {
m_blocked.remove(parent); m_blocked.remove(parent);
m_runlists[parent->priority].push_front(parent); m_runlists[parent->priority].push_front(parent);
m_process_allocator.push(remove); delete remove;
} else { } else {
m_exited.push_back(remove); m_exited.push_back(remove);
} }

View File

@@ -4,7 +4,6 @@
#include <stdint.h> #include <stdint.h>
#include "kutil/allocator.h" #include "kutil/allocator.h"
#include "kutil/slab_allocator.h"
#include "process.h" #include "process.h"
class lapic; class lapic;
@@ -31,8 +30,7 @@ public:
/// Constructor. /// Constructor.
/// \arg apic Pointer to the local APIC object /// \arg apic Pointer to the local APIC object
/// \arg alloc Allocator to use for TCBs scheduler(lapic *apic);
scheduler(lapic *apic, kutil::allocator &alloc);
/// Create a new process from a program image in memory. /// Create a new process from a program image in memory.
/// \arg name Name of the program image /// \arg name Name of the program image
@@ -86,9 +84,6 @@ private:
uint32_t m_next_pid; uint32_t m_next_pid;
uint32_t m_tick_count; uint32_t m_tick_count;
using process_slab = kutil::slab_allocator<process>;
process_slab m_process_allocator;
process_node *m_current; process_node *m_current;
process_list m_runlists[num_priorities]; process_list m_runlists[num_priorities];
process_list m_blocked; process_list m_blocked;

View File

@@ -49,7 +49,7 @@ class disk
public: public:
/// Constructor. /// Constructor.
/// \arg start The start of the initrd in memory /// \arg start The start of the initrd in memory
disk(const void *start, kutil::allocator &alloc); disk(const void *start);
/// Get the vector of files on the disk /// Get the vector of files on the disk
const kutil::vector<file> & files() const { return m_files; } const kutil::vector<file> & files() const { return m_files; }

View File

@@ -23,8 +23,7 @@ file::executable() const {
} }
disk::disk(const void *start, kutil::allocator &alloc) : disk::disk(const void *start)
m_files(alloc)
{ {
auto *header = reinterpret_cast<const disk_header *>(start); auto *header = reinterpret_cast<const disk_header *>(start);
size_t length = header->length; size_t length = header->length;

View File

@@ -1,15 +0,0 @@
#pragma once
/// \file address_manager.h
/// The virtual memory address space manager
#include "kutil/buddy_allocator.h"
namespace kutil {
using address_manager =
buddy_allocator<
12, // Min allocation: 4KiB
36 // Max allocation: 64GiB
>;
} //namespace kutil

View File

@@ -5,6 +5,7 @@
#include <algorithm> #include <algorithm>
#include <stdint.h> #include <stdint.h>
#include "kutil/assert.h" #include "kutil/assert.h"
#include "kutil/slab_allocated.h"
namespace kutil { namespace kutil {
@@ -14,7 +15,8 @@ template <typename T> class avl_tree;
/// A node in a `avl_tree<T>` /// A node in a `avl_tree<T>`
template <typename T> template <typename T>
class avl_node : class avl_node :
public T public T,
public slab_allocated<avl_node<T>>
{ {
public: public:
using item_type = T; using item_type = T;
@@ -151,7 +153,7 @@ private:
return existing; return existing;
} }
static node_type * remove(node_type *existing, node_type *subtrahend, allocator &alloc) static node_type * remove(node_type *existing, node_type *subtrahend)
{ {
if (existing == nullptr) if (existing == nullptr)
return existing; return existing;
@@ -170,7 +172,7 @@ private:
*existing = *temp; *existing = *temp;
} }
alloc.free(temp); delete temp;
} else { } else {
// Both children exist, find next node // Both children exist, find next node
node_type *temp = existing->m_right; node_type *temp = existing->m_right;
@@ -178,12 +180,12 @@ private:
temp = temp->m_left; temp = temp->m_left;
*existing = *temp; *existing = *temp;
existing->m_right = remove(existing->m_right, temp, alloc); existing->m_right = remove(existing->m_right, temp);
} }
} else if (existing->compare(subtrahend) < 0) { } else if (existing->compare(subtrahend) < 0) {
existing->m_left = remove(existing->m_left, subtrahend, alloc); existing->m_left = remove(existing->m_left, subtrahend);
} else { } else {
existing->m_right = remove(existing->m_right, subtrahend, alloc); existing->m_right = remove(existing->m_right, subtrahend);
} }
if (!existing) if (!existing)
@@ -232,8 +234,8 @@ public:
inline node_type * root() { return m_root; } inline node_type * root() { return m_root; }
inline unsigned count() const { return m_count; } inline unsigned count() const { return m_count; }
inline void remove(node_type *subtrahend, allocator &alloc) { inline void remove(node_type *subtrahend) {
m_root = node_type::remove(m_root, subtrahend, alloc); m_root = node_type::remove(m_root, subtrahend);
m_count--; m_count--;
} }

View File

@@ -1,302 +0,0 @@
#pragma once
/// \file buddy_allocator.h
/// Helper base class for buddy allocators with external node storage.
#include <stdint.h>
#include <utility>
#include "kutil/assert.h"
#include "kutil/linked_list.h"
#include "kutil/slab_allocator.h"
namespace kutil {
struct buddy_region;
template<
unsigned size_min,
unsigned size_max,
typename region_type = buddy_region>
class buddy_allocator
{
public:
using region_node = list_node<region_type>;
using region_list = linked_list<region_type>;
static const size_t min_alloc = (1 << size_min);
static const size_t max_alloc = (1 << size_max);
/// Default constructor creates an invalid object.
buddy_allocator() : m_alloc(allocator::invalid) {}
/// Constructor.
/// \arg alloc Allocator to use for region nodes
buddy_allocator(allocator &alloc) : m_alloc(alloc) {}
/// Move-like constructor. Takes ownership of existing regions.
buddy_allocator(buddy_allocator &&other, allocator &alloc) :
m_alloc(alloc)
{
for (unsigned i = 0; i < buckets; ++i) {
m_free[i] = std::move(other.m_free[i]);
m_used[i] = std::move(other.m_used[i]);
}
}
/// Add address space to be managed.
/// \arg start Initial address in the managed range
/// \arg length Size of the managed range, in bytes
void add_regions(uintptr_t start, size_t length)
{
uintptr_t p = start;
unsigned size = size_max;
while (size >= size_min) {
size_t chunk_size = 1ull << size;
while (p + chunk_size <= start + length) {
region_node *r = m_alloc.pop();
r->size = size_max;
r->address = p;
free_bucket(size).sorted_insert(r);
p += chunk_size;
}
size--;
}
}
/// Allocate address space from the managed area.
/// \arg length The amount of memory to allocate, in bytes
/// \returns The address of the start of the allocated area, or 0 on
/// failure
uintptr_t allocate(size_t length)
{
unsigned size = size_min;
while ((1ull << size) < length)
if (size++ == size_max)
return 0;
unsigned request = size;
while (free_bucket(request).empty())
if (request++ == size_max)
return 0;
region_node *r = nullptr;
while (request > size) {
r = free_bucket(request).pop_front();
region_node *n = split(r);
request--;
free_bucket(request).sorted_insert(n);
if (request != size)
free_bucket(request).sorted_insert(r);
}
if (r == nullptr) r = free_bucket(size).pop_front();
used_bucket(size).sorted_insert(r);
return r->address;
}
/// Mark a region as allocated.
/// \arg start The start of the region
/// \arg length The size of the region, in bytes
/// \returns The address of the start of the allocated area, or 0 on
/// failure. This may be less than `start`.
uintptr_t mark(uintptr_t start, size_t length)
{
uintptr_t end = start + length;
region_node *found = nullptr;
for (unsigned i = size_max; i >= size_min && !found; --i) {
for (auto *r : free_bucket(i)) {
if (start >= r->address && end <= r->end()) {
free_bucket(i).remove(r);
found = r;
break;
}
}
}
kassert(found, "buddy_allocator::mark called for unknown region");
if (!found)
return 0;
found = maybe_split(found, start, end);
used_bucket(found->size).sorted_insert(found);
return found->address;
}
/// Mark a region as permanently allocated. The region is not returned,
/// as the block can never be freed. This may remove several smaller
/// regions in order to more closely fit the region described.
/// \arg start The start of the region
/// \arg length The size of the region, in bytes
/// \returns The address of the start of the allocated area, or 0 on
/// failure. This may be less than `start`.
void mark_permanent(uintptr_t start, size_t length)
{
uintptr_t end = start + length;
for (unsigned i = size_max; i >= size_min; --i) {
for (auto *r : free_bucket(i)) {
if (start >= r->address && end <= r->end()) {
free_bucket(i).remove(r);
delete_region(r, start, end);
return;
}
}
}
kassert(false, "buddy_allocator::mark_permanent called for unknown region");
}
/// Free a previous allocation.
/// \arg p An address previously retuned by allocate()
void free(uintptr_t p)
{
region_node *found = find(p, true);
kassert(found, "buddy_allocator::free called for unknown region");
if (!found)
return;
used_bucket(found->size).remove(found);
free_bucket(found->size).sorted_insert(found);
while (auto *bud = get_buddy(found)) {
region_node *eld = found->elder() ? found : bud;
region_node *other = found->elder() ? bud : found;
free_bucket(other->size).remove(other);
m_alloc.push(other);
free_bucket(eld->size).remove(eld);
eld->size++;
free_bucket(eld->size).sorted_insert(eld);
found = eld;
}
}
/// Check if an allocation exists
/// \arg addr Address within the managed space
/// \returns True if the address is in a region currently allocated
bool contains(uintptr_t addr)
{
for (unsigned i = size_max; i >= size_min; --i) {
for (auto *r : used_bucket(i)) {
if (r->contains(addr))
return true;
else if (r->address < addr)
break;
}
}
return false;
}
protected:
/// Split a region of the given size into two smaller regions, returning
/// the new latter half
region_node * split(region_node *reg)
{
region_node *other = m_alloc.pop();
other->size = --reg->size;
other->address = reg->address + (1ull << reg->size);
return other;
}
/// Find a node with the given address
region_node * find(uintptr_t p, bool used = true)
{
for (unsigned size = size_max; size >= size_min; --size) {
auto &list = used ? used_bucket(size) : free_bucket(size);
for (auto *f : list) {
if (f->address < p) continue;
if (f->address == p) return f;
break;
}
}
return nullptr;
}
/// Helper to get the buddy for a node, if it's adjacent
region_node * get_buddy(region_node *r)
{
region_node *b = r->elder() ? r->next() : r->prev();
if (b && b->address == r->buddy())
return b;
return nullptr;
}
region_node * maybe_split(region_node *reg, uintptr_t start, uintptr_t end)
{
while (reg->size > size_min) {
// Split if the request fits in the second half
if (start >= reg->half()) {
region_node *other = split(reg);
free_bucket(reg->size).sorted_insert(reg);
reg = other;
}
// Split if the request fits in the first half
else if (end <= reg->half()) {
region_node *other = split(reg);
free_bucket(other->size).sorted_insert(other);
}
// If neither, we've split as much as possible
else break;
}
return reg;
}
void delete_region(region_node *reg, uintptr_t start, uintptr_t end)
{
reg = maybe_split(reg, start, end);
size_t leading = start - reg->address;
size_t trailing = reg->end() - end;
if (leading > (1<<size_min) || trailing > (1<<size_min)) {
region_node *tail = split(reg);
delete_region(reg, start, reg->end());
delete_region(tail, tail->address, end);
} else {
m_alloc.push(reg);
}
}
region_list & used_bucket(unsigned size) { return m_used[size - size_min]; }
region_list & free_bucket(unsigned size) { return m_free[size - size_min]; }
static const unsigned buckets = (size_max - size_min + 1);
region_list m_free[buckets];
region_list m_used[buckets];
slab_allocator<region_type> m_alloc;
};
struct buddy_region
{
inline int compare(const buddy_region *o) const {
return address > o->address ? 1 : \
address < o->address ? -1 : 0;
}
inline uintptr_t end() const { return address + (1ull << size); }
inline uintptr_t half() const { return address + (1ull << (size - 1)); }
inline bool contains(uintptr_t p) const { return p >= address && p < end(); }
inline uintptr_t buddy() const { return address ^ (1ull << size); }
inline bool elder() const { return address < buddy(); }
uint16_t size;
uintptr_t address;
};
} // namespace kutil

View File

@@ -6,9 +6,20 @@
void * operator new (size_t, void *p) noexcept; void * operator new (size_t, void *p) noexcept;
namespace kutil { namespace kutil {
/// Allocate from the default allocator. Note this needs to be
/// implemented by users of the kutil library.
/// \arg size The size in bytes requested
/// \returns A pointer to the newly allocated memory,
/// or nullptr on error
void * kalloc(size_t size);
/// Free memory allocated by `kalloc`. Note this needs to be
/// implemented by users of the kutil library.
/// \arg p Pointer that was returned from a `kalloc` call
void kfree(void *p);
/// Fill memory with the given value. /// Fill memory with the given value.
/// \arg p The beginning of the memory area to fill /// \arg p The beginning of the memory area to fill
/// \arg v The byte value to fill memory with /// \arg v The byte value to fill memory with

View File

@@ -0,0 +1,52 @@
#pragma once
/// \file slab_allocated.h
/// A parent template class for slab-allocated objects
#include "kernel_memory.h"
#include "kutil/memory.h"
#include "kutil/vector.h"
namespace kutil {
template <typename T, unsigned N = 1>
class slab_allocated
{
public:
void * operator new(size_t size)
{
kassert(size == sizeof(T), "Slab allocator got wrong size allocation");
if (s_free.count() == 0)
allocate_chunk();
T *item = s_free.pop();
kutil::memset(item, 0, sizeof(T));
return item;
}
void operator delete(void *p) { s_free.append(reinterpret_cast<T*>(p)); }
// TODO: get rid of this terribleness
static void hacky_init_remove_me() {
memset(&s_free, 0, sizeof(s_free));
}
private:
static void allocate_chunk()
{
size_t size = N * ::memory::frame_size;
s_free.ensure_capacity(size / sizeof(T));
void *memory = kalloc(size);
T *current = reinterpret_cast<T *>(memory);
T *end = offset_pointer(current, size);
while (current < end)
s_free.append(current++);
}
static vector<T*> s_free;
};
#define DEFINE_SLAB_ALLOCATOR(type, N) \
template<> ::kutil::vector<type*> kutil::slab_allocated<type, N>::s_free {};
} // namespace kutil

View File

@@ -1,72 +0,0 @@
#pragma once
/// \file slab_allocator.h
/// A slab allocator and related definitions
#include "kutil/allocator.h"
#include "kutil/assert.h"
#include "kutil/linked_list.h"
#include "kutil/memory.h"
namespace kutil {
/// A slab allocator for small structures kept in a linked list
template <typename T, size_t N = memory::frame_size>
class slab_allocator :
public linked_list<T>,
public allocator
{
public:
using item_type = list_node<T>;
/// Default constructor.
/// \arg alloc The allocator to use to allocate chunks. Defaults to malloc().
slab_allocator(allocator &alloc) :
m_alloc(alloc) {}
/// Allocator interface implementation
virtual void * allocate(size_t size) override
{
kassert(size == sizeof(T), "Asked slab allocator for wrong size");
if (size != sizeof(T)) return 0;
return pop();
}
/// Allocator interface implementation
virtual void free(void *p) override
{
push(static_cast<item_type*>(p));
}
/// Get an item from the cache. May allocate a new chunk if the cache is empty.
/// \returns An allocated element
inline item_type * pop()
{
if (this->empty()) this->allocate_chunk();
kassert(!this->empty(), "Slab allocator is empty after allocate()");
item_type *item = this->pop_front();
kutil::memset(item, 0, sizeof(item_type));
return item;
}
/// Return an item to the cache.
/// \arg item A previously allocated element
inline void push(item_type *item)
{
this->push_front(item);
}
private:
void allocate_chunk()
{
constexpr unsigned count = N / sizeof(item_type);
void *memory = m_alloc.allocate(N);
item_type *items = reinterpret_cast<item_type *>(memory);
for (size_t i = 0; i < count; ++i)
this->push_back(&items[i]);
}
allocator& m_alloc;
};
} // namespace kutil

View File

@@ -15,20 +15,18 @@ class vector
{ {
public: public:
/// Default constructor. Creates an empty vector with no capacity. /// Default constructor. Creates an empty vector with no capacity.
vector(kutil::allocator &alloc = allocator::invalid) : vector() :
m_size(0), m_size(0),
m_capacity(0), m_capacity(0),
m_elements(nullptr), m_elements(nullptr)
m_alloc(alloc)
{} {}
/// Constructor. Creates an empty array with capacity. /// Constructor. Creates an empty array with capacity.
/// \arg capacity Initial capacity to allocate /// \arg capacity Initial capacity to allocate
vector(size_t capacity, allocator &alloc) : vector(size_t capacity) :
m_size(0), m_size(0),
m_capacity(0), m_capacity(0),
m_elements(nullptr), m_elements(nullptr)
m_alloc(alloc)
{ {
set_capacity(capacity); set_capacity(capacity);
} }
@@ -37,8 +35,7 @@ public:
vector(const vector& other) : vector(const vector& other) :
m_size(0), m_size(0),
m_capacity(0), m_capacity(0),
m_elements(nullptr), m_elements(nullptr)
m_alloc(other.m_alloc)
{ {
set_capacity(other.m_capacity); set_capacity(other.m_capacity);
kutil::memcpy(m_elements, other.m_elements, other.m_size * sizeof(T)); kutil::memcpy(m_elements, other.m_elements, other.m_size * sizeof(T));
@@ -49,8 +46,7 @@ public:
vector(vector&& other) : vector(vector&& other) :
m_size(other.m_size), m_size(other.m_size),
m_capacity(other.m_capacity), m_capacity(other.m_capacity),
m_elements(other.m_elements), m_elements(other.m_elements)
m_alloc(other.m_alloc)
{ {
other.m_size = 0; other.m_size = 0;
other.m_capacity = 0; other.m_capacity = 0;
@@ -61,7 +57,7 @@ public:
~vector() ~vector()
{ {
while (m_size) remove(); while (m_size) remove();
m_alloc.free(m_elements); kfree(m_elements);
} }
/// Get the size of the array. /// Get the size of the array.
@@ -117,6 +113,14 @@ public:
m_elements[m_size].~T(); m_elements[m_size].~T();
} }
/// Remove an item from the end of the array and return it.
T pop()
{
T temp = m_elements[m_size - 1];
remove();
return temp;
}
/// Set the size of the array. Any new items are default constructed. /// Set the size of the array. Any new items are default constructed.
/// Any items past the end are deleted. The array is realloced if needed. /// Any items past the end are deleted. The array is realloced if needed.
/// \arg size The new size /// \arg size The new size
@@ -149,7 +153,7 @@ public:
/// \arg capacity Number of elements to allocate /// \arg capacity Number of elements to allocate
void set_capacity(size_t capacity) void set_capacity(size_t capacity)
{ {
T *new_array = m_alloc.allocate<T>(capacity); T *new_array = reinterpret_cast<T*>(kalloc(capacity * sizeof(T)));
size_t size = std::min(capacity, m_size); size_t size = std::min(capacity, m_size);
kutil::memcpy(new_array, m_elements, size * sizeof(T)); kutil::memcpy(new_array, m_elements, size * sizeof(T));
@@ -158,7 +162,7 @@ public:
m_size = size; m_size = size;
m_capacity = capacity; m_capacity = capacity;
m_alloc.free(m_elements); kfree(m_elements);
m_elements = new_array; m_elements = new_array;
} }
@@ -166,7 +170,6 @@ private:
size_t m_size; size_t m_size;
size_t m_capacity; size_t m_capacity;
T *m_elements; T *m_elements;
allocator &m_alloc;
}; };
} // namespace kutil } // namespace kutil

View File

@@ -5,7 +5,6 @@
#include <stdint.h> #include <stdint.h>
#include "kutil/allocator.h" #include "kutil/allocator.h"
#include "kutil/avl_tree.h" #include "kutil/avl_tree.h"
#include "kutil/slab_allocator.h"
namespace kutil { namespace kutil {
@@ -39,8 +38,7 @@ public:
/// Constructor. Define a range of managed VM space. /// Constructor. Define a range of managed VM space.
/// \arg start Starting address of the managed space /// \arg start Starting address of the managed space
/// \arg size Size of the managed space, in bytes /// \arg size Size of the managed space, in bytes
/// \arg alloc Allocator to use for tracking objects vm_space(uintptr_t start, size_t size);
vm_space(uintptr_t start, size_t size, kutil::allocator &alloc);
/// Reserve a section of address space. /// Reserve a section of address space.
/// \arg start Starting address of reservaion, or 0 for any address /// \arg start Starting address of reservaion, or 0 for any address
@@ -76,8 +74,6 @@ private:
node_type * split_out(node_type* node, uintptr_t start, size_t size, vm_state state); node_type * split_out(node_type* node, uintptr_t start, size_t size, vm_state state);
node_type * consolidate(node_type* needle); node_type * consolidate(node_type* needle);
slab_allocator<node_type> m_slab;
allocator &m_alloc;
tree_type m_ranges; tree_type m_ranges;
}; };

View File

@@ -8,11 +8,14 @@ namespace kutil {
using node_type = kutil::avl_node<vm_range>; using node_type = kutil::avl_node<vm_range>;
using node_vec = kutil::vector<node_type*>; using node_vec = kutil::vector<node_type*>;
vm_space::vm_space(uintptr_t start, size_t size, allocator &alloc) : DEFINE_SLAB_ALLOCATOR(node_type, 1);
m_slab(alloc),
m_alloc(alloc) vm_space::vm_space(uintptr_t start, size_t size)
{ {
node_type *node = m_slab.pop(); // TODO: replace this with real global ctor
slab_allocated<node_type>::hacky_init_remove_me();
node_type *node = new node_type;
node->address = start; node->address = start;
node->size = size; node->size = size;
node->state = vm_state::none; node->state = vm_state::none;
@@ -22,9 +25,7 @@ vm_space::vm_space(uintptr_t start, size_t size, allocator &alloc) :
start, start+size); start, start+size);
} }
vm_space::vm_space() : vm_space::vm_space()
m_slab(allocator::invalid),
m_alloc(allocator::invalid)
{ {
} }
@@ -77,7 +78,7 @@ vm_space::split_out(node_type *node, uintptr_t start, size_t size, vm_state stat
// Split off rest into new node // Split off rest into new node
size_t leading = start - node->address; size_t leading = start - node->address;
node_type *next = m_slab.pop(); node_type *next = new node_type;
next->state = state; next->state = state;
next->address = start; next->address = start;
next->size = node->size - leading; next->size = node->size - leading;
@@ -100,7 +101,7 @@ vm_space::split_out(node_type *node, uintptr_t start, size_t size, vm_state stat
size_t trailing = node->size - size; size_t trailing = node->size - size;
node->size -= trailing; node->size -= trailing;
node_type *next = m_slab.pop(); node_type *next = new node_type;
next->state = old_state; next->state = old_state;
next->address = node->end(); next->address = node->end();
next->size = trailing; next->size = trailing;
@@ -132,7 +133,7 @@ inline void gather(node_type *node, node_vec &vec)
node_type * node_type *
vm_space::consolidate(node_type *needle) vm_space::consolidate(node_type *needle)
{ {
node_vec nodes(m_ranges.count(), m_alloc); node_vec nodes(m_ranges.count());
gather(m_ranges.root(), nodes); gather(m_ranges.root(), nodes);
node_type *prev = nullptr; node_type *prev = nullptr;
@@ -150,7 +151,7 @@ vm_space::consolidate(node_type *needle)
prev->size += node->size; prev->size += node->size;
if (needle == node) if (needle == node)
needle = prev; needle = prev;
m_ranges.remove(node, m_slab); m_ranges.remove(node);
} else { } else {
prev = node; prev = node;
} }

View File

@@ -1,78 +0,0 @@
#include <chrono>
#include <random>
#include <vector>
#include <stddef.h>
#include <stdlib.h>
#include <stdint.h>
#include "kutil/address_manager.h"
#include "kutil/allocator.h"
#include "catch.hpp"
using namespace kutil;
static const size_t max_block = 1ull << 36;
static const size_t start = max_block;
static const size_t GB = 1ull << 30;
class malloc_allocator :
public kutil::allocator
{
public:
virtual void * allocate(size_t n) override { return malloc(n); }
virtual void free(void *p) override { free(p); }
};
TEST_CASE( "Buddy addresses tests", "[address buddy]" )
{
malloc_allocator alloc;
address_manager am(alloc);
am.add_regions(start, max_block * 2);
// Blocks should be:
// 36: 0-64G, 64-128G
uintptr_t a = am.allocate(0x400); // under min
uintptr_t b = am.allocate(0x400);
CHECK( b == a + address_manager::min_alloc);
am.free(a);
am.free(b);
// Should be back to
// 36: 0-64G, 64-128G
a = am.allocate(max_block);
CHECK(a == start);
am.free(a);
// Should be back to
// 36: 0-64G, 64-128G
// This should allocate the 66-68G block, not the
// 66-67G block.
a = am.mark( start + 66 * GB + 0x1000, GB );
CHECK( a == start + 66 * GB );
// Free blocks should be:
// 36: 0-64G
// 35: 96-128G
// 34: 80-96G
// 33: 72-80G
// 32: 68-72G
// 31: 64-66G
// This should cause a split of the one 31 block, NOT
// be a leftover of the above mark.
b = am.allocate(GB);
CHECK( b == start + 64 * GB );
am.free(b);
am.free(a);
// Should be back to
// 36: 0-64G, 64-128G
a = am.allocate(max_block);
b = am.allocate(max_block);
CHECK( b == start + max_block );
CHECK( a == start );
}