Overhaul memory allocation model

This commit makes several fundamental changes to memory handling:

- the frame allocator is now only an allocator for free frames, and does
  not track used frames.
- the frame allocator now stores its free list inside the free frames
  themselves, as a hybrid stack/span model (see the sketch after this
  list).
  - This means all frames must currently fit within the offset area.
- kutil has a new allocator interface, which is the only allowed way for
  any code outside of src/kernel to allocate. Code under src/kernel
  _may_ use new/delete, but should prefer the allocator interface.
- the heap manager has become heap_allocator, which is simply an
  implementation of kutil::allocator that doles out sections of a given
  address range.
- the heap manager now writes block headers only when necessary, avoiding
  page faults until the memory is actually touched
- page_manager now has a page fault handler, which checks with the
  address_manager to see if the address is known, and provides a frame
  mapping if it is, allowing the heap allocator to work with its entire
  address range from the start. (Currently 32GiB.)
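
To make the free-list-in-free-frames idea concrete, here is a minimal sketch of the hybrid stack/span layout. It is not the kernel's code: the names (free_span, allocate_one) and the offset-map constant are assumptions; it only shows how each run of free frames carries its own header, and why every frame must be reachable through the offset area.

#include <stddef.h>
#include <stdint.h>

namespace sketch {

constexpr size_t frame_size = 0x1000;
constexpr uintptr_t page_offset = 0xffff800000000000ull; // assumed offset-map base

// Each run of contiguous free frames stores this header inside its first
// frame; the allocator itself keeps only a pointer to the head span.
struct free_span {
    free_span *next;
    size_t count;
};

class frame_allocator {
public:
    // Pop one frame off the head span ("stack" behaviour); the header moves
    // forward into the next frame of the span ("span" behaviour).
    uintptr_t allocate_one() {
        if (!m_head) return 0;
        uintptr_t phys = reinterpret_cast<uintptr_t>(m_head) - page_offset;
        if (--m_head->count == 0) {
            m_head = m_head->next;
        } else {
            auto *moved = reinterpret_cast<free_span *>(
                reinterpret_cast<uintptr_t>(m_head) + frame_size);
            moved->next = m_head->next;
            moved->count = m_head->count;
            m_head = moved;
        }
        return phys;
    }

    // Freed frames become a new span pushed onto the head of the list.
    void free(uintptr_t phys, size_t count) {
        auto *span = reinterpret_cast<free_span *>(phys + page_offset);
        span->next = m_head;
        span->count = count;
        m_head = span;
    }

private:
    free_span *m_head = nullptr;
};

} // namespace sketch
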
Justin C. Miller
2019-04-16 01:13:09 -07:00
parent fd1adc0262
commit 6302e8b73a
33 changed files with 782 additions and 1010 deletions

View File

@@ -49,7 +49,7 @@ class disk
public:
/// Constructor.
/// \arg start The start of the initrd in memory
disk(const void *start);
disk(const void *start, kutil::allocator &alloc);
/// Get the vector of files on the disk
const kutil::vector<file> & files() const { return m_files; }

View File

@@ -23,7 +23,8 @@ file::executable() const {
}
disk::disk(const void *start)
disk::disk(const void *start, kutil::allocator &alloc) :
m_files(alloc)
{
auto *header = reinterpret_cast<const disk_header *>(start);
size_t length = header->length;

View File

@@ -1,190 +0,0 @@
#include "kutil/assert.h"
#include "kutil/frame_allocator.h"
#include "kutil/memory.h"
namespace kutil {
using memory::frame_size;
using memory::page_offset;
int
frame_block::compare(const frame_block *rhs) const
{
if (address < rhs->address)
return -1;
else if (address > rhs->address)
return 1;
return 0;
}
frame_block_list
frame_block::consolidate(frame_block_list &list)
{
frame_block_list freed;
for (auto *cur : list) {
auto *next = cur->next();
while ( next &&
cur->flags == next->flags &&
cur->end() == next->address) {
cur->count += next->count;
list.remove(next);
freed.push_back(next);
}
}
return freed;
}
void
frame_block::zero()
{
address = 0;
count = 0;
flags = frame_block_flags::none;
}
void
frame_block::copy(frame_block *other)
{
address = other->address;
count = other->count;
flags = other->flags;
}
frame_allocator::frame_allocator(
frame_block_list cache)
{
m_cache.append(cache);
}
void
frame_allocator::init(
frame_block_list free,
frame_block_list used)
{
m_free.append(free);
m_used.append(used);
}
list_node<frame_block> *
frame_allocator::get_block_node()
{
if (m_cache.empty()) {
auto *first = m_free.front();
frame_block_node * start =
reinterpret_cast<frame_block_node*>(first->address + page_offset);
frame_block_node * end = offset_pointer(start, frame_size);
if (first->count == 1) {
m_free.remove(first);
} else {
first->count--;
first->address += frame_size;
}
while (start < end) {
m_cache.push_back(start);
start++;
}
}
return m_cache.pop_front();
}
void
frame_allocator::consolidate_blocks()
{
m_cache.append(frame_block::consolidate(m_free));
m_cache.append(frame_block::consolidate(m_used));
}
size_t
frame_allocator::allocate(size_t count, uintptr_t *address)
{
kassert(!m_free.empty(), "frame_allocator::pop_frames ran out of free frames!");
auto *first = m_free.front();
unsigned n = count < first->count ? count : first->count;
*address = first->address;
if (count >= first->count) {
m_free.remove(first);
m_used.sorted_insert(first);
} else {
auto *used = get_block_node();
used->copy(first);
used->count = n;
m_used.sorted_insert(used);
first->address += n * frame_size;
first->count -= n;
}
consolidate_blocks();
return n;
}
void
frame_allocator::free(uintptr_t address, size_t count)
{
size_t block_count = 0;
for (auto *block : m_used) {
if (!block->contains(address)) continue;
size_t size = frame_size * count;
uintptr_t end = address + size;
size_t leading = address - block->address;
size_t trailing =
end > block->end() ?
0 : (block->end() - end);
if (leading) {
size_t frames = leading / frame_size;
auto *lead_block = get_block_node();
lead_block->copy(block);
lead_block->count = frames;
block->count -= frames;
block->address += leading;
m_used.insert_before(block, lead_block);
}
if (trailing) {
size_t frames = trailing / frame_size;
auto *trail_block = get_block_node();
trail_block->copy(block);
trail_block->count = frames;
trail_block->address += size;
block->count -= frames;
m_used.insert_before(block, trail_block);
}
m_used.remove(block);
m_free.sorted_insert(block);
++block_count;
address += block->count * frame_size;
count -= block->count;
if (!count)
break;
}
kassert(block_count, "Couldn't find existing allocated frames to free");
consolidate_blocks();
}
} // namespace kutil

View File

@@ -1,12 +1,11 @@
#include <stdint.h>
#include "kutil/assert.h"
#include "kutil/memory.h"
#include "kutil/heap_manager.h"
#include "kutil/heap_allocator.h"
namespace kutil {
struct heap_manager::mem_header
struct heap_allocator::mem_header
{
mem_header(mem_header *prev, mem_header *next, uint8_t size) :
m_prev(prev), m_next(next)
@@ -14,34 +13,29 @@ struct heap_manager::mem_header
set_size(size);
}
inline void set_size(uint8_t size)
{
inline void set_size(uint8_t size) {
m_prev = reinterpret_cast<mem_header *>(
reinterpret_cast<uintptr_t>(prev()) | (size & 0x3f));
}
inline void set_used(bool used)
{
inline void set_used(bool used) {
m_next = reinterpret_cast<mem_header *>(
reinterpret_cast<uintptr_t>(next()) | (used ? 1 : 0));
}
inline void set_next(mem_header *next)
{
inline void set_next(mem_header *next) {
bool u = used();
m_next = next;
set_used(u);
}
inline void set_prev(mem_header *prev)
{
inline void set_prev(mem_header *prev) {
uint8_t s = size();
m_prev = prev;
set_size(s);
}
void remove()
{
void remove() {
if (next()) next()->set_prev(prev());
if (prev()) prev()->set_next(next());
set_prev(nullptr);
@@ -67,25 +61,24 @@ private:
};
heap_manager::heap_manager() :
m_grow(nullptr)
{
}
heap_allocator::heap_allocator() : m_next(0), m_size(0) {}
heap_manager::heap_manager(grow_callback grow_cb) :
m_grow(grow_cb)
heap_allocator::heap_allocator(uintptr_t start, size_t size) :
m_next(start), m_size(size)
{
kutil::memset(m_free, 0, sizeof(m_free));
grow_memory();
}
void *
heap_manager::allocate(size_t length)
heap_allocator::allocate(size_t length)
{
size_t total = length + sizeof(mem_header);
unsigned size = min_size;
while (total > (1 << size)) size++;
kassert(size <= max_size, "Tried to allocate a block bigger than max_size");
if (size > max_size)
return nullptr;
mem_header *header = pop_free(size);
header->set_used(true);
@@ -93,18 +86,28 @@ heap_manager::allocate(size_t length)
}
void
heap_manager::free(void *p)
heap_allocator::free(void *p)
{
if (!p) return;
mem_header *header = reinterpret_cast<mem_header *>(p);
header -= 1; // p points after the header
header->set_used(false);
while (header->size() != max_size) {
auto size = header->size();
mem_header *buddy = header->buddy();
if (buddy->used() || buddy->size() != header->size()) break;
if (buddy->used() || buddy->size() != size)
break;
if (get_free(size) == buddy)
get_free(size) = buddy->next();
buddy->remove();
header = header->eldest() ? header : buddy;
header->set_size(header->size() + 1);
header->set_size(size + 1);
}
uint8_t size = header->size();
@@ -115,47 +118,60 @@ heap_manager::free(void *p)
}
void
heap_manager::grow_memory()
heap_allocator::ensure_block(unsigned size)
{
size_t length = (1 << max_size);
kassert(m_grow, "Tried to grow heap without a growth callback");
void *next = m_grow(length);
mem_header *block = new (next) mem_header(nullptr, get_free(max_size), max_size);
get_free(max_size) = block;
if (block->next())
block->next()->set_prev(block);
}
void
heap_manager::ensure_block(unsigned size)
{
if (get_free(size) != nullptr) return;
else if (size == max_size) {
grow_memory();
if (get_free(size) != nullptr)
return;
if (size == max_size) {
size_t bytes = (1 << max_size);
if (bytes <= m_size) {
mem_header *next = reinterpret_cast<mem_header *>(m_next);
new (next) mem_header(nullptr, nullptr, size);
get_free(size) = next;
m_next += bytes;
m_size -= bytes;
}
} else {
mem_header *orig = pop_free(size + 1);
if (orig) {
mem_header *next = kutil::offset_pointer(orig, 1 << size);
new (next) mem_header(orig, nullptr, size);
orig->set_next(next);
orig->set_size(size);
get_free(size) = orig;
}
}
mem_header *orig = pop_free(size + 1);
mem_header *next = kutil::offset_pointer(orig, 1 << size);
new (next) mem_header(orig, nullptr, size);
orig->set_next(next);
orig->set_size(size);
get_free(size) = orig;
}
heap_manager::mem_header *
heap_manager::pop_free(unsigned size)
heap_allocator::mem_header *
heap_allocator::pop_free(unsigned size)
{
ensure_block(size);
mem_header *block = get_free(size);
get_free(size) = block->next();
block->remove();
if (block) {
get_free(size) = block->next();
block->remove();
}
return block;
}
class invalid_allocator :
public allocator
{
public:
virtual void * allocate(size_t) override {
kassert(false, "Attempting to allocate from allocator::invalid");
return nullptr;
}
virtual void free(void *) override {
kassert(false, "Attempting to free from allocator::invalid");
}
} _invalid_allocator;
allocator &allocator::invalid = _invalid_allocator;
} // namespace kutil
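
As a side note on the buddy arithmetic above, this small standalone program (not part of the tree) demonstrates the two tricks heap_allocator relies on: rounding a request up to a power-of-two size class, and stashing the 6-bit size class in the low bits of a 64-byte-aligned header pointer. The 16-byte header size is an assumption (two pointers on x86-64).

#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

constexpr unsigned min_size = 6;       // mirrors heap_allocator::min_size
constexpr size_t header_bytes = 16;    // assumed sizeof(mem_header)

unsigned size_class(size_t length)
{
    size_t total = length + header_bytes;
    unsigned size = min_size;
    while (total > (1ull << size)) ++size;
    return size;  // caller must still check against max_size
}

int main()
{
    // A 100-byte request plus its header needs 116 bytes -> 2^7 = 128.
    assert(size_class(100) == 7);

    // Header packing: because blocks are at least 64-byte aligned, the low
    // six bits of a header pointer are free to hold the size class.
    alignas(64) static unsigned char block[64];
    uintptr_t tagged = reinterpret_cast<uintptr_t>(block) | size_class(100);

    unsigned recovered_size = tagged & 0x3f;
    void *recovered_ptr = reinterpret_cast<void *>(tagged & ~uintptr_t{0x3f});

    printf("size class %u, pointer intact: %d\n",
        recovered_size, recovered_ptr == static_cast<void *>(block));
    return 0;
}
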

View File

@@ -0,0 +1,31 @@
#pragma once
/// \file allocator.h
/// Allocator interface
#include <stdint.h>
#include "kernel_memory.h"
namespace kutil {
class allocator
{
public:
/// Allocate memory.
/// \arg size The amount of memory to allocate, in bytes
/// \returns A pointer to the allocated memory, or nullptr if
/// allocation failed.
virtual void * allocate(size_t size) = 0;
/// Free a previous allocation.
/// \arg p A pointer previously returned by allocate()
virtual void free(void *p) = 0;
template <typename T>
inline T * allocate(unsigned count) {
return reinterpret_cast<T*>(allocate(count * sizeof(T)));
}
static allocator &invalid;
};
} // namespace kutil
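
For callers, the interface is easiest to see with a toy implementation. The bump_allocator below is hypothetical and only illustrates the shape: derive from kutil::allocator, implement the two virtuals, and consumers get the typed allocate<T>() helper for free through a reference to the base.

#include <stddef.h>
#include <stdint.h>
#include "kutil/allocator.h"

// A deliberately tiny bump allocator: allocate() advances a cursor through a
// fixed buffer and free() is a no-op. Only the shape matters here; the name
// and behaviour are not part of kutil.
class bump_allocator :
    public kutil::allocator
{
public:
    bump_allocator(void *start, size_t size) :
        m_next(reinterpret_cast<uintptr_t>(start)), m_remaining(size) {}

    virtual void * allocate(size_t size) override {
        if (size > m_remaining) return nullptr;
        void *p = reinterpret_cast<void *>(m_next);
        m_next += size;
        m_remaining -= size;
        return p;
    }

    virtual void free(void *) override {}  // bump allocators never reclaim

private:
    uintptr_t m_next;
    size_t m_remaining;
};

void example()
{
    alignas(16) static uint8_t buffer[1024];
    bump_allocator bump {buffer, sizeof(buffer)};

    // Hold the interface type; the templated helper forwards to the virtual
    // allocate() with count * sizeof(T).
    kutil::allocator &alloc = bump;
    uint64_t *values = alloc.allocate<uint64_t>(8);
    alloc.free(values);
}
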

View File

@@ -3,6 +3,7 @@
/// Helper base class for buddy allocators with external node storage.
#include <stdint.h>
#include <utility>
#include "kutil/assert.h"
#include "kutil/linked_list.h"
#include "kutil/slab_allocator.h"
@@ -25,15 +26,21 @@ public:
static const size_t min_alloc = (1 << size_min);
static const size_t max_alloc = (1 << size_max);
/// Constructor.
buddy_allocator() {}
/// Default constructor creates an invalid object.
buddy_allocator() : m_alloc(allocator::invalid) {}
/// Constructor with an initial cache of region structs from bootstrapped
/// memory.
/// \arg cache List of pre-allocated, unused region_type structures
buddy_allocator(region_list cache)
/// Constructor.
/// \arg alloc Allocator to use for region nodes
buddy_allocator(allocator &alloc) : m_alloc(alloc) {}
/// Move-like constructor. Takes ownership of existing regions.
buddy_allocator(buddy_allocator &&other, allocator &alloc) :
m_alloc(alloc)
{
m_alloc.append(cache);
for (unsigned i = 0; i < buckets; ++i) {
m_free[i] = std::move(other.m_free[i]);
m_used[i] = std::move(other.m_used[i]);
}
}
/// Add address space to be managed.
@@ -173,6 +180,23 @@ public:
}
}
/// Check if an allocation exists
/// \arg addr Address within the managed space
/// \returns True if the address is in a region currently allocated
bool contains(uintptr_t addr)
{
for (unsigned i = size_max; i >= size_min; --i) {
for (auto *r : used_bucket(i)) {
if (r->contains(addr))
return true;
else if (r->address < addr)
break;
}
}
return false;
}
protected:
/// Split a region of the given size into two smaller regions, returning
/// the new latter half
@@ -266,6 +290,8 @@ struct buddy_region
inline uintptr_t end() const { return address + (1ull << size); }
inline uintptr_t half() const { return address + (1ull << (size - 1)); }
inline bool contains(uintptr_t p) const { return p >= address && p < end(); }
inline uintptr_t buddy() const { return address ^ (1ull << size); }
inline bool elder() const { return address < buddy(); }
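
The buddy() and elder() helpers above lean on one bit trick: for a 2^size-aligned region, XOR-ing bit `size` of the address toggles between a region and its buddy. A quick standalone check (values are arbitrary):

#include <assert.h>
#include <stdint.h>

int main()
{
    const unsigned size = 12;           // 4 KiB regions
    const uintptr_t addr = 0x40000000;  // any 2^size-aligned address

    uintptr_t buddy = addr ^ (1ull << size);
    assert(buddy == 0x40001000);              // flipping bit `size` moves to the pair
    assert((buddy ^ (1ull << size)) == addr); // and the relation is symmetric
    assert(addr < buddy);                     // so `addr` is the elder of the two
    return 0;
}
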

View File

@@ -1,122 +0,0 @@
#pragma once
/// \file frame_allocator.h
/// Allocator for physical memory frames
#include <stdint.h>
#include "kernel_memory.h"
#include "kutil/enum_bitfields.h"
#include "kutil/linked_list.h"
namespace kutil {
struct frame_block;
using frame_block_list = linked_list<frame_block>;
/// Allocator for physical memory frames
class frame_allocator
{
public:
/// Default constructor
frame_allocator() = default;
/// Constructor with a provided initial frame_block cache.
/// \arg cache List of pre-allocated but unused frame_block structures
frame_allocator(frame_block_list cache);
/// Initialize the frame allocator from bootstrapped memory.
/// \arg free List of free blocks
/// \arg used List of currently used blocks
void init(
frame_block_list free,
frame_block_list used);
/// Get free frames from the free list. Only frames from the first free block
/// are returned, so the number may be less than requested, but they will
/// be contiguous.
/// \arg count The maximum number of frames to get
/// \arg address [out] The physical address of the first frame
/// \returns The number of frames retrieved
size_t allocate(size_t count, uintptr_t *address);
/// Free previously allocated frames.
/// \arg address The physical address of the first frame to free
/// \arg count The number of frames to be freed
void free(uintptr_t address, size_t count);
/// Consolidate the free and used block lists. Return freed blocks
/// to the cache.
void consolidate_blocks();
private:
using frame_block_node = list_node<frame_block>;
frame_block_list m_free; ///< Free frames list
frame_block_list m_used; ///< In-use frames list
frame_block_list m_cache; ///< Spare frame-block structs
frame_block_node *get_block_node();
frame_allocator(const frame_allocator &) = delete;
};
/// Flags used by `frame_block`.
enum class frame_block_flags : uint32_t
{
none = 0x0000,
mmio = 0x0001, ///< Memory is a MMIO region
nonvolatile = 0x0002, ///< Memory is non-volatile storage
pending_free = 0x0020, ///< Memory should be freed
acpi_wait = 0x0040, ///< Memory should be freed after ACPI init
permanent = 0x0080, ///< Memory is permanently unusable
// The following are used only during the memory bootstrapping
// process, and tell the page manager where to initially map
// the given block.
map_ident = 0x0100, ///< Identity map
map_kernel = 0x0200, ///< Map into normal kernel space
map_offset = 0x0400, ///< Map into offset kernel space
map_mask = 0x0700, ///< Mask of all map_* values
};
} // namespace kutil
IS_BITFIELD(kutil::frame_block_flags);
namespace kutil {
/// A block of contiguous frames. Each `frame_block` represents contiguous
/// physical frames with the same attributes.
struct frame_block
{
uintptr_t address;
uint32_t count;
frame_block_flags flags;
inline bool has_flag(frame_block_flags f) const { return bitfield_has(flags, f); }
inline uintptr_t end() const { return address + (count * memory::frame_size); }
inline bool contains(uintptr_t addr) const { return addr >= address && addr < end(); }
/// Helper to zero out a block and optionally set the next pointer.
void zero();
/// Helper to copy a block from another block
/// \arg other The block to copy from
void copy(frame_block *other);
/// Compare two blocks by address.
/// \arg rhs The right-hand comparator
/// \returns <0 if this sorts earlier, >0 if this sorts later, 0 for equal
int compare(const frame_block *rhs) const;
/// Traverse the list, joining adjacent blocks where possible.
/// \arg list The list to consolidate
/// \returns A linked list of freed frame_block structures.
static frame_block_list consolidate(frame_block_list &list);
};
} // namespace kutil

View File

@@ -1,36 +1,35 @@
#pragma once
/// \file heap_manager.h
/// A buddy allocator and related definitions.
/// \file heap_allocator.h
/// A buddy allocator for a memory heap
#include <stddef.h>
#include "kutil/allocator.h"
namespace kutil {
/// Manager for allocation of heap memory.
class heap_manager
/// Allocator for a given heap range
class heap_allocator :
public allocator
{
public:
/// Callback signature for growth function. Memory returned does not need
/// to be contiguous, but needs to be aligned to the length requested.
using grow_callback = void * (*)(size_t length);
/// Default constructor creates a valid but empty heap.
heap_allocator();
/// Default constructor. Creates an invalid manager.
heap_manager();
/// Constructor.
/// \arg grow_cb Function pointer to grow the heap size
heap_manager(grow_callback grow_cb);
/// Constructor. The given memory area must already have been reserved.
/// \arg start Starting address of the heap
/// \arg size Size of the heap in bytes
heap_allocator(uintptr_t start, size_t size);
/// Allocate memory from the area managed.
/// \arg length The amount of memory to allocate, in bytes
/// \returns A pointer to the allocated memory, or nullptr if
/// allocation failed.
void * allocate(size_t length);
virtual void * allocate(size_t length) override;
/// Free a previous allocation.
/// \arg p A pointer previously returned by allocate()
void free(void *p);
virtual void free(void *p) override;
/// Minimum block size is (2^min_size). Must be at least 6.
static const unsigned min_size = 6;
@@ -41,9 +40,6 @@ public:
protected:
class mem_header;
/// Expand the size of memory
void grow_memory();
/// Ensure there is a block of a given size, recursively splitting
/// \arg size Size category of the block we want
void ensure_block(unsigned size);
@@ -58,11 +54,11 @@ protected:
/// \returns A detached block of the given size
mem_header * pop_free(unsigned size);
uintptr_t m_next;
size_t m_size;
mem_header *m_free[max_size - min_size + 1];
grow_callback m_grow;
heap_manager(const heap_manager &) = delete;
heap_allocator(const heap_allocator &) = delete;
};
} // namespace kutil
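
Putting the pieces together, usage might look like the sketch below. The heap range and sizes are illustrative, not the kernel's actual values; per the commit message, the page fault handler is what lets the whole 32 GiB range be handed to the allocator before any frames are mapped.

#include <stddef.h>
#include <stdint.h>
#include "kutil/heap_allocator.h"

void example()
{
    // Illustrative values only; the kernel decides the real range.
    constexpr uintptr_t heap_start = 0xffffc00000000000ull;
    constexpr size_t heap_size = 32ull * 1024 * 1024 * 1024;  // 32 GiB

    kutil::heap_allocator heap {heap_start, heap_size};

    // Requests are rounded up to a power-of-two block, including the header:
    // 100 bytes plus its header fits in a 128-byte block.
    void *p = heap.allocate(100);
    heap.free(p);

    // The same object can be passed around as the generic interface.
    kutil::allocator &alloc = heap;
    uint32_t *counts = alloc.allocate<uint32_t>(64);
    alloc.free(counts);
}
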

View File

@@ -130,6 +130,18 @@ public:
other.m_count = 0;
}
/// Move assignment operator. Takes ownership of list elements.
/// Destructive towards current data!
linked_list & operator=(linked_list &&other)
{
m_head = other.m_head;
m_tail = other.m_tail;
m_count = other.m_count;
other.m_head = other.m_tail = nullptr;
other.m_count = 0;
return *this;
}
/// Check if the list is empty.
/// \returns true if the list is empty
bool empty() const { return m_head == nullptr; }
@@ -140,7 +152,7 @@ public:
/// Count the items in the list.
/// \returns The number of entries in the list.
size_t count_length() const
size_t count_length()
{
size_t len = 0;
for (item_type *cur = m_head; cur; cur = cur->m_next) ++len;
@@ -274,14 +286,14 @@ public:
{
if (!item) return;
if (!existing)
if (!existing) {
push_back(item);
else if (existing == m_head)
} else if (existing == m_head) {
push_front(item);
else
} else {
existing->insert_before(item);
m_count += 1;
m_count += 1;
}
}
/// Inserts an item into the list after another given item.
@@ -291,14 +303,14 @@ public:
{
if (!item) return;
if (!existing)
if (!existing) {
push_front(item);
else if (existing == m_tail)
} else if (existing == m_tail) {
push_back(item);
else
} else {
existing->insert_after(item);
m_count += 1;
m_count += 1;
}
}
/// Insert an item into the list in a sorted position. Depends on T

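A small illustration of the new move assignment, assuming a caller-defined payload type and that list_node<T> is default-constructible (neither is shown in this diff):

#include <utility>
#include "kutil/linked_list.h"

struct item { int value; };
using item_node = kutil::list_node<item>;

void example()
{
    kutil::linked_list<item> a, b;

    static item_node n1, n2;   // nodes are caller-owned; the list only links them
    a.push_back(&n1);
    a.push_back(&n2);

    // Ownership of a's nodes moves to b; a is left empty. As the doc comment
    // warns, anything b previously held is simply dropped, not freed.
    b = std::move(a);
    // now: b.count_length() == 2 and a.empty() == true
}
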
View File

@@ -9,15 +9,6 @@ void * operator new (size_t, void *p) noexcept;
namespace kutil {
/// Allocate memory.
/// \arg n The number of bytes to allocate
/// \returns The allocated memory
void * malloc(size_t n);
/// Free memory allocated by malloc().
/// \arg p A pointer previously returned by malloc()
void free(void *p);
/// Fill memory with the given value.
/// \arg p The beginning of the memory area to fill
/// \arg v The byte value to fill memory with
@@ -67,14 +58,4 @@ inline T* mask_pointer(T *p, uintptr_t mask)
/// \arg off An optional offset into the region
uint8_t checksum(const void *p, size_t len, size_t off = 0);
class heap_manager;
namespace setup {
/// Set the heap that malloc() / free() will use.
/// \arg mm The heap manager for the heap to use.
void set_heap(heap_manager *mm);
} // namespace kutil::setup
} // namespace kutil

View File

@@ -1,6 +1,7 @@
#pragma once
/// \file slab_allocator.h
/// A slab allocator and related definitions
#include "kutil/allocator.h"
#include "kutil/assert.h"
#include "kutil/linked_list.h"
#include "kutil/memory.h"
@@ -9,19 +10,17 @@ namespace kutil {
/// A slab allocator for small structures kept in a linked list
template <typename T, typename Alloc = void * (*)(size_t)>
template <typename T, size_t N = memory::frame_size>
class slab_allocator :
public linked_list<T>
{
public:
using item_type = list_node<T>;
using alloc_type = Alloc;
/// Default constructor.
/// \arg chunk_size The size of chunk to allocate, in bytes. 0 means default.
/// \arg alloc The allocator to use to allocate chunks. Defaults to malloc().
slab_allocator(size_t chunk_size = 0, Alloc alloc = malloc) :
m_chunk_size(chunk_size),
slab_allocator(allocator &alloc) :
m_alloc(alloc)
{
}
@@ -46,18 +45,16 @@ public:
void allocate()
{
size_t size = m_chunk_size ? m_chunk_size : 10 * sizeof(item_type);
void *memory = m_alloc(size);
size_t count = size / sizeof(item_type);
constexpr unsigned count = N / sizeof(item_type);
void *memory = m_alloc.allocate(N);
item_type *items = reinterpret_cast<item_type *>(memory);
for (size_t i = 0; i < count; ++i)
this->push_back(&items[i]);
}
private:
size_t m_chunk_size;
Alloc m_alloc;
allocator& m_alloc;
};
} // namespace kutil
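
A guess at typical use of the new slab_allocator signature; whether callers invoke allocate() themselves or the slab grows automatically isn't visible in this hunk, so treat this as a sketch.

#include <stdint.h>
#include "kutil/allocator.h"
#include "kutil/slab_allocator.h"

struct message { uint64_t id; };   // hypothetical payload type

void example(kutil::allocator &alloc)
{
    // N defaults to one frame; each allocate() call carves a frame-sized
    // chunk from `alloc` into list_node<message> entries.
    kutil::slab_allocator<message> slab {alloc};
    slab.allocate();

    // The slab is itself a linked_list<message>, so callers take and return
    // nodes with the usual list operations.
    auto *node = slab.pop_front();
    node->id = 42;
    slab.push_back(node);
}
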

View File

@@ -4,6 +4,7 @@
#include <algorithm>
#include <utility>
#include "kutil/allocator.h"
#include "kutil/memory.h"
namespace kutil {
@@ -14,18 +15,20 @@ class vector
{
public:
/// Default constructor. Creates an empty vector with no capacity.
vector() :
vector(kutil::allocator &alloc = allocator::invalid) :
m_size(0),
m_capacity(0),
m_elements(nullptr)
m_elements(nullptr),
m_alloc(alloc)
{}
/// Constructor. Creates an empty array with capacity.
/// \arg capacity Initial capacity to allocate
vector(size_t capacity) :
vector(size_t capacity, allocator &alloc) :
m_size(0),
m_capacity(0),
m_elements(nullptr)
m_elements(nullptr),
m_alloc(alloc)
{
set_capacity(capacity);
}
@@ -34,7 +37,8 @@ public:
vector(const vector& other) :
m_size(0),
m_capacity(0),
m_elements(nullptr)
m_elements(nullptr),
m_alloc(other.m_alloc)
{
set_capacity(other.m_capacity);
kutil::memcpy(m_elements, other.m_elements, other.m_size * sizeof(T));
@@ -45,7 +49,8 @@ public:
vector(vector&& other) :
m_size(other.m_size),
m_capacity(other.m_capacity),
m_elements(other.m_elements)
m_elements(other.m_elements),
m_alloc(other.m_alloc)
{
other.m_size = 0;
other.m_capacity = 0;
@@ -142,7 +147,7 @@ public:
/// \arg capacity Number of elements to allocate
void set_capacity(size_t capacity)
{
T *new_array = reinterpret_cast<T *>(malloc(capacity * sizeof(T)));
T *new_array = m_alloc.allocate<T>(capacity);
size_t size = std::min(capacity, m_size);
kutil::memcpy(new_array, m_elements, size * sizeof(T));
@@ -151,7 +156,7 @@ public:
m_size = size;
m_capacity = capacity;
delete [] m_elements;
m_alloc.free(m_elements);
m_elements = new_array;
}
@@ -159,6 +164,7 @@ private:
size_t m_size;
size_t m_capacity;
T *m_elements;
allocator &m_alloc;
};
} // namespace kutil
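
With the allocator reference now stored per vector, construction might look like this sketch (only the constructors and set_capacity appear in the diff, so nothing else is assumed):

#include "kutil/allocator.h"
#include "kutil/vector.h"

void example(kutil::allocator &alloc)
{
    // Default construction falls back to allocator::invalid, which asserts
    // if the vector ever tries to allocate, so real storage needs a real
    // allocator up front.
    kutil::vector<int> values {alloc};
    values.set_capacity(16);             // element storage comes from alloc

    kutil::vector<int> more(32, alloc);  // or reserve capacity at construction
}
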

View File

@@ -1,46 +1,11 @@
#include "kutil/memory.h"
#include "kutil/heap_manager.h"
namespace std {
enum class __attribute__ ((__type_visibility("default"))) align_val_t : size_t { };
}
#ifdef __POPCORN__
void * operator new(size_t n, std::align_val_t) { return kutil::malloc(n); }
void * operator new (size_t n) { return kutil::malloc(n); }
void * operator new[] (size_t n) { return kutil::malloc(n); }
void operator delete (void *p) noexcept { return kutil::free(p); }
void operator delete[] (void *p) noexcept { return kutil::free(p); }
#endif
namespace kutil {
namespace setup {
static heap_manager *heap_memory_manager;
void
set_heap(heap_manager *mm)
{
setup::heap_memory_manager = mm;
}
} // namespace kutil::setup
void *
malloc(size_t n)
{
return setup::heap_memory_manager->allocate(n);
}
void
free(void *p)
{
setup::heap_memory_manager->free(p);
}
void *
memset(void *s, uint8_t v, size_t n)
{