[kernel] Change heap alloc for better alignment

Created a new util/node_map.h that implements a map that grows in-place.
This map is now used for tracking blocks' size orders, instead of a header
stored at the start of each memory block. Because no in-block header is
needed, the whole buddy block can be handed out, so page-aligned (or
larger) blocks can be requested from the heap.
This commit is contained in:
Justin C. Miller
2022-10-02 17:27:21 -07:00
parent 11b61ab345
commit e90647d498
9 changed files with 518 additions and 144 deletions

View File

@@ -5,7 +5,7 @@
#include <stddef.h>
#include <util/spinlock.h>
#include <util/node_map.h>
/// Allocator for a given heap range
class heap_allocator
@@ -15,9 +15,10 @@ public:
heap_allocator();
/// Constructor. The given memory area must already have been reserved.
/// \arg start Starting address of the heap
/// \arg size Size of the heap in bytes
heap_allocator(uintptr_t start, size_t size);
/// \arg start Starting address of the heap
/// \arg size Size of the heap in bytes
/// \arg heapmap Starting address of the heap tracking map
heap_allocator(uintptr_t start, size_t size, uintptr_t heapmap);
/// Allocate memory from the area managed.
/// \arg length The amount of memory to allocate, in bytes
@@ -30,34 +31,74 @@ public:
void free(void *p);
/// Minimum block size is (2^min_order). Must be at least 6.
static const unsigned min_order = 6;
static const unsigned min_order = 6; // 2^6 == 64 B
/// Maximum block size is (2^max_order). Must be less than 64.
static const unsigned max_order = 22;
/// Maximum block size is (2^max_order). Must be less than 32 + min_order.
static const unsigned max_order = 22; // 2^22 == 4 MiB
protected:
class mem_header;
struct free_header;
struct block_info
{
uint32_t offset;
uint8_t order;
bool free;
};
friend uint32_t & get_map_key(block_info &info);
/// Ensure there is a block of a given order, recursively splitting
/// \arg order Order (2^N) of the block we want
void ensure_block(unsigned order);
inline uint32_t map_key(void *p) const {
return static_cast<uint32_t>(
(reinterpret_cast<uintptr_t>(p) - m_start) >> min_order);
}
using block_map = util::inplace_map<uint32_t, block_info, -1u>;
/// Get the largest block size order that aligns with this address
inline unsigned address_order(uintptr_t addr) {
unsigned tz = __builtin_ctzll(addr);
return tz > max_order ? max_order : tz;
}
/// Helper accessor for the list of blocks of a given order
/// \arg order Order (2^N) of the block we want
/// \returns A mutable reference to the head of the list
mem_header *& get_free(unsigned order) { return m_free[order - min_order]; }
free_header *& get_free(unsigned order) { return m_free[order - min_order]; }
/// Helper to get a block of the given order, growing if necessary
/// Helper to remove and return the first block in the free
/// list for the given order.
free_header * pop_free(unsigned order);
/// Merge the given block with any currently free buddies to
/// create the largest block possible.
/// \arg block The current block
/// \returns The fully-merged block
free_header * merge_block(free_header *block);
/// Create a new block of the given order past the end of the existing
/// heap. The block will be marked as non-free.
/// \arg order The requested size order
/// \returns A pointer to the block's memory
void * new_block(unsigned order);
/// Register the given block as free with the given order.
/// \arg block The newly-created or freed block
/// \arg order The size order to set on the block
void register_free_block(free_header *block, unsigned order);
/// Helper to get a block of the given order by splitting existing
/// larger blocks. Returns false if there were no larger blocks.
/// \arg order Order (2^N) of the block we want
/// \returns A detached block of the given order
mem_header * pop_free(unsigned order);
/// \arg block [out] Receives a pointer to the requested block
/// \returns True if a split was done
bool split_off(unsigned order, free_header *&block);
uintptr_t m_start, m_end;
size_t m_blocks;
mem_header *m_free[max_order - min_order + 1];
size_t m_maxsize;
free_header *m_free[max_order - min_order + 1];
size_t m_allocated_size;
util::spinlock m_lock;
block_map m_map;
heap_allocator(const heap_allocator &) = delete;
};