[kernel] Re-add slab_allocated mixin

The return of slab_allocated! After the giant kutil/util/kernel
cleanup, this belongs squarely in the kernel and works much better
there. Slabs are now allocated via a bump pointer into a new kernel
VMA, instead of using kalloc() or allocating pages directly.
This commit is contained in:
Justin C. Miller
2022-01-30 20:46:19 -08:00
parent a7245116b6
commit dd535158f2
3 changed files with 68 additions and 0 deletions

View File

@@ -23,6 +23,8 @@ using bootproto::allocation_register;
using obj::vm_flags;
// Next free address in the kernel slabs VMA. Zero-initialized (unusable)
// until memory_initialize_post_ctors() points it at mem::slabs_offset;
// thereafter advanced atomically as slab chunks are carved off.
uintptr_t g_slabs_bump_pointer;
// These objects are initialized _before_ global constructors are called,
// so we don't want them to have global constructors at all, lest they
// overwrite the previous initialization.
@@ -44,6 +46,10 @@ obj::vm_area_guarded g_kernel_buffers {
mem::buffers_size,
vm_flags::write};
// VMA backing slab_allocated<> chunk allocations; mapped into the kernel
// address space at mem::slabs_offset by memory_initialize_post_ctors().
obj::vm_area_open g_kernel_slabs {
mem::slabs_size,
vm_flags::write};
// Route the global allocation operators through the kernel heap.
void *
operator new (size_t size)
{
    return g_kernel_heap.allocate(size);
}

void *
operator new [] (size_t size)
{
    return g_kernel_heap.allocate(size);
}

void
operator delete (void *p) noexcept
{
    g_kernel_heap.free(p);
}
@@ -106,6 +112,8 @@ memory_initialize_post_ctors(bootproto::args &kargs)
{
vm_space &vm = vm_space::kernel_space();
vm.add(mem::buffers_offset, &g_kernel_buffers);
vm.add(mem::slabs_offset, &g_kernel_slabs);
g_slabs_bump_pointer = mem::slabs_offset;
g_frame_allocator.free(
get_physical_page(kargs.page_tables.pointer),

View File

@@ -0,0 +1,57 @@
#pragma once
/// \file slab_allocated.h
/// A parent template class for slab-allocated objects
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <util/pointers.h>
#include <util/vector.h>

#include "memory.h"
extern uintptr_t g_slabs_bump_pointer;
/// Mixin that gives type T slab-based operator new/delete. Chunks of N
/// pages are carved off the kernel slabs VMA by atomically advancing
/// g_slabs_bump_pointer, diced into T-sized slots, and recycled through
/// a static free list.
///
/// NOTE(review): the bump pointer advances atomically, but s_free itself
/// is unsynchronized -- confirm allocation/free happen under a lock or
/// from a single context.
template <typename T, unsigned N = 1>
class slab_allocated
{
public:
    constexpr static unsigned slab_pages = N;
    constexpr static unsigned slab_size = N * mem::frame_size;

    /// Hand out one zeroed slot, refilling the free list with a fresh
    /// chunk if it has run dry.
    void * operator new(size_t size)
    {
        kassert(size == sizeof(T), "Slab allocator got wrong size allocation");

        if (!s_free.count())
            allocate_chunk();

        T *slot = s_free.pop();
        memset(slot, 0, sizeof(T));
        return slot;
    }

    /// Return a slot to the free list for reuse; memory is never handed
    /// back to the VMA.
    void operator delete(void *p) { s_free.append(reinterpret_cast<T*>(p)); }

private:
    // Number of T-sized slots in one chunk of slab_size bytes.
    constexpr static size_t per_chunk = slab_size / sizeof(T);

    /// Claim the next chunk of the slabs VMA and push every slot in it
    /// onto the free list.
    static void allocate_chunk()
    {
        s_free.ensure_capacity(per_chunk);

        uintptr_t base = __atomic_fetch_add(
            &g_slabs_bump_pointer, slab_size,
            __ATOMIC_SEQ_CST);

        T *slot = reinterpret_cast<T*>(base);
        T * const end = util::offset_pointer(slot, slab_size);
        for (; slot < end; ++slot)
            s_free.append(slot);
    }

    static util::vector<T*> s_free;
};

template <typename T, unsigned N>
util::vector<T*> slab_allocated<T,N>::s_free;
// Define the per-type free-list storage (an explicit specialization of the
// static member). Note: `typename` is only valid before a qualified
// dependent name, so the macro argument must be used bare -- writing
// `util::vector<typename type*>` is ill-formed.
#define DEFINE_SLAB_ALLOCATOR(type) \
    template<> util::vector<type*> slab_allocated<type, type::slab_pages>::s_free {};