mirror of
https://github.com/justinian/jsix.git
synced 2025-12-09 16:04:32 -08:00
[kernel] Re-add slab_allocated mixin
The return of slab_allocated! Now after the kutil/util/kernel giant cleanup, this belongs squarely in the kernel, and works much better there. Slabs are allocated via a bump pointer into a new kernel VMA, instead of using kalloc() or allocating pages directly.
This commit is contained in:
@@ -15,3 +15,6 @@
|
||||
|
||||
- name: buffers
|
||||
size: 64G
|
||||
|
||||
- name: slabs
|
||||
size: 64G
|
||||
|
||||
@@ -23,6 +23,8 @@ using bootproto::allocation_register;
|
||||
|
||||
using obj::vm_flags;
|
||||
|
||||
// Next unclaimed virtual address inside the kernel "slabs" VMA. Set to
// mem::slabs_offset during memory_initialize_post_ctors, and advanced
// atomically by slab_allocated<>::allocate_chunk as chunks are handed out.
uintptr_t g_slabs_bump_pointer;
|
||||
|
||||
// These objects are initialized _before_ global constructors are called,
|
||||
// so we don't want them to have global constructors at all, lest they
|
||||
// overwrite the previous initialization.
|
||||
@@ -44,6 +46,10 @@ obj::vm_area_guarded g_kernel_buffers {
|
||||
mem::buffers_size,
|
||||
vm_flags::write};
|
||||
|
||||
obj::vm_area_open g_kernel_slabs {
|
||||
mem::slabs_size,
|
||||
vm_flags::write};
|
||||
|
||||
// Route plain global-scope allocations through the kernel heap.
void * operator new(size_t size) { return g_kernel_heap.allocate(size); }
|
||||
// Array allocations come from the same kernel heap as scalar ones.
void * operator new [] (size_t size) { return g_kernel_heap.allocate(size); }
|
||||
// Release global-scope deletes back to the kernel heap.
void operator delete (void *p) noexcept { return g_kernel_heap.free(p); }
|
||||
@@ -106,6 +112,8 @@ memory_initialize_post_ctors(bootproto::args &kargs)
|
||||
{
|
||||
vm_space &vm = vm_space::kernel_space();
|
||||
vm.add(mem::buffers_offset, &g_kernel_buffers);
|
||||
vm.add(mem::slabs_offset, &g_kernel_slabs);
|
||||
g_slabs_bump_pointer = mem::slabs_offset;
|
||||
|
||||
g_frame_allocator.free(
|
||||
get_physical_page(kargs.page_tables.pointer),
|
||||
|
||||
57
src/kernel/objects/slab_allocated.h
Normal file
57
src/kernel/objects/slab_allocated.h
Normal file
@@ -0,0 +1,57 @@
|
||||
#pragma once
|
||||
/// \file slab_allocated.h
|
||||
/// A parent template class for slab-allocated objects
|
||||
|
||||
#include <stdlib.h>
#include <string.h>

#include <util/pointers.h>
#include <util/vector.h>

#include "memory.h"
|
||||
|
||||
// Bump pointer into the kernel slabs VMA; defined and initialized by the
// kernel memory-setup code. Advanced atomically in allocate_chunk below.
extern uintptr_t g_slabs_bump_pointer;
|
||||
|
||||
template <typename T, unsigned N = 1>
|
||||
class slab_allocated
|
||||
{
|
||||
public:
|
||||
constexpr static unsigned slab_pages = N;
|
||||
constexpr static unsigned slab_size = N * mem::frame_size;
|
||||
|
||||
void * operator new(size_t size)
|
||||
{
|
||||
kassert(size == sizeof(T), "Slab allocator got wrong size allocation");
|
||||
if (s_free.count() == 0)
|
||||
allocate_chunk();
|
||||
|
||||
T *item = s_free.pop();
|
||||
memset(item, 0, sizeof(T));
|
||||
return item;
|
||||
}
|
||||
|
||||
void operator delete(void *p) { s_free.append(reinterpret_cast<T*>(p)); }
|
||||
|
||||
private:
|
||||
constexpr static size_t per_block = slab_size / sizeof(T);
|
||||
|
||||
static void allocate_chunk()
|
||||
{
|
||||
s_free.ensure_capacity(per_block);
|
||||
|
||||
uintptr_t start = __atomic_fetch_add(
|
||||
&g_slabs_bump_pointer, slab_size,
|
||||
__ATOMIC_SEQ_CST);
|
||||
|
||||
T *current = reinterpret_cast<T*>(start);
|
||||
T *end = util::offset_pointer(current, slab_size);
|
||||
while (current < end)
|
||||
s_free.append(current++);
|
||||
}
|
||||
|
||||
static util::vector<T*> s_free;
|
||||
};
|
||||
|
||||
template <typename T, unsigned N>
|
||||
util::vector<T*> slab_allocated<T,N>::s_free;
|
||||
|
||||
/// Define the free-list storage for a slab_allocated type as an explicit
/// specialization; invoke in exactly one translation unit per type.
/// Note: `type` is a concrete class name here, so the `typename`
/// disambiguator is not allowed (and `typename type*` is ill-formed).
#define DEFINE_SLAB_ALLOCATOR(type) \
    template<> util::vector<type*> slab_allocated<type, type::slab_pages>::s_free {};
|
||||
Reference in New Issue
Block a user