[kernel] Move slab_allocated items to the heap

Allocate the slabs for slab-allocated items on the heap, now that heap
regions are aligned. This also lets the slab sizes be non-page-sized.
This commit is contained in:
Justin C. Miller
2022-10-11 18:52:19 -07:00
parent c9bcc87511
commit e830a3d37b
4 changed files with 7 additions and 22 deletions

View File

@@ -24,6 +24,3 @@
- name: buffers - name: buffers
size: 64G size: 64G
- name: slabs
size: 64G

View File

@@ -24,8 +24,6 @@ using bootproto::allocation_register;
using obj::vm_flags; using obj::vm_flags;
uintptr_t g_slabs_bump_pointer;
// These objects are initialized _before_ global constructors are called, // These objects are initialized _before_ global constructors are called,
// so we don't want them to have global constructors at all, lest they // so we don't want them to have global constructors at all, lest they
// overwrite the previous initialization. // overwrite the previous initialization.
@@ -56,10 +54,6 @@ obj::vm_area_guarded g_kernel_buffers {
mem::buffers_size, mem::buffers_size,
vm_flags::write}; vm_flags::write};
obj::vm_area_open g_kernel_slabs {
mem::slabs_size,
vm_flags::write};
void * operator new(size_t size) { return g_kernel_heap.allocate(size); } void * operator new(size_t size) { return g_kernel_heap.allocate(size); }
void * operator new [] (size_t size) { return g_kernel_heap.allocate(size); } void * operator new [] (size_t size) { return g_kernel_heap.allocate(size); }
void operator delete (void *p) noexcept { return g_kernel_heap.free(p); } void operator delete (void *p) noexcept { return g_kernel_heap.free(p); }
@@ -135,8 +129,6 @@ memory_initialize_post_ctors(bootproto::args &kargs)
{ {
vm_space &vm = vm_space::kernel_space(); vm_space &vm = vm_space::kernel_space();
vm.add(mem::buffers_offset, &g_kernel_buffers); vm.add(mem::buffers_offset, &g_kernel_buffers);
vm.add(mem::slabs_offset, &g_kernel_slabs);
g_slabs_bump_pointer = mem::slabs_offset;
g_frame_allocator.free( g_frame_allocator.free(
get_physical_page(kargs.page_tables.pointer), get_physical_page(kargs.page_tables.pointer),

View File

@@ -7,6 +7,7 @@
#include <util/map.h> #include <util/map.h>
#include <util/spinlock.h> #include <util/spinlock.h>
#include "memory.h"
#include "objects/kobject.h" #include "objects/kobject.h"
#include "slab_allocated.h" #include "slab_allocated.h"
#include "wait_queue.h" #include "wait_queue.h"
@@ -79,7 +80,7 @@ private:
struct mailbox::message : struct mailbox::message :
public slab_allocated<message, 1> public slab_allocated<message, mem::frame_size>
{ {
uint64_t tag; uint64_t tag;
uint64_t subtag; uint64_t subtag;

View File

@@ -8,14 +8,11 @@
#include "memory.h" #include "memory.h"
extern uintptr_t g_slabs_bump_pointer; template <typename T, size_t N>
template <typename T, unsigned N = 1>
class slab_allocated class slab_allocated
{ {
public: public:
constexpr static unsigned slab_pages = N; static constexpr size_t slab_size = N;
constexpr static unsigned slab_size = N * mem::frame_size;
void * operator new(size_t size) void * operator new(size_t size)
{ {
@@ -37,9 +34,7 @@ private:
{ {
s_free.ensure_capacity(per_block); s_free.ensure_capacity(per_block);
uintptr_t start = __atomic_fetch_add( uint8_t *start = new uint8_t [slab_size];
&g_slabs_bump_pointer, slab_size,
__ATOMIC_SEQ_CST);
T *current = reinterpret_cast<T*>(start); T *current = reinterpret_cast<T*>(start);
T *end = util::offset_pointer(current, slab_size); T *end = util::offset_pointer(current, slab_size);
@@ -50,8 +45,8 @@ private:
static util::vector<T*> s_free; static util::vector<T*> s_free;
}; };
template <typename T, unsigned N> template <typename T, size_t N>
util::vector<T*> slab_allocated<T,N>::s_free; util::vector<T*> slab_allocated<T,N>::s_free;
#define DEFINE_SLAB_ALLOCATOR(type) \ #define DEFINE_SLAB_ALLOCATOR(type) \
template<> util::vector<typename type*> slab_allocated<typename type, type::slab_pages>::s_free {}; template<> util::vector<typename type*> slab_allocated<typename type, type::slab_size>::s_free {};