[kernel] Move more from kutil to kernel

The moving of kernel-only code out of kutil continues. (See 042f061)
This commit moves the following:

- The heap allocator code
- memory.cpp/h which means:
  - letting string.h be the right header for memset and memcpy, while
    still including an implementation of them for the kernel, since
    we're not linking libc to the kernel
  - Changing calls to kalloc/kfree to new/delete in kutil containers
    that aren't going to be merged into the kernel
- Fixing a problem with stdalign.h from libc, which was causing issues
  for type_traits.
This commit is contained in:
Justin C. Miller
2022-01-01 23:23:51 -08:00
parent 4d5ed8157c
commit a6ec294f63
29 changed files with 116 additions and 123 deletions

View File

@@ -1,179 +0,0 @@
#include <stdint.h>
#include "kutil/assert.h"
#include "kutil/heap_allocator.h"
#include "kutil/memory.h"
#include "kutil/util.h"
namespace kutil {
/// Block header stored at the start of every block, free or used.
/// Blocks are assumed to be at least 64-byte aligned (min_order is 6),
/// so the link pointers' low bits double as tag storage: the low 6 bits
/// of m_prev hold the block's order, and bit 0 of m_next holds the
/// "used" flag. The accessors below mask these tags off.
struct heap_allocator::mem_header
{
mem_header(mem_header *prev, mem_header *next, uint8_t order) :
m_prev(prev), m_next(next)
{
set_order(order);
}
// Store `order` in the low 6 bits of m_prev, preserving the pointer part.
inline void set_order(uint8_t order) {
m_prev = reinterpret_cast<mem_header *>(
reinterpret_cast<uintptr_t>(prev()) | (order & 0x3f));
}
// Store the used flag in bit 0 of m_next, preserving the pointer part.
inline void set_used(bool used) {
m_next = reinterpret_cast<mem_header *>(
reinterpret_cast<uintptr_t>(next()) | (used ? 1 : 0));
}
// Replace the next pointer without disturbing the used flag.
inline void set_next(mem_header *next) {
bool u = used();
m_next = next;
set_used(u);
}
// Replace the prev pointer without disturbing the stored order.
inline void set_prev(mem_header *prev) {
uint8_t s = order();
m_prev = prev;
set_order(s);
}
// Unlink this block from its doubly-linked free list.
void remove() {
if (next()) next()->set_prev(prev());
if (prev()) prev()->set_next(next());
set_prev(nullptr);
set_next(nullptr);
}
// Pointer accessors strip the tag bits before returning.
inline mem_header * next() { return kutil::mask_pointer(m_next, 0x3f); }
inline mem_header * prev() { return kutil::mask_pointer(m_prev, 0x3f); }
// A block's buddy is the other half of the parent block one order up:
// the address with this block's size bit flipped.
inline mem_header * buddy() const {
return reinterpret_cast<mem_header *>(
reinterpret_cast<uintptr_t>(this) ^ (1 << order()));
}
// The eldest of a buddy pair is the lower-addressed one; merged blocks
// keep the eldest's header.
inline bool eldest() const { return this < buddy(); }
inline uint8_t order() const { return reinterpret_cast<uintptr_t>(m_prev) & 0x3f; }
inline bool used() const { return reinterpret_cast<uintptr_t>(m_next) & 0x1; }
private:
mem_header *m_prev;
mem_header *m_next;
};
heap_allocator::heap_allocator() : m_start {0}, m_end {0} {}
/// Construct an allocator over [start, start+size). The given memory
/// area must already have been reserved.
/// \arg start Starting address of the heap
/// \arg size  Size of the heap in bytes
heap_allocator::heap_allocator(uintptr_t start, size_t size) :
    m_start {start},
    m_end {start + size},
    m_blocks {0},
    m_allocated_size {0}
{
    // Every per-order free list starts out empty.
    for (auto &head : m_free) head = nullptr;
}
/// Allocate memory from the managed area.
/// \arg length The amount of memory to allocate, in bytes
/// \returns A pointer to the allocated memory, or nullptr if
/// allocation failed (zero length, oversize request, or exhausted heap).
void *
heap_allocator::allocate(size_t length)
{
    if (length == 0)
        return nullptr;

    // Account for the header that precedes every allocation.
    size_t total = length + sizeof(mem_header);

    // log2 comes from kutil/util.h — assumed to round up so the block
    // covers `total` bytes; TODO confirm against its definition.
    unsigned order = log2(total);
    if (order < min_order)
        order = min_order;

    kassert(order <= max_order, "Tried to allocate a block bigger than max_order");
    if (order > max_order)
        return nullptr;

    scoped_lock lock {m_lock};

    mem_header *header = pop_free(order);
    if (!header) {
        // BUGFIX: pop_free returns nullptr when the heap is exhausted;
        // the old code dereferenced it unconditionally.
        return nullptr;
    }

    header->set_used(true);
    m_allocated_size += (1 << order);
    return header + 1;
}
void
heap_allocator::free(void *p)
{
if (!p) return;
uintptr_t addr = reinterpret_cast<uintptr_t>(p);
kassert(addr >= m_start && addr < m_end,
"Attempt to free non-heap pointer");
scoped_lock lock {m_lock};
mem_header *header = reinterpret_cast<mem_header *>(p);
header -= 1; // p points after the header
header->set_used(false);
m_allocated_size -= (1 << header->order());
// Coalesce: while this block's buddy is also free and still at the
// same order (i.e. not split), merge the pair one order up.
while (header->order() != max_order) {
auto order = header->order();
mem_header *buddy = header->buddy();
// Stop when the buddy is in use or has been split to a smaller order.
if (buddy->used() || buddy->order() != order)
break;
// Detach the buddy from its free list, fixing the list head if the
// buddy was at the front.
if (get_free(order) == buddy)
get_free(order) = buddy->next();
buddy->remove();
// The merged block keeps the lower-addressed header of the pair.
header = header->eldest() ? header : buddy;
header->set_order(order + 1);
}
// Push the (possibly merged) block onto the front of its free list.
uint8_t order = header->order();
header->set_next(get_free(order));
get_free(order) = header;
if (header->next())
header->next()->set_prev(header);
}
void
heap_allocator::ensure_block(unsigned order)
{
// Nothing to do if a free block of this order already exists.
if (get_free(order) != nullptr)
return;
if (order == max_order) {
// Top order: grow the heap by carving the next max-order block out
// of the unclaimed region, if any remains before m_end.
size_t bytes = (1 << max_order);
uintptr_t next = m_start + m_blocks * bytes;
if (next + bytes <= m_end) {
mem_header *nextp = reinterpret_cast<mem_header *>(next);
new (nextp) mem_header(nullptr, nullptr, order);
get_free(order) = nextp;
++m_blocks;
}
} else {
// Otherwise split one block of the next-higher order (which
// pop_free ensures recursively) into two buddies of this order.
mem_header *orig = pop_free(order + 1);
if (orig) {
// Place a new header halfway through, linking it after `orig`,
// and demote `orig` to this order; both halves are now the
// free list for this order.
mem_header *next = kutil::offset_pointer(orig, 1 << order);
new (next) mem_header(orig, nullptr, order);
orig->set_next(next);
orig->set_order(order);
get_free(order) = orig;
}
}
}
/// Detach and return a free block of the given order, growing or
/// splitting as necessary.
/// \arg order Order (2^N) of the block we want
/// \returns A detached block, or nullptr if none could be made
heap_allocator::mem_header *
heap_allocator::pop_free(unsigned order)
{
    ensure_block(order);

    mem_header *head = get_free(order);
    if (!head)
        return nullptr;

    get_free(order) = head->next();
    head->remove();
    return head;
}
} // namespace kutil

View File

@@ -1,66 +0,0 @@
#pragma once
/// \file heap_allocator.h
/// A buddy allocator for a memory heap
#include <stddef.h>
#include <stdint.h>
#include "kutil/spinlock.h"
namespace kutil {
/// Allocator for a given heap range
/// Buddy allocator for a given heap range. Block sizes run from
/// 2^min_order to 2^max_order bytes; all allocator state is guarded by
/// an internal spinlock.
class heap_allocator
{
public:
/// Default constructor creates a valid but empty heap.
heap_allocator();
/// Constructor. The given memory area must already have been reserved.
/// \arg start Starting address of the heap
/// \arg size Size of the heap in bytes
heap_allocator(uintptr_t start, size_t size);
/// Allocate memory from the area managed.
/// \arg length The amount of memory to allocate, in bytes
/// \returns A pointer to the allocated memory, or nullptr if
/// allocation failed.
void * allocate(size_t length);
/// Free a previous allocation.
/// \arg p A pointer previously returned by allocate()
void free(void *p);
/// Minimum block size is (2^min_order). Must be at least 6.
static const unsigned min_order = 6;
/// Maximum block size is (2^max_order). Must be less than 64.
static const unsigned max_order = 22;
protected:
class mem_header;
/// Ensure there is a block of a given order, recursively splitting
/// \arg order Order (2^N) of the block we want
void ensure_block(unsigned order);
/// Helper accessor for the list of blocks of a given order
/// \arg order Order (2^N) of the block we want
/// \returns A mutable reference to the head of the list
mem_header *& get_free(unsigned order) { return m_free[order - min_order]; }
/// Helper to get a block of the given order, growing if necessary
/// \arg order Order (2^N) of the block we want
/// \returns A detached block of the given order
mem_header * pop_free(unsigned order);
uintptr_t m_start, m_end; // heap bounds: [m_start, m_end)
size_t m_blocks; // count of max-order blocks carved from the range so far
mem_header *m_free[max_order - min_order + 1]; // free-list head per order
size_t m_allocated_size; // bytes currently allocated (rounded to block sizes)
spinlock m_lock; // guards all allocator state
heap_allocator(const heap_allocator &) = delete; // non-copyable
};
} // namespace kutil

View File

@@ -10,9 +10,10 @@
/// http://codecapsule.com/2013/11/11/robin-hood-hashing/
/// http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
#include <new>
#include <stdint.h>
#include <string.h>
#include "kutil/hash.h"
#include "kutil/memory.h"
#include "kutil/vector.h"
#include "kutil/util.h"
@@ -87,7 +88,7 @@ public:
// Destroy every slot in place, then release the raw node storage.
// NOTE(review): this is a diff rendering with markers stripped — the
// `kfree` line is the removed old code and the `delete []` line is its
// replacement; only one of the two frees exists in the real file.
virtual ~base_map() {
for (size_t i = 0; i < m_capacity; ++i)
m_nodes[i].~node();
kfree(m_nodes);
delete [] reinterpret_cast<uint8_t*>(m_nodes);
}
iterator begin() {
@@ -149,8 +150,8 @@ protected:
m_capacity = capacity;
const size_t size = m_capacity * sizeof(node);
m_nodes = reinterpret_cast<node*>(kalloc(size));
kutil::memset(m_nodes, 0, size);
m_nodes = reinterpret_cast<node*>(new uint8_t [size]);
memset(m_nodes, 0, size);
}
void grow() {
@@ -169,7 +170,7 @@ protected:
n.~node();
}
kfree(old);
delete [] reinterpret_cast<uint8_t*>(old);
}
inline node * construct(size_t i, uint64_t h, K &&k, V &&v) {

View File

@@ -1,72 +0,0 @@
#pragma once
#include <stddef.h>
#include <stdint.h>
void * operator new (size_t, void *p) noexcept;
namespace kutil {
/// Allocate from the default allocator. Note this needs to be
/// implemented by users of the kutil library.
/// \arg size The size in bytes requested
/// \returns A pointer to the newly allocated memory,
///          or nullptr on error
void * kalloc(size_t size);
/// Free memory allocated by `kalloc`. Note this needs to be
/// implemented by users of the kutil library.
/// \arg p Pointer that was returned from a `kalloc` call
void kfree(void *p);
/// Fill memory with the given value.
/// \arg p The beginning of the memory area to fill
/// \arg v The byte value to fill memory with
/// \arg n The size in bytes of the memory area
/// \returns A pointer to the filled memory
void * memset(void *p, uint8_t v, size_t n);
/// Copy an area of memory to another. The copy runs byte-by-byte from
/// low addresses to high, so overlapping regions are not handled.
/// \arg dest The memory to copy to
/// \arg src The memory to copy from
/// \arg n The number of bytes to copy
/// \returns A pointer to the destination memory
void * memcpy(void *dest, const void *src, size_t n);
/// Read a value of type T from a location in memory
/// \arg p The location in memory to read
/// \returns The value at the given location cast to T
template <typename T>
inline T read_from(const void *p)
{
    const T *typed = reinterpret_cast<const T *>(p);
    return *typed;
}
/// Get a pointer that's offset from another pointer
/// \arg p The base pointer
/// \arg n The offset in bytes (may be negative)
/// \returns The offset pointer
template <typename T>
inline T * offset_pointer(T *p, ptrdiff_t n)
{
    auto addr = reinterpret_cast<uintptr_t>(p);
    return reinterpret_cast<T *>(addr + n);
}
/// Return a pointer with the given bits masked out
/// \arg p The original pointer
/// \arg mask A bitmask of bits to clear from p
/// \returns The masked pointer
template <typename T>
inline T* mask_pointer(T *p, uintptr_t mask)
{
    auto bits = reinterpret_cast<uintptr_t>(p);
    bits &= ~mask;
    return reinterpret_cast<T *>(bits);
}
/// Do a simple byte-wise checksum of an area of memory.
/// \arg p The start of the memory region
/// \arg len The number of bytes in the region
/// \arg off An optional offset into the region at which to start summing
/// \returns The sum of bytes [off, len), truncated to 8 bits
uint8_t checksum(const void *p, size_t len, size_t off = 0);
} // namespace kutil

View File

@@ -2,9 +2,9 @@
/// \file vector.h
/// Definition of a simple dynamic vector collection for use in kernel space
#include <string.h>
#include <utility>
#include "kutil/assert.h"
#include "kutil/memory.h"
#include "kutil/util.h"
namespace kutil {
@@ -42,7 +42,7 @@ public:
m_elements(nullptr)
{
set_capacity(other.m_capacity);
kutil::memcpy(m_elements, other.m_elements, other.m_size * sizeof(T));
memcpy(m_elements, other.m_elements, other.m_size * sizeof(T));
m_size = other.m_size;
}
@@ -73,7 +73,7 @@ public:
bool was_static = m_capacity & ~cap_mask;
if (!was_static)
kfree(m_elements);
delete [] m_elements;
}
/// Get the size of the array.
@@ -271,17 +271,18 @@ public:
// Reallocate backing storage to `capacity` elements, copying the
// surviving prefix and removing any elements beyond it.
// NOTE(review): this is a diff rendering with markers stripped — each
// kalloc/kutil::memcpy/kfree line is the removed old code and the line
// right after it is its replacement.
// NOTE(review): the replacement allocates with `new uint8_t[]` but frees
// via `delete [] m_elements` (a T*); mismatched new[]/delete[] types are
// undefined behavior — the delete should cast back to uint8_t*. Confirm
// against the real file.
void set_capacity(count_t capacity)
{
bool was_static = m_capacity & ~cap_mask;
T *new_array = reinterpret_cast<T*>(kalloc(capacity * sizeof(T)));
T *new_array = reinterpret_cast<T*>(new uint8_t [capacity * sizeof(T)]);
count_t size = capacity > m_size ? m_size : capacity;
kutil::memcpy(new_array, m_elements, size * sizeof(T));
memcpy(new_array, m_elements, size * sizeof(T));
while (size < m_size) remove();
m_size = size;
m_capacity = capacity;
if (!was_static)
kfree(m_elements);
delete [] m_elements;
m_elements = new_array;
}

View File

@@ -6,7 +6,5 @@ module("kutil",
sources = [
"assert.cpp",
"bip_buffer.cpp",
"heap_allocator.cpp",
"memory.cpp",
"spinlock.cpp",
])

View File

@@ -1,35 +0,0 @@
#include "kutil/memory.h"
// Defines std::align_val_t directly (normally declared in <new>),
// presumably so this file builds without pulling in libc++ headers —
// TODO confirm intent.
// NOTE(review): the attribute is spelled __type_visibility("default");
// clang's documented spellings are type_visibility / __type_visibility__ —
// verify this form is actually recognized.
namespace std {
enum class __attribute__ ((__type_visibility("default"))) align_val_t : size_t { };
}
namespace kutil {
/// Fill `n` bytes starting at `s` with the byte value `v`.
/// \returns s, matching the libc memset contract
void *
memset(void *s, uint8_t v, size_t n)
{
    uint8_t *cursor = reinterpret_cast<uint8_t *>(s);
    uint8_t *const end = cursor + n;
    while (cursor != end)
        *cursor++ = v;
    return s;
}
/// Copy `n` bytes from `src` to `dest`, low address to high.
/// \returns dest
void *
memcpy(void *dest, const void *src, size_t n)
{
    uint8_t *out = reinterpret_cast<uint8_t *>(dest);
    const uint8_t *in = reinterpret_cast<const uint8_t *>(src);
    while (n--)
        *out++ = *in++;
    return dest;
}
/// Do a simple byte-wise checksum of an area of memory.
/// \arg p The start of the memory region
/// \arg len The number of bytes in the region
/// \arg off Offset into the region at which to start summing
/// \returns The sum of bytes [off, len), truncated to 8 bits
uint8_t
checksum(const void *p, size_t len, size_t off)
{
    uint8_t sum = 0;
    const uint8_t *c = reinterpret_cast<const uint8_t *>(p);
    // BUGFIX: the index was `int`, causing a signed/unsigned comparison
    // against `len` and breaking for regions larger than INT_MAX.
    for (size_t i = off; i < len; ++i)
        sum += c[i];
    return sum;
}
} // namespace kutil

View File

@@ -5,8 +5,10 @@
Permission is granted to use, modify, and / or redistribute at will.
*/
#ifndef __cplusplus
#define alignas _Alignas
#define alignof _Alignof
#endif
#define __alignas_is_defined 1
#define __alignof_is_defined 1