Allow vm_space to merge ranges

This commit is contained in:
Justin C. Miller
2019-04-18 00:22:01 -07:00
parent 806bfd1fbf
commit 88315c25a5
5 changed files with 165 additions and 18 deletions

View File

@@ -78,6 +78,7 @@ kernel_main(kernel_args *header)
memory::page_offset - memory::kernel_offset, memory::page_offset - memory::kernel_offset,
heap); heap);
k_space.reserve(0xffffff0000100000, 0x100000); k_space.reserve(0xffffff0000100000, 0x100000);
k_space.reserve(0xffffff0000200000, 0x100000);
initrd::disk ird(header->initrd, heap); initrd::disk ird(header->initrd, heap);
log::info(logs::boot, "initrd loaded with %d files.", ird.files().count()); log::info(logs::boot, "initrd loaded with %d files.", ird.files().count());

View File

@@ -150,6 +150,64 @@ private:
return existing; return existing;
} }
/// Remove a node from the subtree rooted at `existing`, rebalancing on the
/// way back up. Matching is by node identity (pointer), not key equality.
/// \arg existing    Root of the subtree to remove from (may be null)
/// \arg subtrahend  The node to remove
/// \arg alloc       Allocator used to free the removed node's memory
/// \returns  The new root of the subtree after removal and rebalancing
static node_type * remove(node_type *existing, node_type *subtrahend, allocator &alloc)
{
    if (existing == nullptr)
        return existing;

    if (existing == subtrahend) {
        if (!existing->m_left || !existing->m_right) {
            // At least one child is null
            node_type *temp = existing->m_left ?
                existing->m_left : existing->m_right;

            if (temp == nullptr) {
                // Both were null: this leaf goes away entirely
                temp = existing;
                existing = nullptr;
            } else {
                // One child: take over the child's entire contents
                // (payload and links), then free the child's memory.
                *existing = *temp;
            }
            alloc.free(temp);
        } else {
            // Both children exist: find the in-order successor (leftmost
            // node of the right subtree), adopt its payload, then remove
            // the successor from the right subtree.
            node_type *temp = existing->m_right;
            while (temp->m_left)
                temp = temp->m_left;

            // Copy the successor's payload but preserve this node's
            // structural links and height. A plain whole-node assignment
            // would clobber m_left/m_right with the successor's pointers,
            // detaching both original subtrees.
            node_type *left = existing->m_left;
            node_type *right = existing->m_right;
            int height = existing->m_height;
            *existing = *temp;
            existing->m_left = left;
            existing->m_right = right;
            existing->m_height = height;

            existing->m_right = remove(existing->m_right, temp, alloc);
        }
    } else if (existing->compare(subtrahend) < 0) {
        // NOTE(review): recursion direction depends on compare()'s sign
        // convention, which is defined elsewhere — verify against insert().
        existing->m_left = remove(existing->m_left, subtrahend, alloc);
    } else {
        existing->m_right = remove(existing->m_right, subtrahend, alloc);
    }

    if (!existing)
        return nullptr;

    // Rebalance this node if the removal unbalanced it
    int balance = existing->update_height();
    if (balance > 1) {
        // Left-heavy: left-right case needs an extra inner rotation first
        int left_balance = existing->m_left->update_height();
        if (left_balance < 0)
            existing->m_left = rotate_left(existing->m_left);
        return rotate_right(existing);
    } else if (balance < -1) {
        // Right-heavy: right-left case needs an extra inner rotation first
        int right_balance = existing->m_right->update_height();
        if (right_balance > 0)
            existing->m_right = rotate_right(existing->m_right);
        return rotate_left(existing);
    }

    return existing;
}
int m_height; int m_height;
node_type *m_left; node_type *m_left;
node_type *m_right; node_type *m_right;
@@ -162,10 +220,21 @@ public:
using item_type = T; using item_type = T;
using node_type = avl_node<T>; using node_type = avl_node<T>;
inline void insert(node_type *addend) { m_root = node_type::insert(m_root, addend); }
inline node_type * root() { return m_root; } inline node_type * root() { return m_root; }
/// \returns  The number of nodes currently in the tree.
inline unsigned count() const { return m_count; }
/// Remove a node from the tree, freeing its memory via `alloc`.
/// \arg subtrahend  The node to remove (matched by identity)
/// \arg alloc       Allocator used to free the removed node
// NOTE(review): m_count is decremented unconditionally — if subtrahend is
// not actually in the tree the count will drift. Verify callers only pass
// nodes that are live members of this tree.
inline void remove(node_type *subtrahend, allocator &alloc) {
    m_root = node_type::remove(m_root, subtrahend, alloc);
    m_count--;
}
/// Insert a node into the tree, rebalancing as needed.
/// \arg addend  The node to insert
inline void insert(node_type *addend) {
    m_root = node_type::insert(m_root, addend);
    m_count++;
}
private: private:
unsigned m_count {0};
node_type *m_root; node_type *m_root;
}; };

View File

@@ -12,24 +12,36 @@ namespace kutil {
/// A slab allocator for small structures kept in a linked list /// A slab allocator for small structures kept in a linked list
template <typename T, size_t N = memory::frame_size> template <typename T, size_t N = memory::frame_size>
class slab_allocator : class slab_allocator :
public linked_list<T> public linked_list<T>,
public allocator
{ {
public: public:
using item_type = list_node<T>; using item_type = list_node<T>;
/// Default constructor. /// Default constructor.
/// \arg chunk_size The size of chunk to allocate, in bytes. 0 means default.
/// \arg alloc The allocator to use to allocate chunks. Defaults to malloc(). /// \arg alloc The allocator to use to allocate chunks. Defaults to malloc().
slab_allocator(allocator &alloc) : slab_allocator(allocator &alloc) :
m_alloc(alloc) m_alloc(alloc) {}
/// Allocator interface implementation
virtual void * allocate(size_t size) override
{ {
kassert(size == sizeof(T), "Asked slab allocator for wrong size");
if (size != sizeof(T)) return 0;
return pop();
}
/// Allocator interface implementation
virtual void free(void *p) override
{
push(static_cast<item_type*>(p));
} }
/// Get an item from the cache. May allocate a new chunk if the cache is empty. /// Get an item from the cache. May allocate a new chunk if the cache is empty.
/// \returns An allocated element /// \returns An allocated element
inline item_type * pop() inline item_type * pop()
{ {
if (this->empty()) this->allocate(); if (this->empty()) this->allocate_chunk();
kassert(!this->empty(), "Slab allocator is empty after allocate()"); kassert(!this->empty(), "Slab allocator is empty after allocate()");
item_type *item = this->pop_front(); item_type *item = this->pop_front();
kutil::memset(item, 0, sizeof(item_type)); kutil::memset(item, 0, sizeof(item_type));
@@ -43,7 +55,8 @@ public:
this->push_front(item); this->push_front(item);
} }
void allocate() private:
void allocate_chunk()
{ {
constexpr unsigned count = N / sizeof(item_type); constexpr unsigned count = N / sizeof(item_type);
@@ -53,7 +66,6 @@ public:
this->push_back(&items[i]); this->push_back(&items[i]);
} }
private:
allocator& m_alloc; allocator& m_alloc;
}; };

View File

@@ -66,8 +66,10 @@ private:
using tree_type = kutil::avl_tree<vm_range>; using tree_type = kutil::avl_tree<vm_range>;
node_type * split_out(node_type* node, uintptr_t start, size_t size, vm_state state); node_type * split_out(node_type* node, uintptr_t start, size_t size, vm_state state);
node_type * consolidate(node_type* needle);
slab_allocator<node_type> m_alloc; slab_allocator<node_type> m_slab;
allocator &m_alloc;
tree_type m_ranges; tree_type m_ranges;
}; };

View File

@@ -1,16 +1,18 @@
#include <algorithm> #include <algorithm>
#include "kutil/logger.h" #include "kutil/logger.h"
#include "kutil/vector.h"
#include "kutil/vm_space.h" #include "kutil/vm_space.h"
namespace kutil { namespace kutil {
using node_type = kutil::avl_node<vm_range>; using node_type = kutil::avl_node<vm_range>;
using node_vec = kutil::vector<node_type*>;
vm_space::vm_space(uintptr_t start, size_t size, allocator &alloc) : vm_space::vm_space(uintptr_t start, size_t size, allocator &alloc) :
m_slab(alloc),
m_alloc(alloc) m_alloc(alloc)
{ {
node_type *node = m_alloc.pop(); node_type *node = m_slab.pop();
node->address = start; node->address = start;
node->size = size; node->size = size;
node->state = vm_state::none; node->state = vm_state::none;
@@ -23,8 +25,8 @@ vm_space::vm_space(uintptr_t start, size_t size, allocator &alloc) :
inline static bool inline static bool
overlaps(node_type *node, uintptr_t start, size_t size) overlaps(node_type *node, uintptr_t start, size_t size)
{ {
return start <= node->end() && return start < node->end() &&
(start + size) >= node->address; (start + size) > node->address;
} }
static node_type * static node_type *
@@ -46,7 +48,7 @@ node_type *
vm_space::split_out(node_type *node, uintptr_t start, size_t size, vm_state state) vm_space::split_out(node_type *node, uintptr_t start, size_t size, vm_state state)
{ {
// No cross-boundary splits allowed for now // No cross-boundary splits allowed for now
bool contained = bool contained =
start >= node->address && start >= node->address &&
start+size <= node->end(); start+size <= node->end();
@@ -59,23 +61,32 @@ vm_space::split_out(node_type *node, uintptr_t start, size_t size, vm_state stat
if (state == old_state) if (state == old_state)
return node; return node;
node->state = state;
log::debug(logs::vmem, "Splitting out region %016llx-%016llx[%d] from %016llx-%016llx[%d]", log::debug(logs::vmem, "Splitting out region %016llx-%016llx[%d] from %016llx-%016llx[%d]",
start, start+size, state, node->address, node->end(), old_state); start, start+size, state, node->address, node->end(), old_state);
bool do_consolidate = false;
if (node->address < start) { if (node->address < start) {
// Split off rest into new node // Split off rest into new node
size_t leading = start - node->address; size_t leading = start - node->address;
node_type *next = m_alloc.pop(); node_type *next = m_slab.pop();
next->state = state; next->state = state;
next->address = start; next->address = start;
next->size = node->size - leading; next->size = node->size - leading;
node->size = leading; node->size = leading;
node->state = old_state;
log::debug(logs::vmem,
" leading region %016llx-%016llx[%d]",
node->address, node->address + node->size, node->state);
m_ranges.insert(next); m_ranges.insert(next);
node = next; node = next;
} else { } else {
// TODO: check if this can merge with prev node do_consolidate = true;
} }
if (node->end() > start + size) { if (node->end() > start + size) {
@@ -83,27 +94,79 @@ vm_space::split_out(node_type *node, uintptr_t start, size_t size, vm_state stat
size_t trailing = node->size - size; size_t trailing = node->size - size;
node->size -= trailing; node->size -= trailing;
node_type *next = m_alloc.pop(); node_type *next = m_slab.pop();
next->state = old_state; next->state = old_state;
next->address = node->end(); next->address = node->end();
next->size = trailing; next->size = trailing;
log::debug(logs::vmem,
" tailing region %016llx-%016llx[%d]",
next->address, next->address + next->size, next->state);
m_ranges.insert(next); m_ranges.insert(next);
} else { } else {
// TODO: check if this can merge with next node do_consolidate = true;
} }
if (do_consolidate)
node = consolidate(node);
return node; return node;
} }
/// Append every node of the subtree rooted at `node` to `vec`, in
/// ascending (in-order) order.
inline void gather(node_type *node, node_vec &vec)
{
    if (!node)
        return;
    gather(node->left(), vec);
    vec.append(node);
    gather(node->right(), vec);
}
/// Walk all ranges in address order and merge each range into its
/// predecessor when the two are exactly adjacent and share the same state.
/// The earlier node grows in place and the later node is removed from the
/// tree and freed back to the slab.
/// \arg needle  A node the caller wants to track across the merge
/// \returns     `needle`, or the node it was merged into
node_type *
vm_space::consolidate(node_type *needle)
{
    // Snapshot the tree in-order so the tree can be mutated while iterating
    node_vec nodes(m_ranges.count(), m_alloc);
    gather(m_ranges.root(), nodes);

    node_type *prev = nullptr;
    for (auto *node : nodes) {
        log::debug(logs::vmem,
            "* Existing region %016llx-%016llx[%d]",
            node->address, node->address + node->size, node->state);

        // Mergeable: node begins exactly where prev ends, with equal state
        if (prev && node->address == prev->end() && node->state == prev->state) {
            log::debug(logs::vmem,
                "Joining regions %016llx-%016llx[%d] %016llx-%016llx[%d]",
                prev->address, prev->address + prev->size, prev->state,
                node->address, node->address + node->size, node->state);

            // Grow prev to cover node, retarget needle if it was the
            // merged-away node, then drop node from the tree.
            prev->size += node->size;
            if (needle == node)
                needle = prev;
            // NOTE(review): node_type::remove may relocate payloads and
            // free a *different* node's memory (the in-order successor)
            // when the removed node has two children. If so, a later
            // pointer in `nodes` could be dangling by the time the loop
            // reaches it — verify remove()'s semantics.
            m_ranges.remove(node, m_slab);
        } else {
            prev = node;
        }
    }

    return needle;
}
uintptr_t uintptr_t
vm_space::reserve(uintptr_t start, size_t size) vm_space::reserve(uintptr_t start, size_t size)
{ {
log::debug(logs::vmem, "Reserving region %016llx-%016llx", start, start+size); log::debug(logs::vmem, "Reserving region %016llx-%016llx", start, start+size);
node_type *node = find_overlapping(m_ranges.root(), start, size); node_type *node = find_overlapping(m_ranges.root(), start, size);
if (!node || node->state != vm_state::none) if (!node) {
log::debug(logs::vmem, " found no match");
return 0; return 0;
} else if (node->state != vm_state::none) {
log::debug(logs::vmem, " found wrong state %016llx-%016llx[%d]",
node->address, node->address + node->size, node->state);
return 0;
}
node = split_out(node, start, size, vm_state::reserved); node = split_out(node, start, size, vm_state::reserved);
return node ? start : 0; return node ? start : 0;