[kernel] Move vm_space into kernel

The vm_space code should not have been in kutil, moving it to kernel.
This commit is contained in:
2020-09-13 16:11:24 -07:00
parent 9dee5e4138
commit e8564c755b
7 changed files with 12 additions and 21 deletions

View File

@@ -1,10 +1,10 @@
#include "kutil/assert.h"
#include "kutil/vm_space.h"
#include "kernel_memory.h"
#include "page_manager.h"
#include "buffer_cache.h"
#include "vm_space.h"
extern kutil::vm_space g_kernel_space;
extern vm_space g_kernel_space;
using memory::frame_size;
using memory::kernel_stack_pages;

View File

@@ -5,7 +5,6 @@
#include "initrd/initrd.h"
#include "kutil/assert.h"
#include "kutil/vm_space.h"
#include "apic.h"
#include "block_device.h"
#include "console.h"

View File

@@ -5,12 +5,12 @@
#include "kutil/assert.h"
#include "kutil/heap_allocator.h"
#include "kutil/no_construct.h"
#include "kutil/vm_space.h"
#include "frame_allocator.h"
#include "io.h"
#include "log.h"
#include "page_manager.h"
#include "vm_space.h"
using memory::frame_size;
using memory::heap_start;
@@ -24,7 +24,7 @@ using memory::table_entries;
using namespace kernel;
kutil::vm_space g_kernel_space {kernel_offset, (heap_start-kernel_offset)};
vm_space g_kernel_space {kernel_offset, (heap_start-kernel_offset)};
// These objects are initialized _before_ global constructors are called,
@@ -54,7 +54,7 @@ void walk_page_table(
page_table::level level,
uintptr_t &current_start,
size_t &current_bytes,
kutil::vm_space &kspace)
vm_space &kspace)
{
constexpr size_t huge_page_size = (1ull<<30);
constexpr size_t large_page_size = (1ull<<21);

View File

@@ -1,9 +1,9 @@
#include "kutil/assert.h"
#include "kutil/vm_space.h"
#include "console.h"
#include "io.h"
#include "log.h"
#include "page_manager.h"
#include "vm_space.h"
using memory::frame_size;
using memory::heap_start;
@@ -333,12 +333,12 @@ page_manager::fault_handler(uintptr_t addr)
if (!addr)
return false;
extern kutil::vm_space g_kernel_space;
extern vm_space g_kernel_space;
bool is_heap = addr >= ::memory::heap_start &&
addr < ::memory::heap_start + ::memory::kernel_max_heap;
if (!is_heap &&
g_kernel_space.get(addr) != kutil::vm_state::committed)
g_kernel_space.get(addr) != vm_state::committed)
return false;
uintptr_t page = addr & ~0xfffull;

256
src/kernel/vm_space.cpp Normal file
View File

@@ -0,0 +1,256 @@
#include <algorithm>
#include "kutil/vector.h"
#include "log.h"
#include "vm_space.h"
using node_type = kutil::avl_node<vm_range>;
using node_vec = kutil::vector<node_type*>;
DEFINE_SLAB_ALLOCATOR(node_type, 1);
vm_space::vm_space(uintptr_t start, size_t size)
{
node_type *node = new node_type;
node->address = start;
node->size = size;
node->state = vm_state::none;
m_ranges.insert(node);
log::info(logs::vmem, "Creating address space from %016llx-%016llx",
start, start+size);
}
vm_space::vm_space()
{
}
inline static bool
contains(node_type *node, uintptr_t start, size_t size)
{
return start >= node->address &&
size <= node->size;
}
inline static bool
overlaps(node_type *node, uintptr_t start, size_t size)
{
return start < node->end() &&
(start + size) > node->address;
}
static node_type *
find_overlapping(node_type *from, uintptr_t start, size_t size)
{
while (from) {
if (overlaps(from, start, size))
return from;
from = start < from->address ?
from->left() :
from->right();
}
return nullptr;
}
node_type *
vm_space::split_out(node_type *node, uintptr_t start, size_t size, vm_state state)
{
// No cross-boundary splits allowed for now
const bool contained = contains(node, start, size);
kassert(contained, "Tried to split an address range across existing boundaries");
if (!contained)
return nullptr;
vm_state old_state = node->state;
if (state == old_state)
return node;
node->state = state;
log::debug(logs::vmem, "Splitting out region %016llx-%016llx[%d] from %016llx-%016llx[%d]",
start, start+size, state, node->address, node->end(), old_state);
bool do_consolidate = false;
if (node->address < start) {
// Split off rest into new node
size_t leading = start - node->address;
node_type *next = new node_type;
next->address = start;
next->size = node->size - leading;
next->state = state;
node->size = leading;
node->state = old_state;
log::debug(logs::vmem,
" leading region %016llx-%016llx[%d]",
node->address, node->address + node->size, node->state);
m_ranges.insert(next);
node = next;
} else {
do_consolidate = true;
}
if (node->end() > start + size) {
// Split off remaining into new node
size_t trailing = node->size - size;
node->size -= trailing;
node_type *next = new node_type;
next->state = old_state;
next->address = node->end();
next->size = trailing;
log::debug(logs::vmem,
" tailing region %016llx-%016llx[%d]",
next->address, next->address + next->size, next->state);
m_ranges.insert(next);
} else {
do_consolidate = true;
}
if (do_consolidate)
node = consolidate(node);
return node;
}
node_type *
vm_space::find_empty(node_type *node, size_t size, vm_state state)
{
if (node->state == vm_state::none && node->size >= size)
return split_out(node, node->address, size, state);
if (node->left()) {
node_type *found = find_empty(node->left(), size, state);
if (found)
return found;
}
if (node->right()) {
node_type *found = find_empty(node->right(), size, state);
if (found)
return found;
}
return nullptr;
}
inline void gather(node_type *node, node_vec &vec)
{
if (node) {
gather(node->left(), vec);
vec.append(node);
gather(node->right(), vec);
}
}
node_type *
vm_space::consolidate(node_type *needle)
{
node_vec nodes(m_ranges.count());
gather(m_ranges.root(), nodes);
node_type *prev = nullptr;
for (auto *node : nodes) {
log::debug(logs::vmem,
"* Existing region %016llx-%016llx[%d]",
node->address, node->address + node->size, node->state);
if (prev && node->address == prev->end() && node->state == prev->state) {
log::debug(logs::vmem,
"Joining regions %016llx-%016llx[%d] %016llx-%016llx[%d]",
prev->address, prev->address + prev->size, prev->state,
node->address, node->address + node->size, node->state);
prev->size += node->size;
if (needle == node)
needle = prev;
m_ranges.remove(node);
} else {
prev = node;
}
}
return needle;
}
uintptr_t
vm_space::reserve(uintptr_t start, size_t size)
{
if (start == 0) {
log::debug(logs::vmem, "Reserving any region of size %llx", size);
node_type *node = find_empty(m_ranges.root(), size, vm_state::reserved);
if (!node) {
log::debug(logs::vmem, " found no large enough region");
return 0;
}
return node->address;
}
log::debug(logs::vmem, "Reserving region %016llx-%016llx",
start, start+size);
node_type *node = find_overlapping(m_ranges.root(), start, size);
if (!node) {
log::debug(logs::vmem, " found no match");
return 0;
}
node = split_out(node, start, size, vm_state::reserved);
return node ? start : 0;
}
void
vm_space::unreserve(uintptr_t start, size_t size)
{
log::debug(logs::vmem, "Unreserving region %016llx-%016llx", start, start+size);
node_type *node = find_overlapping(m_ranges.root(), start, size);
if (!node || !contains(node, start, size)) {
log::debug(logs::vmem, " found no match");
return;
}
split_out(node, start, size, vm_state::none);
}
uintptr_t
vm_space::commit(uintptr_t start, size_t size)
{
if (start == 0) {
log::debug(logs::vmem, "Committing any region of size %llx", size);
node_type *node = find_empty(m_ranges.root(), size, vm_state::committed);
if (!node) {
log::debug(logs::vmem, " found no large enough region");
return 0;
}
return node->address;
}
log::debug(logs::vmem, "Committing region %016llx-%016llx",
start, start+size);
node_type *node = find_overlapping(m_ranges.root(), start, size);
if (!node) {
log::debug(logs::vmem, " found no match");
return 0;
}
node = split_out(node, start, size, vm_state::committed);
return node ? start : 0;
}
vm_state
vm_space::get(uintptr_t addr)
{
node_type *node = find_overlapping(m_ranges.root(), addr, 1);
return node ? node->state : vm_state::unknown;
}

73
src/kernel/vm_space.h Normal file
View File

@@ -0,0 +1,73 @@
#pragma once
/// \file vm_range.h
/// Structure for tracking a range of virtual memory addresses
#include <stdint.h>
#include "kutil/avl_tree.h"
enum class vm_state : uint8_t {
unknown,
none,
reserved,
committed
};
struct vm_range
{
uintptr_t address;
size_t size;
vm_state state;
inline uintptr_t end() const { return address + size; }
inline int64_t compare(const vm_range *other) const {
if (address > other->address) return -1;
else if (address < other->address) return 1;
else return 0;
}
};
/// Tracks a region of virtual memory address space
class vm_space
{
public:
/// Default constructor. Define an empty range.
vm_space();
/// Constructor. Define a range of managed VM space.
/// \arg start Starting address of the managed space
/// \arg size Size of the managed space, in bytes
vm_space(uintptr_t start, size_t size);
/// Reserve a section of address space.
/// \arg start Starting address of reservaion, or 0 for any address
/// \arg size Size of reservation in bytes
/// \returns The address of the reservation, or 0 on failure
uintptr_t reserve(uintptr_t start, size_t size);
/// Unreserve (and uncommit, if committed) a section of address space.
/// \arg start Starting address of reservaion
/// \arg size Size of reservation in bytes
void unreserve(uintptr_t start, size_t size);
/// Mark a section of address space as committed.
/// \arg start Starting address of reservaion, or 0 for any address
/// \arg size Size of reservation in bytes
/// \returns The address of the reservation, or 0 on failure
uintptr_t commit(uintptr_t start, size_t size);
/// Check the state of the given address.
/// \arg addr The address to check
/// \returns The state of the memory if known, or 'unknown'
vm_state get(uintptr_t addr);
private:
using node_type = kutil::avl_node<vm_range>;
using tree_type = kutil::avl_tree<vm_range>;
node_type * split_out(node_type* node, uintptr_t start, size_t size, vm_state state);
node_type * consolidate(node_type* needle);
node_type * find_empty(node_type* node, size_t size, vm_state state);
tree_type m_ranges;
};