Finish address_manager to vm_space transition

This commit is contained in:
Justin C. Miller
2019-05-18 18:06:57 -07:00
parent 2d54eb5143
commit ce035d2a43
8 changed files with 75 additions and 104 deletions

View File

@@ -25,6 +25,9 @@ namespace memory {
/// Max size of the kernel heap
static const size_t kernel_max_heap = 0x800000000; // 32GiB
/// Start of the kernel heap
static const uintptr_t heap_start = page_offset - kernel_max_heap;
/// Helper to determine if a physical address can be accessed
/// through the page_offset area.
/// \arg addr  The physical address to test
/// \returns   true if the address shares no bits with page_offset,
///            i.e. it lies below the offset-mapped window's base.
inline bool
page_mappable(uintptr_t addr)
{
	return !(addr & page_offset);
}

View File

@@ -3,6 +3,7 @@
#include "initrd/initrd.h"
#include "kutil/assert.h"
#include "kutil/heap_allocator.h"
#include "kutil/vm_space.h"
#include "apic.h"
#include "block_device.h"
@@ -27,6 +28,8 @@ extern "C" {
extern void __kernel_assert(const char *, unsigned, const char *);
extern kutil::heap_allocator g_kernel_heap;
void
init_console()
{
@@ -52,12 +55,14 @@ kernel_main(kernel_args *header)
gdt_init();
interrupts_init();
kutil::allocator &heap = memory_initialize(
memory_initialize(
header->scratch_pages,
header->memory_map,
header->memory_map_length,
header->memory_map_desc_size);
kutil::allocator &heap = g_kernel_heap;
if (header->frame_buffer && header->frame_buffer_length) {
page_manager::get()->map_offset_pointer(
&header->frame_buffer,
@@ -73,13 +78,6 @@ kernel_main(kernel_args *header)
log::debug(logs::boot, "ACPI root table is at: %016lx", header->acpi_table);
log::debug(logs::boot, "Runtime service is at: %016lx", header->runtime);
kutil::vm_space k_space(
memory::kernel_offset,
memory::page_offset - memory::kernel_offset,
heap);
k_space.reserve(0xffffff0000100000, 0x100000);
k_space.reserve(0xffffff0000200000, 0x100000);
initrd::disk ird(header->initrd, heap);
log::info(logs::boot, "initrd loaded with %d files.", ird.files().count());
for (auto &f : ird.files())

View File

@@ -1,22 +1,26 @@
#include <algorithm>
#include <utility>
#include "kutil/address_manager.h"
#include "kutil/assert.h"
#include "kutil/heap_allocator.h"
#include "kutil/vm_space.h"
#include "frame_allocator.h"
#include "io.h"
#include "log.h"
#include "page_manager.h"
using memory::frame_size;
using memory::heap_start;
using memory::kernel_max_heap;
using memory::kernel_offset;
using memory::page_offset;
static const unsigned ident_page_flags = 0xb;
kutil::address_manager g_kernel_address_manager;
kutil::vm_space g_kspace;
kutil::heap_allocator g_kernel_heap;
bool g_memory_initialized = false;
// Replaceable global allocation functions: route all kernel `new` and
// `new[]` expressions through the global kernel heap allocator.
// NOTE(review): assumes g_kernel_heap is usable before these are first
// called (it is placement-constructed in memory_initialize) — any
// `new` before that point would hit an uninitialized heap; confirm.
void * operator new(size_t size) { return g_kernel_heap.allocate(size); }
void * operator new [] (size_t size) { return g_kernel_heap.allocate(size); }
@@ -117,18 +121,14 @@ public:
}
}
void add_used_frames(kutil::address_manager &am) {
void add_used_frames(kutil::vm_space &vm) {
for (auto *desc : map) {
if (desc->type == efi_memory_type::popcorn_data ||
desc->type == efi_memory_type::popcorn_initrd)
desc->type == efi_memory_type::popcorn_initrd ||
desc->type == efi_memory_type::popcorn_kernel)
{
uintptr_t virt_addr = desc->physical_start + kernel_offset;
am.mark(virt_addr, desc->pages * frame_size);
}
else if (desc->type == efi_memory_type::popcorn_kernel)
{
uintptr_t virt_addr = desc->physical_start + kernel_offset;
am.mark_permanent(virt_addr, desc->pages * frame_size);
vm.commit(virt_addr, desc->pages * frame_size);
}
}
}
@@ -157,9 +157,11 @@ private:
const memory_map map;
};
kutil::allocator &
void
memory_initialize(uint16_t scratch_pages, const void *memory_map, size_t map_length, size_t desc_length)
{
g_memory_initialized = false;
// make sure the options we want in CR4 are set
uint64_t cr4;
__asm__ __volatile__ ( "mov %%cr4, %0" : "=r" (cr4) );
@@ -199,27 +201,12 @@ memory_initialize(uint16_t scratch_pages, const void *memory_map, size_t map_len
frame_allocator *fa = new (&g_frame_allocator) frame_allocator;
bootstrap.add_free_frames(*fa);
// Build an initial address manager that we'll copy into the real
// address manager later (so that we can use a raw allocator now)
kutil::allocator &alloc = fa->raw_allocator();
kutil::address_manager init_am(alloc);
init_am.add_regions(kernel_offset, page_offset - kernel_offset);
bootstrap.add_used_frames(init_am);
// Add the heap into the address manager
uintptr_t heap_start = page_offset - kernel_max_heap;
init_am.mark(heap_start, kernel_max_heap);
kutil::allocator *heap_alloc =
new (&g_kernel_heap) kutil::heap_allocator(heap_start, kernel_max_heap);
// Copy everything into the real address manager
kutil::address_manager *am =
new (&g_kernel_address_manager) kutil::address_manager(
std::move(init_am), *heap_alloc);
new (&g_kernel_heap) kutil::heap_allocator(heap_start, kernel_max_heap);
new (&g_kspace) kutil::vm_space(kernel_offset, (page_offset-kernel_offset), g_kernel_heap);
bootstrap.add_used_frames(g_kspace);
// Create the page manager
page_manager *pm = new (&g_page_manager) page_manager(*fa, *am);
page_manager *pm = new (&g_page_manager) page_manager(*fa);
// Give the frame_allocator back the rest of the scratch pages
fa->free(scratch_phys + (3 * frame_size), scratch_pages - 3);
@@ -237,6 +224,4 @@ memory_initialize(uint16_t scratch_pages, const void *memory_map, size_t map_len
// Reclaim the old PML4
fa->free(scratch_phys, 1);
return *heap_alloc;
}

View File

@@ -1,21 +1,21 @@
#include <algorithm>
#include "kutil/assert.h"
#include "kutil/vm_space.h"
#include "console.h"
#include "io.h"
#include "log.h"
#include "page_manager.h"
using memory::frame_size;
using memory::heap_start;
using memory::kernel_max_heap;
using memory::kernel_offset;
using memory::page_offset;
using memory::page_mappable;
extern kutil::address_manager g_kernel_address_manager;
page_manager g_page_manager(
g_frame_allocator,
g_kernel_address_manager);
page_manager g_page_manager(g_frame_allocator);
extern kutil::vm_space g_kspace;
static uintptr_t
pt_to_phys(page_table *pt)
@@ -38,12 +38,10 @@ struct free_page_header
};
page_manager::page_manager(
frame_allocator &frames,
kutil::address_manager &addrs) :
page_manager::page_manager(frame_allocator &frames) :
m_page_cache(nullptr),
m_frames(frames),
m_addrs(addrs)
m_memory_initialized(false)
{
}
@@ -72,52 +70,18 @@ page_manager::copy_page(uintptr_t orig)
bool paged_orig = false;
bool paged_copy = false;
uintptr_t orig_virt;
if (page_mappable(orig)) {
orig_virt = orig + page_offset;
} else {
orig_virt = m_addrs.allocate(frame_size);
page_in(get_pml4(), orig, orig_virt, 1);
paged_orig = true;
}
uintptr_t copy = 0;
uintptr_t copy_virt;
size_t n = m_frames.allocate(1, &copy);
kassert(n, "copy_page could not allocate page");
if (page_mappable(copy)) {
copy_virt = copy + page_offset;
} else {
copy_virt = m_addrs.allocate(frame_size);
page_in(get_pml4(), copy, copy_virt, 1);
paged_copy = true;
}
// TODO: multiple page copies at a time, so that we don't have to keep
// paying this mapping penalty
if (paged_orig || paged_copy) {
set_pml4(get_pml4());
__sync_synchronize();
io_wait();
}
uintptr_t orig_virt = orig + page_offset;
uintptr_t copy_virt = copy + page_offset;
kutil::memcpy(
reinterpret_cast<void *>(copy_virt),
reinterpret_cast<void *>(orig_virt),
frame_size);
if (paged_orig) {
page_out(get_pml4(), orig_virt, 1);
m_addrs.free(orig_virt);
}
if (paged_copy) {
page_out(get_pml4(), copy_virt, 1);
m_addrs.free(copy_virt);
}
return copy;
}
@@ -246,10 +210,7 @@ page_manager::free_table_pages(void *pages, size_t count)
void *
page_manager::map_pages(uintptr_t address, size_t count, bool user, page_table *pml4)
{
if (!address) {
kassert(!user, "Cannot call map_pages with 0 address for user mapping");
address = m_addrs.allocate(count * frame_size);
}
kassert(address, "Cannot call map_pages with 0 address");
void *ret = reinterpret_cast<void *>(address);
if (!pml4) pml4 = get_pml4();
@@ -346,19 +307,21 @@ page_manager::unmap_pages(void* address, size_t count, page_table *pml4)
uintptr_t iaddr = reinterpret_cast<uintptr_t>(address);
page_out(pml4, iaddr, count, true);
if (iaddr >= kernel_offset) {
// TODO
// m_addrs.free(address, count);
}
}
bool
page_manager::fault_handler(uintptr_t addr)
{
if (!m_addrs.contains(addr))
if (!addr)
return false;
if (m_memory_initialized &&
g_kspace.get(addr) != kutil::vm_state::committed)
return false;
uintptr_t page = addr & ~0xfffull;
log::debug(logs::memory, "PF: attempting to page in %016lx for %016lx", page, addr);
bool user = addr < kernel_offset;
map_pages(page, 1, user);

View File

@@ -5,10 +5,9 @@
#include <stddef.h>
#include <stdint.h>
#include "kutil/address_manager.h"
#include "kutil/enum_bitfields.h"
#include "kutil/linked_list.h"
#include "kutil/slab_allocator.h"
#include "kutil/memory.h"
#include "frame_allocator.h"
#include "kernel_memory.h"
#include "page_table.h"
@@ -19,9 +18,9 @@ struct free_page_header;
class page_manager
{
public:
page_manager(
frame_allocator &frames,
kutil::address_manager &addrs);
/// Constructor.
/// \arg frames The frame allocator to get physical frames from
page_manager(frame_allocator &frames);
/// Helper to get the number of pages needed for a given number of bytes.
/// \arg bytes The number of bytes desired
@@ -67,8 +66,7 @@ public:
page_table_indices index = {});
/// Allocate and map pages into virtual memory.
/// \arg address The virtual address at which to map the pages, or zero
/// for any free kernel space.
/// \arg address The virtual address at which to map the pages
/// \arg count The number of pages to map
/// \arg user True is this memory is user-accessible
/// \arg pml4 The pml4 to map into - null for the current one
@@ -176,7 +174,8 @@ private:
free_page_header *m_page_cache; ///< Cache of free pages to use for tables
frame_allocator &m_frames;
kutil::address_manager &m_addrs;
bool m_memory_initialized;
friend class memory_bootstrap;
page_manager(const page_manager &) = delete;
@@ -211,7 +210,7 @@ page_table_align(T p)
/// Bootstrap the memory managers.
kutil::allocator & memory_initialize(
void memory_initialize(
uint16_t scratch_pages,
const void *memory_map,
size_t map_length,

View File

@@ -2,6 +2,7 @@
/// \file avl_tree.h
/// Templated container class for an AVL tree
#include <algorithm>
#include <stdint.h>
#include "kutil/assert.h"
@@ -220,6 +221,14 @@ public:
using item_type = T;
using node_type = avl_node<T>;
avl_tree() = default;
/// Move constructor. Takes over the other tree's root pointer and
/// node count, then resets the source to an empty tree so both
/// objects can be safely destroyed without double-freeing nodes.
avl_tree(avl_tree &&other) :
m_count(other.m_count), m_root(other.m_root)
{
// Leave `other` in a valid empty state (no root, zero count).
other.m_root = nullptr;
other.m_count = 0;
}
inline node_type * root() { return m_root; }
inline unsigned count() const { return m_count; }
@@ -235,7 +244,7 @@ public:
private:
unsigned m_count {0};
node_type *m_root;
node_type *m_root {nullptr};
};
} // namespace kutil

View File

@@ -29,9 +29,17 @@ struct vm_range
}
};
/// Tracks a region of virtual memory address space
class vm_space
{
public:
/// Default constructor. Define an empty range.
vm_space();
/// Constructor. Define a range of managed VM space.
/// \arg start Starting address of the managed space
/// \arg size Size of the managed space, in bytes
/// \arg alloc Allocator to use for tracking objects
vm_space(uintptr_t start, size_t size, kutil::allocator &alloc);
/// Reserve a section of address space.

View File

@@ -22,6 +22,12 @@ vm_space::vm_space(uintptr_t start, size_t size, allocator &alloc) :
start, start+size);
}
/// Default constructor. Defines an empty range: both the slab used
/// for tracking nodes and the backing allocator are set to
/// allocator::invalid.
// NOTE(review): allocator::invalid is presumably a non-functional
// sentinel, so a default-constructed vm_space cannot track anything
// until replaced with a configured instance — confirm against the
// allocator definition.
vm_space::vm_space() :
m_slab(allocator::invalid),
m_alloc(allocator::invalid)
{
}
inline static bool
overlaps(node_type *node, uintptr_t start, size_t size)
{