Allow page table copying and unmapping
Lots of rearranging in page_manager as well: constants move out to the new kernel_memory.h, and the page table helper structs move to page_table.h.
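
The new page_manager entry points read most clearly from the caller's side. Here is a minimal sketch of cloning and then tearing down a process address space with the methods added in this commit; the caller function itself (example_clone_and_discard) is hypothetical and not part of the change:

    #include "page_manager.h"

    // Hypothetical caller: duplicate the current address space, then discard it.
    void example_clone_and_discard()
    {
        page_manager *pm = page_manager::get();

        // copy_table() deep-copies user entries 0-509, allocating and copying a
        // frame for every mapped page; the kernel half (entries 510/511) is set
        // up separately, as create_process_map() does.
        page_table *copy = pm->copy_table(page_manager::get_pml4());

        // delete_process_map() now walks the whole tree via unmap_table(),
        // returning every frame and table page, instead of the old TODO stub.
        pm->delete_process_map(copy);
    }
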
src/kernel/kernel_memory.h (new file, 22 lines)
@@ -0,0 +1,22 @@
+#pragma once
+/// \file kernel_memory.h
+/// Constants related to the kernel's memory layout
+
+namespace memory {
+
+/// Size of a single page frame.
+static const size_t frame_size = 0x1000;
+
+/// Start of kernel memory.
+static const uintptr_t kernel_offset = 0xffffff0000000000;
+
+/// Offset from physical where page tables are mapped.
+static const uintptr_t page_offset = 0xffffff8000000000;
+
+/// Initial process thread's stack address
+static const uintptr_t initial_stack = 0x0000800000000000;
+
+/// Initial process thread's stack size, in pages
+static const unsigned initial_stack_pages = 1;
+
+} // namespace memory
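
Everything that used to reach into page_manager for these values now pulls them from memory::. A small illustrative sketch of how the constants combine; the helper names here are made up for the example and do not exist in the tree:

    #include <stddef.h>
    #include <stdint.h>
    #include "kernel_memory.h"

    // Physical frames are visible at a fixed offset in the page-table mapping area.
    inline void * example_offset_virt(uintptr_t phys)
    {
        return reinterpret_cast<void *>(phys + memory::page_offset);
    }

    // Number of 4 KiB frames needed to back a byte count, rounding up.
    inline size_t example_frames_for(size_t bytes)
    {
        return (bytes + memory::frame_size - 1) / memory::frame_size;
    }
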
@@ -11,21 +11,22 @@
 using kutil::frame_block;
 using kutil::frame_block_flags;
 using kutil::frame_block_list;
+using memory::frame_size;
+using memory::kernel_offset;
+using memory::page_offset;
 
 static const unsigned ident_page_flags = 0xb;
-static const size_t page_size = page_manager::page_size;
 
-extern kutil::frame_allocator g_frame_allocator;
-
+kutil::frame_allocator g_frame_allocator;
 kutil::address_manager g_kernel_address_manager;
 kutil::heap_manager g_kernel_heap_manager;
 
 void * mm_grow_callback(size_t length)
 {
-    kassert(length % page_manager::page_size == 0,
+    kassert(length % frame_size == 0,
         "Heap manager requested a fractional page.");
 
-    size_t pages = length / page_manager::page_size;
+    size_t pages = length / frame_size;
     log::info(logs::memory, "Heap manager growing heap by %d pages.", pages);
 
     uintptr_t addr = g_kernel_address_manager.allocate(length);
@@ -40,19 +41,19 @@ namespace {
 struct page_consumer
 {
     page_consumer(uintptr_t start, unsigned count, unsigned used = 0) :
-        current(start + used * page_size),
+        current(start + used * frame_size),
         used(used),
         max(count) {}
 
     void * get_page() {
         kassert(used++ < max, "page_consumer ran out of pages");
         void *retval = reinterpret_cast<void *>(current);
-        current += page_size;
+        current += frame_size;
         return retval;
     }
 
     void * operator()(size_t size) {
-        kassert(size == page_size, "page_consumer used with non-page size!");
+        kassert(size == frame_size, "page_consumer used with non-page size!");
         return get_page();
     }
 
@@ -233,7 +234,7 @@ page_in_ident(
                 // Do a 2MiB page instead
                 tables[2]->entries[idx[2]] = phys_addr | 0x80 | ident_page_flags;
 
-                phys_addr += page_size * 512;
+                phys_addr += frame_size * 512;
                 count -= 512;
                 if (count == 0) return pages_consumed;
                 continue;
@@ -245,7 +246,7 @@ page_in_ident(
 
             for (; idx[3] < 512; idx[3] += 1) {
                 tables[3]->entries[idx[3]] = phys_addr | ident_page_flags;
-                phys_addr += page_size;
+                phys_addr += frame_size;
                 if (--count == 0) return pages_consumed;
             }
         }
@@ -277,7 +278,7 @@ memory_initialize(uint16_t scratch_pages, const void *memory_map, size_t map_len
     // The tables are ident-mapped currently, so the cr3 physical address works. But let's
     // get them into the offset-mapped area asap.
    page_table *tables = reinterpret_cast<page_table *>(scratch_phys);
-    uintptr_t scratch_virt = scratch_phys + page_manager::page_offset;
+    uintptr_t scratch_virt = scratch_phys + page_offset;
 
    uint64_t used_pages = 1; // starts with PML4
    used_pages += page_in_ident(
@@ -295,7 +296,7 @@ memory_initialize(uint16_t scratch_pages, const void *memory_map, size_t map_len
     // taking inventory of free pages.
     page_consumer allocator(scratch_virt, scratch_pages, used_pages);
 
-    block_allocator block_slab(page_size, allocator);
+    block_allocator block_slab(frame_size, allocator);
     frame_block_list used;
     frame_block_list free;
 
@@ -305,15 +306,13 @@ memory_initialize(uint16_t scratch_pages, const void *memory_map, size_t map_len
     // Now go back through these lists and consolidate
     block_slab.append(frame_block::consolidate(free));
 
-    region_allocator region_slab(page_size, allocator);
+    region_allocator region_slab(frame_size, allocator);
     region_slab.allocate(); // Allocate some buddy regions for the address_manager
 
     kutil::address_manager *am =
         new (&g_kernel_address_manager) kutil::address_manager(std::move(region_slab));
 
-    am->add_regions(
-        page_manager::high_offset,
-        page_manager::page_offset - page_manager::high_offset);
+    am->add_regions(kernel_offset, page_offset - kernel_offset);
 
     // Finally, build an acutal set of kernel page tables that just contains
     // what the kernel actually has mapped, but making everything writable
@@ -323,7 +322,7 @@ memory_initialize(uint16_t scratch_pages, const void *memory_map, size_t map_len
 
     kutil::frame_allocator *fa =
         new (&g_frame_allocator) kutil::frame_allocator(std::move(block_slab));
-    page_manager *pm = new (&g_page_manager) page_manager(*fa);
+    page_manager *pm = new (&g_page_manager) page_manager(*fa, *am);
 
     // Give the rest to the page_manager's cache for use in page_in
     pm->free_table_pages(
@@ -339,15 +338,15 @@ memory_initialize(uint16_t scratch_pages, const void *memory_map, size_t map_len
         break;
 
     case frame_block_flags::map_kernel:
-        virt_addr = block->address + page_manager::high_offset;
+        virt_addr = block->address + kernel_offset;
         if (block->flags && frame_block_flags::permanent)
-            am->mark_permanent(virt_addr, block->count * page_size);
+            am->mark_permanent(virt_addr, block->count * frame_size);
         else
-            am->mark(virt_addr, block->count * page_size);
+            am->mark(virt_addr, block->count * frame_size);
         break;
 
     case frame_block_flags::map_offset:
-        virt_addr = block->address + page_manager::page_offset;
+        virt_addr = block->address + page_offset;
         break;
 
     default:
@@ -5,21 +5,28 @@
 #include "log.h"
 #include "page_manager.h"
 
-kutil::frame_allocator g_frame_allocator;
-page_manager g_page_manager(g_frame_allocator);
+using memory::frame_size;
+using memory::kernel_offset;
+using memory::page_offset;
+
+extern kutil::frame_allocator g_frame_allocator;
+extern kutil::address_manager g_kernel_address_manager;
+page_manager g_page_manager(
+    g_frame_allocator,
+    g_kernel_address_manager);
 
 
 static uintptr_t
 pt_to_phys(page_table *pt)
 {
-    return reinterpret_cast<uintptr_t>(pt) - page_manager::page_offset;
+    return reinterpret_cast<uintptr_t>(pt) - page_offset;
 }
 
 
 static page_table *
 pt_from_phys(uintptr_t p)
 {
-    return reinterpret_cast<page_table *>((p + page_manager::page_offset) & ~0xfffull);
+    return reinterpret_cast<page_table *>((p + page_offset) & ~0xfffull);
 }
 
 
@@ -30,9 +37,12 @@ struct free_page_header
 };
 
 
-page_manager::page_manager(kutil::frame_allocator &frames) :
+page_manager::page_manager(
+    kutil::frame_allocator &frames,
+    kutil::address_manager &addrs) :
     m_page_cache(nullptr),
-    m_frames(frames)
+    m_frames(frames),
+    m_addrs(addrs)
 {
 }
 
@@ -41,25 +51,82 @@ page_manager::create_process_map()
 {
     page_table *table = get_table_page();
 
-    kutil::memset(table, 0, page_size);
+    kutil::memset(table, 0, frame_size);
     table->entries[510] = m_kernel_pml4->entries[510];
     table->entries[511] = m_kernel_pml4->entries[511];
 
     // Create the initial user stack
     map_pages(
-        initial_stack - (initial_stack_pages * page_size),
-        initial_stack_pages,
+        memory::initial_stack - (memory::initial_stack_pages * frame_size),
+        memory::initial_stack_pages,
         true, // This is the ring3 stack, user = true
         table);
 
     return table;
 }
 
-void
-page_manager::delete_process_map(page_table *table)
+uintptr_t
+page_manager::copy_page(uintptr_t orig)
 {
-    // TODO: recurse table entries and mark them free
-    unmap_pages(table, 1);
+    uintptr_t virt = m_addrs.allocate(2 * frame_size);
+    uintptr_t copy = 0;
+
+    size_t n = m_frames.allocate(1, &copy);
+    kassert(n, "copy_page could not allocate page");
+
+    page_in(get_pml4(), orig, virt, 1);
+    page_in(get_pml4(), copy, virt + frame_size, 1);
+
+    kutil::memcpy(
+        reinterpret_cast<void *>(virt + frame_size),
+        reinterpret_cast<void *>(virt),
+        frame_size);
+
+    page_out(get_pml4(), virt, 2);
+
+    m_addrs.free(virt);
+    return copy;
+}
+
+page_table *
+page_manager::copy_table(page_table *from, page_table::level lvl)
+{
+    page_table *to = get_table_page();
+
+    const int max =
+        lvl == page_table::level::pml4 ?
+        510 :
+        512;
+
+    for (int i = 0; i < max; ++i) {
+        if (!from->is_present(i)) {
+            to->entries[i] = 0;
+            continue;
+        }
+
+        bool is_page =
+            lvl == page_table::level::pt ||
+            from->is_large_page(lvl, i);
+
+        if (is_page) {
+            uint16_t flags = from->entries[i] & 0xfffull;
+            uintptr_t orig = from->entries[i] & ~0xfffull;
+            to->entries[i] = copy_page(orig) | flags;
+        } else {
+            uint16_t flags = 0;
+            page_table *next_from = from->get(i, &flags);
+            page_table *next_to = copy_table(next_from, page_table::deeper(lvl));
+            to->set(i, next_to, flags);
+        }
+    }
+
+    return to;
+}
+
+void
+page_manager::delete_process_map(page_table *pml4)
+{
+    unmap_table(pml4, page_table::level::pml4, true);
 }
 
 void
@@ -67,7 +134,7 @@ page_manager::map_offset_pointer(void **pointer, size_t length)
 {
     uintptr_t *p = reinterpret_cast<uintptr_t *>(pointer);
     uintptr_t v = *p + page_offset;
-    uintptr_t c = ((length - 1) / page_size) + 1;
+    uintptr_t c = ((length - 1) / frame_size) + 1;
 
     page_table *pml4 = get_pml4();
     page_in(pml4, *p, v, c);
@@ -75,11 +142,10 @@ page_manager::map_offset_pointer(void **pointer, size_t length)
 }
 
 void
-page_manager::dump_pml4(page_table *pml4, int max_index)
+page_manager::dump_pml4(page_table *pml4, bool recurse)
 {
-    if (pml4 == nullptr)
-        pml4 = get_pml4();
-    pml4->dump(4, max_index);
+    if (pml4 == nullptr) pml4 = get_pml4();
+    pml4->dump(page_table::level::pml4, recurse);
 }
 
 page_table *
@@ -94,11 +160,11 @@ page_manager::get_table_page()
     m_page_cache = reinterpret_cast<free_page_header *>(virt);
 
     // The last one needs to be null, so do n-1
-    uintptr_t end = virt + (n-1) * page_size;
+    uintptr_t end = virt + (n-1) * frame_size;
     while (virt < end) {
         reinterpret_cast<free_page_header *>(virt)->next =
-            reinterpret_cast<free_page_header *>(virt + page_size);
-        virt += page_size;
+            reinterpret_cast<free_page_header *>(virt + frame_size);
+        virt += frame_size;
     }
     reinterpret_cast<free_page_header *>(virt)->next = nullptr;
 
@@ -115,7 +181,7 @@ page_manager::free_table_pages(void *pages, size_t count)
 {
     uintptr_t start = reinterpret_cast<uintptr_t>(pages);
     for (size_t i = 0; i < count; ++i) {
-        uintptr_t addr = start + (i * page_size);
+        uintptr_t addr = start + (i * frame_size);
         free_page_header *header = reinterpret_cast<free_page_header *>(addr);
         header->count = 1;
         header->next = m_page_cache;
@@ -138,7 +204,7 @@ page_manager::map_pages(uintptr_t address, size_t count, bool user, page_table *
 
         page_in(pml4, phys, address, n, user);
 
-        address += n * page_size;
+        address += n * frame_size;
         count -= n;
     }
 
@@ -146,10 +212,53 @@ page_manager::map_pages(uintptr_t address, size_t count, bool user, page_table *
 }
 
 void
-page_manager::unmap_pages(void* address, size_t count)
+page_manager::unmap_table(page_table *table, page_table::level lvl, bool free)
 {
-    // TODO: uh, actually unmap that shit??
-    m_frames.free(reinterpret_cast<uintptr_t>(address), count);
+    const int max =
+        lvl == page_table::level::pml4 ?
+        510 :
+        512;
+
+    uintptr_t free_start = 0;
+    uintptr_t free_count = 0;
+
+    size_t size =
+        lvl == page_table::level::pdp ? (1<<30) :
+        lvl == page_table::level::pd ? (1<<21) :
+        lvl == page_table::level::pt ? (1<<12) :
+        0;
+
+    for (int i = 0; i < max; ++i) {
+        if (!table->is_present(i)) continue;
+
+        bool is_page =
+            lvl == page_table::level::pt ||
+            table->is_large_page(lvl, i);
+
+        if (is_page) {
+            uintptr_t frame = table->entries[i] & ~0xfffull;
+            if (!free_count || free_start != frame + free_count * size) {
+                if (free_count && free)
+                    m_frames.free(free_start, free_count * size / frame_size);
+                free_start = frame;
+                free_count = 1;
+            }
+        } else {
+            page_table *next = table->get(i);
+            unmap_table(next, page_table::deeper(lvl), free);
+        }
+    }
+
+    if (free_count && free)
+        m_frames.free(free_start, free_count * size / frame_size);
+    free_table_pages(table, 1);
+}
+
+void
+page_manager::unmap_pages(void* address, size_t count, page_table *pml4)
+{
+    if (!pml4) pml4 = get_pml4();
+    page_out(pml4, reinterpret_cast<uintptr_t>(address), count, true);
 }
 
 void
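
The size ternary in unmap_table() encodes how much physical memory one present entry covers at its level, which is what lets runs of contiguous frames be handed back to the frame allocator in a single free() call. The same arithmetic written out as a standalone sketch; example_frames_per_entry is an illustrative name, not code from this commit:

    #include <stddef.h>
    #include "kernel_memory.h"
    #include "page_table.h"

    // 4 KiB frames covered by one present entry: 1 GiB at the PDP level
    // (1 << 30), 2 MiB at the PD level (1 << 21), one 4 KiB page at the PT
    // level (1 << 12). PML4 entries never map pages directly.
    inline size_t example_frames_per_entry(page_table::level lvl)
    {
        const size_t bytes =
            lvl == page_table::level::pdp ? (1ull << 30) :
            lvl == page_table::level::pd  ? (1ull << 21) :
            lvl == page_table::level::pt  ? (1ull << 12) : 0;
        return bytes / memory::frame_size; // 262144, 512, 1, or 0
    }
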
@@ -186,7 +295,7 @@ page_manager::page_in(page_table *pml4, uintptr_t phys_addr, uintptr_t virt_addr
                 tables[2]->get(idx[2]) == nullptr) {
                 // Do a 2MiB page instead
                 tables[2]->entries[idx[2]] = phys_addr | flags | 0x80;
-                phys_addr += page_size * 512;
+                phys_addr += frame_size * 512;
                 count -= 512;
                 if (count == 0) return;
                 continue;
@@ -197,7 +306,7 @@ page_manager::page_in(page_table *pml4, uintptr_t phys_addr, uintptr_t virt_addr
 
             for (; idx[3] < 512; idx[3] += 1) {
                 tables[3]->entries[idx[3]] = phys_addr | flags;
-                phys_addr += page_size;
+                phys_addr += frame_size;
                 if (--count == 0) return;
             }
         }
@@ -208,26 +317,41 @@ page_manager::page_in(page_table *pml4, uintptr_t phys_addr, uintptr_t virt_addr
 }
 
 void
-page_manager::page_out(page_table *pml4, uintptr_t virt_addr, size_t count)
+page_manager::page_out(page_table *pml4, uintptr_t virt_addr, size_t count, bool free)
 {
     page_table_indices idx{virt_addr};
     page_table *tables[4] = {pml4, nullptr, nullptr, nullptr};
 
+    bool found = false;
+    uintptr_t free_start = 0;
+    unsigned free_count = 0;
+
     for (; idx[0] < 512; idx[0] += 1) {
-        tables[1] = reinterpret_cast<page_table *>(
-            tables[0]->entries[idx[0]] & ~0xfffull);
+        tables[1] = tables[0]->get(idx[0]);
 
         for (; idx[1] < 512; idx[1] += 1) {
-            tables[2] = reinterpret_cast<page_table *>(
-                tables[1]->entries[idx[1]] & ~0xfffull);
+            tables[2] = tables[1]->get(idx[1]);
 
             for (; idx[2] < 512; idx[2] += 1) {
-                tables[3] = reinterpret_cast<page_table *>(
-                    tables[2]->entries[idx[2]] & ~0xfffull);
+                tables[3] = tables[2]->get(idx[2]);
 
                 for (; idx[3] < 512; idx[3] += 1) {
+                    uintptr_t entry = tables[3]->entries[idx[3]] & ~0xfffull;
+                    if (!found || entry != free_start + free_count * frame_size) {
+                        if (found && free) m_frames.free(free_start, free_count);
+                        free_start = tables[3]->entries[idx[3]] & ~0xfffull;
+                        free_count = 1;
+                        found = true;
+                    } else {
+                        free_count++;
+                    }
+
                     tables[3]->entries[idx[3]] = 0;
-                    if (--count == 0) return;
+                    if (--count == 0) {
+                        if (free) m_frames.free(free_start, free_count);
+                        return;
+                    }
                 }
             }
         }
@@ -237,40 +361,36 @@ page_manager::page_out(page_table *pml4, uintptr_t virt_addr, size_t count)
 }
 
 void
-page_table::dump(int level, int max_index, uint64_t offset)
+page_table::dump(page_table::level lvl, bool recurse)
 {
     console *cons = console::get();
 
-    cons->printf("\nLevel %d page table @ %lx (off %lx):\n", level, this, offset);
+    cons->printf("\nLevel %d page table @ %lx:\n", lvl, this);
     for (int i=0; i<512; ++i) {
         uint64_t ent = entries[i];
-        if (ent == 0) continue;
 
-        if ((ent & 0x1) == 0) {
+        if ((ent & 0x1) == 0)
            cons->printf(" %3d: %016lx NOT PRESENT\n", i, ent);
-            continue;
-        }
 
-        if ((level == 2 || level == 3) && (ent & 0x80) == 0x80) {
+        else if ((lvl == level::pdp || lvl == level::pd) && (ent & 0x80) == 0x80)
             cons->printf(" %3d: %016lx -> Large page at %016lx\n", i, ent, ent & ~0xfffull);
-            continue;
-        } else if (level == 1) {
+        else if (lvl == level::pt)
             cons->printf(" %3d: %016lx -> Page at %016lx\n", i, ent, ent & ~0xfffull);
-        } else {
+        else
             cons->printf(" %3d: %016lx -> Level %d table at %016lx\n",
-                i, ent, level - 1, (ent & ~0xfffull) + offset);
-            continue;
-        }
-    }
-
-    if (--level > 0) {
-        for (int i=0; i<=max_index; ++i) {
-            uint64_t ent = entries[i];
-            if ((ent & 0x1) == 0) continue;
-            if ((ent & 0x80)) continue;
-
-            page_table *next = reinterpret_cast<page_table *>((ent & ~0xffful) + offset);
-            next->dump(level, 511, offset);
+                i, ent, deeper(lvl), (ent & ~0xfffull) + page_offset);
+    }
+
+    if (lvl != level::pt && recurse) {
+        for (int i=0; i<=512; ++i) {
+            if (is_large_page(lvl, i))
+                continue;
+
+            page_table *next = get(i);
+            if (next)
+                next->dump(deeper(lvl), true);
         }
     }
 }
@@ -5,41 +5,30 @@
 #include <stddef.h>
 #include <stdint.h>
 
+#include "kutil/address_manager.h"
 #include "kutil/enum_bitfields.h"
 #include "kutil/frame_allocator.h"
 #include "kutil/linked_list.h"
 #include "kutil/slab_allocator.h"
+#include "kernel_memory.h"
+#include "page_table.h"
 
-struct page_table;
 struct free_page_header;
 
 /// Manager for allocation and mapping of pages
 class page_manager
 {
 public:
-    /// Size of a single page.
-    static const size_t page_size = 0x1000;
-
-    /// Start of the higher half.
-    static const uintptr_t high_offset = 0xffffff0000000000;
-
-    /// Offset from physical where page tables are mapped.
-    static const uintptr_t page_offset = 0xffffff8000000000;
-
-    /// Initial process thread's stack address
-    static const uintptr_t initial_stack = 0x0000800000000000;
-
-    /// Initial process thread's stack size, in pages
-    static const unsigned initial_stack_pages = 1;
-
-    page_manager(kutil::frame_allocator &frames);
+    page_manager(
+        kutil::frame_allocator &frames,
+        kutil::address_manager &addrs);
 
     /// Helper to get the number of pages needed for a given number of bytes.
     /// \arg bytes The number of bytes desired
     /// \returns The number of pages needed to contain the desired bytes
     static inline size_t page_count(size_t bytes)
     {
-        return (bytes - 1) / page_size + 1;
+        return (bytes - 1) / memory::frame_size + 1;
     }
 
     /// Helper to read the PML4 table from CR3.
@@ -48,14 +37,14 @@ public:
     {
         uintptr_t pml4 = 0;
         __asm__ __volatile__ ( "mov %%cr3, %0" : "=r" (pml4) );
-        return reinterpret_cast<page_table *>((pml4 & ~0xfffull) + page_offset);
+        return reinterpret_cast<page_table *>((pml4 & ~0xfffull) + memory::page_offset);
     }
 
     /// Helper to set the PML4 table pointer in CR3.
     /// \arg pml4 A pointer to the PML4 table to install.
     static inline void set_pml4(page_table *pml4)
     {
-        uintptr_t p = reinterpret_cast<uintptr_t>(pml4) - page_offset;
+        uintptr_t p = reinterpret_cast<uintptr_t>(pml4) - memory::page_offset;
         __asm__ __volatile__ ( "mov %0, %%cr3" :: "r" (p & ~0xfffull) );
     }
 
@@ -64,8 +53,16 @@ public:
     /// \returns A pointer to the PML4 table
     page_table * create_process_map();
 
-    /// Deallocate a process' PML4 table.
-    void delete_process_map(page_table *table);
+    /// Deallocate a process' PML4 table and entries.
+    /// \arg pml4 The process' PML4 table
+    void delete_process_map(page_table *pml4);
+
+    /// Copy a process' memory mappings (and memory pages).
+    /// \arg from Page table to copy from
+    /// \arg lvl Level of the given tables (default is PML4)
+    /// \returns The new page table
+    page_table * copy_table(page_table *from,
+        page_table::level lvl = page_table::level::pml4);
 
     /// Allocate and map pages into virtual memory.
     /// \arg address The virtual address at which to map the pages
@@ -75,10 +72,11 @@ public:
     /// \returns A pointer to the start of the mapped region
     void * map_pages(uintptr_t address, size_t count, bool user = false, page_table *pml4 = nullptr);
 
-    /// Unmap existing pages from memory.
+    /// Unmap and free existing pages from memory.
     /// \arg address The virtual address of the memory to unmap
     /// \arg count The number of pages to unmap
-    void unmap_pages(void *address, size_t count);
+    /// \arg pml4 The pml4 to unmap from - null for the current one
+    void unmap_pages(void *address, size_t count, page_table *pml4 = nullptr);
 
     /// Offset-map a pointer. No physical pages will be mapped.
     /// \arg pointer Pointer to a pointer to the memory area to be mapped
@@ -90,7 +88,7 @@ public:
     /// \returns Physical address of the memory pointed to by p
     inline uintptr_t offset_phys(void *p) const
     {
-        return reinterpret_cast<uintptr_t>(kutil::offset_pointer(p, -page_offset));
+        return reinterpret_cast<uintptr_t>(kutil::offset_pointer(p, -memory::page_offset));
     }
 
     /// Get the virtual address of an offset-mapped physical address
@@ -98,19 +96,24 @@ public:
     /// \returns Virtual address of the memory at address a
     inline void * offset_virt(uintptr_t a) const
     {
-        return kutil::offset_pointer(reinterpret_cast<void *>(a), page_offset);
+        return kutil::offset_pointer(reinterpret_cast<void *>(a), memory::page_offset);
     }
 
     /// Dump the given or current PML4 to the console
     /// \arg pml4 The page table to use, null for the current one
-    /// \arg max_index The max index of pml4 to print
-    void dump_pml4(page_table *pml4 = nullptr, int max_index = 511);
+    /// \arg recurse Whether to print sub-tables
+    void dump_pml4(page_table *pml4 = nullptr, bool recurse = true);
 
     /// Get the system page manager.
     /// \returns A pointer to the system page manager
     static page_manager * get();
 
 private:
+    /// Copy a physical page
+    /// \arg orig Physical address of the page to copy
+    /// \returns Physical address of the new page
+    uintptr_t copy_page(uintptr_t orig);
+
     /// Allocate a page for a page table, or pull one from the cache
     /// \returns An empty page mapped in page space
     page_table * get_table_page();
@@ -145,14 +148,21 @@ private:
     /// \arg pml4 The root page table for this mapping
     /// \arg virt_addr The starting virtual address ot the memory to be unmapped
     /// \arg count The number of pages to unmap
+    /// \arg free Whether to return the pages to the frame allocator
     void page_out(
         page_table *pml4,
         uintptr_t virt_addr,
-        size_t count);
+        size_t count,
+        bool free = false);
+
+    /// Low-level routine for unmapping an entire table of memory at once
+    void unmap_table(page_table *table, page_table::level lvl, bool free);
 
     page_table *m_kernel_pml4; ///< The PML4 of just kernel pages
     free_page_header *m_page_cache; ///< Cache of free pages to use for tables
 
     kutil::frame_allocator &m_frames;
+    kutil::address_manager &m_addrs;
 
     friend void memory_initialize(uint16_t, const void *, size_t, size_t);
     page_manager(const page_manager &) = delete;
@@ -164,44 +174,6 @@ extern page_manager g_page_manager;
 inline page_manager * page_manager::get() { return &g_page_manager; }
 
 
-/// Struct to allow easy accessing of a memory page being used as a page table.
-struct page_table
-{
-    using pm = page_manager;
-
-    uint64_t entries[512];
-
-    inline page_table * get(int i) const {
-        uint64_t entry = entries[i];
-        if ((entry & 0x1) == 0) return nullptr;
-        return reinterpret_cast<page_table *>((entry & ~0xfffull) + pm::page_offset);
-    }
-
-    inline void set(int i, page_table *p, uint16_t flags) {
-        entries[i] = (reinterpret_cast<uint64_t>(p) - pm::page_offset) | (flags & 0xfff);
-    }
-
-    void dump(int level = 4, int max_index = 511, uint64_t offset = page_manager::page_offset);
-};
-
-
-/// Helper struct for computing page table indices of a given address.
-struct page_table_indices
-{
-    page_table_indices(uint64_t v = 0);
-
-    uintptr_t addr() const;
-
-    inline operator uintptr_t() const { return addr(); }
-
-    /// Get the index for a given level of page table.
-    uint64_t & operator[](size_t i) { return index[i]; }
-    uint64_t operator[](size_t i) const { return index[i]; }
-    uint64_t index[4]; ///< Indices for each level of tables.
-};
-
-bool operator==(const page_table_indices &l, const page_table_indices &r);
-
 /// Calculate a page-aligned address.
 /// \arg p The address to align.
 /// \returns The next page-aligned address _after_ `p`.
@@ -209,8 +181,8 @@ template <typename T> inline T
 page_align(T p)
 {
     return reinterpret_cast<T>(
-        ((reinterpret_cast<uintptr_t>(p) - 1) & ~(page_manager::page_size - 1))
-        + page_manager::page_size);
+        ((reinterpret_cast<uintptr_t>(p) - 1) & ~(memory::frame_size - 1))
+        + memory::frame_size);
 }
 
 /// Calculate a page-table-aligned address. That is, an address that is
@@ -224,11 +196,5 @@ page_table_align(T p)
 }
 
 
-/// Calculate the number of pages needed for the give number of bytes.
-/// \arg n Number of bytes
-/// \returns Number of pages
-inline size_t page_count(size_t n) { return ((n - 1) / page_manager::page_size) + 1; }
-
-
 /// Bootstrap the memory managers.
 void memory_initialize(uint16_t scratch_pages, const void *memory_map, size_t map_length, size_t desc_length);
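
The alignment helpers that stay in this header are easy to sanity-check against the new memory::frame_size constant. A small illustrative check; example_alignment_checks is a made-up function, not part of the commit:

    #include <stdint.h>
    #include "page_manager.h"
    #include "kutil/assert.h"

    inline void example_alignment_checks()
    {
        // page_align() rounds up to the next 4 KiB boundary and leaves
        // already-aligned values alone.
        const char *p = reinterpret_cast<const char *>(0x1001);
        kassert(page_align(p) == reinterpret_cast<const char *>(0x2000), "expected round up");

        const char *q = reinterpret_cast<const char *>(0x2000);
        kassert(page_align(q) == q, "expected no change");

        // page_manager::page_count() rounds a byte count up to whole frames.
        kassert(page_manager::page_count(0x2001) == 3, "0x2001 bytes needs 3 frames");
    }
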
src/kernel/page_table.h (new file, 63 lines)
@@ -0,0 +1,63 @@
+#pragma once
+/// \file page_table.h
+/// Helper structures for dealing with page tables.
+
+#include <stdint.h>
+#include "kernel_memory.h"
+
+class page_manager;
+
+/// Struct to allow easy accessing of a memory page being used as a page table.
+struct page_table
+{
+    enum class level : unsigned { pml4, pdp, pd, pt };
+    inline static level deeper(level l) {
+        return static_cast<level>(static_cast<unsigned>(l) + 1);
+    }
+
+    uint64_t entries[512];
+
+    inline page_table * get(int i, uint16_t *flags = nullptr) const {
+        uint64_t entry = entries[i];
+        if ((entry & 0x1) == 0) return nullptr;
+        if (flags) *flags = entry & 0xfffull;
+        return reinterpret_cast<page_table *>((entry & ~0xfffull) + memory::page_offset);
+    }
+
+    inline void set(int i, page_table *p, uint16_t flags) {
+        entries[i] = (reinterpret_cast<uint64_t>(p) - memory::page_offset) | (flags & 0xfff);
+    }
+
+    inline bool is_present(int i) const { return (entries[i] & 0x1) == 0x1; }
+
+    inline bool is_large_page(level l, int i) const {
+        return
+            (l == level::pdp || l == level::pd) &&
+            (entries[i] & 0x80) == 0x80;
+    }
+
+    void dump(
+        level lvl = level::pml4,
+        bool recurse = true);
+};
+
+
+/// Helper struct for computing page table indices of a given address.
+struct page_table_indices
+{
+    page_table_indices(uint64_t v = 0);
+
+    uintptr_t addr() const;
+
+    inline operator uintptr_t() const { return addr(); }
+
+    /// Get the index for a given level of page table.
+    uint64_t & operator[](int i) { return index[i]; }
+    uint64_t operator[](int i) const { return index[i]; }
+    uint64_t & operator[](page_table::level i) { return index[static_cast<unsigned>(i)]; }
+    uint64_t operator[](page_table::level i) const { return index[static_cast<unsigned>(i)]; }
+    uint64_t index[4]; ///< Indices for each level of tables.
+};
+
+bool operator==(const page_table_indices &l, const page_table_indices &r);
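
page_table_indices is declared here but its constructor is defined elsewhere and is not part of this diff; presumably it splits a canonical virtual address into the four 9-bit table indices, along these lines (a sketch of the assumed decomposition, with a made-up helper name, not the actual definition):

    #include <stdint.h>

    // Assumed decomposition for x86-64 4-level paging: bits 39-47 index the
    // PML4, 30-38 the PDP, 21-29 the PD, and 12-20 the PT.
    inline void example_split_address(uint64_t v, uint64_t idx[4])
    {
        idx[0] = (v >> 39) & 0x1ff; // level::pml4
        idx[1] = (v >> 30) & 0x1ff; // level::pdp
        idx[2] = (v >> 21) & 0x1ff; // level::pd
        idx[3] = (v >> 12) & 0x1ff; // level::pt
    }
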
@@ -4,6 +4,7 @@
 #include "gdt.h"
 #include "interrupts.h"
 #include "io.h"
+#include "kernel_memory.h"
 #include "log.h"
 #include "msr.h"
 #include "page_manager.h"
@@ -12,6 +13,8 @@
 #include "elf/elf.h"
 #include "kutil/assert.h"
 
+using memory::initial_stack;
+
 scheduler scheduler::s_instance(nullptr);
 
 const int stack_size = 0x1000;
@@ -67,7 +70,7 @@ load_process(const void *image_start, size_t bytes, process *proc, cpu_state sta
         if (header->type != elf::segment_type::load)
             continue;
 
-        uintptr_t aligned = header->vaddr & ~(page_manager::page_size - 1);
+        uintptr_t aligned = header->vaddr & ~(memory::frame_size - 1);
         size_t size = (header->vaddr + header->mem_size) - aligned;
         size_t pages = page_manager::page_count(size);
 
@@ -80,7 +83,7 @@ load_process(const void *image_start, size_t bytes, process *proc, cpu_state sta
         void *mapped = pager->map_pages(aligned, pages, true);
         kassert(mapped, "Tried to map userspace pages and failed!");
 
-        kutil::memset(mapped, 0, pages * page_manager::page_size);
+        kutil::memset(mapped, 0, pages * memory::frame_size);
     }
 
     const unsigned section_count = image.section_count();
@@ -132,7 +135,7 @@ scheduler::create_process(const char *name, const void *data, size_t size)
     state->cs = cs;
     state->rflags = rflags_int;
     state->rip = 0; // to be filled by the loader
-    state->user_rsp = page_manager::initial_stack;
+    state->user_rsp = initial_stack;
 
     // Next state in the stack is the loader's kernel stack. The scheduler will
     // iret to this which will kick off the loading: