[memory] Rework memory_initialize for new loader
Created a new `memory_initialize()` function that uses the new-style kernel args structure from the new bootloader. Additionally:

* Fixed a hard-coded interrupt EOI address that didn't work with the new memory locations
* Made `page_manager::fault_handler()` automatically grant pages in the kernel heap

Tags: boot page fault
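The heap-granting behavior is the piece easiest to miss in the diff: a page fault is now answered with a fresh page whenever the faulting address falls inside the kernel heap range, and only addresses outside that range still have to be committed in the kernel VM space first. Below is a minimal standalone sketch of that policy, not the kernel's code: `heap_start` and `kernel_max_heap` are placeholder values, and the committed-region lookup is reduced to a boolean parameter.

```cpp
#include <cstddef>
#include <cstdint>

// Placeholder values standing in for memory::heap_start and memory::kernel_max_heap.
constexpr uintptr_t heap_start = 0xffffc00000000000ull;
constexpr size_t kernel_max_heap = 0x40000000ull; // 1 GiB, arbitrary for the sketch

// Sketch of the fault-handler policy: null faults are never granted, kernel-heap
// faults are always granted, and anything else must already be a committed
// region of the kernel address space.
bool should_grant_page(uintptr_t addr, bool committed_in_kernel_space)
{
    if (!addr)
        return false;

    bool is_heap = addr >= heap_start &&
                   addr < heap_start + kernel_max_heap;

    return is_heap || committed_in_kernel_space;
}
```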
@@ -1,5 +1,7 @@
 #include <stdint.h>
 
+#include "kernel_memory.h"
+
 #include "apic.h"
 #include "console.h"
 #include "cpu.h"
@@ -15,6 +17,8 @@
 static const uint16_t PIC1 = 0x20;
 static const uint16_t PIC2 = 0xa0;
 
+constexpr uintptr_t apic_eoi_addr = 0xfee000b0 + ::memory::page_offset;
+
 extern "C" {
 void _halt();
 
@@ -273,7 +277,7 @@ isr_handler(cpu_state *regs)
 		print_stacktrace(2);
 		_halt();
 	}
-	*reinterpret_cast<uint32_t *>(0xffffff80fee000b0) = 0;
+	*reinterpret_cast<uint32_t *>(apic_eoi_addr) = 0;
 }
 
 void
@@ -290,5 +294,5 @@ irq_handler(cpu_state *regs)
 		_halt();
 	}
 
-	*reinterpret_cast<uint32_t *>(0xffffff80fee000b0) = 0;
+	*reinterpret_cast<uint32_t *>(apic_eoi_addr) = 0;
 }
@@ -84,13 +84,9 @@ kernel_main(args::header *header)
 	cpu_id cpu;
 	cpu.validate();
 
-	/*
-	memory_initialize(
-		header->scratch_pages,
-		header->memory_map,
-		header->memory_map_length,
-		header->memory_map_desc_size);
+	memory_initialize(header);
 
+	/*
 	kutil::allocator &heap = g_kernel_heap;
 
 	if (header->frame_buffer && header->frame_buffer_length) {
@@ -1,6 +1,8 @@
 #include <algorithm>
 #include <utility>
 
+#include "kernel_args.h"
+
 #include "kutil/assert.h"
 #include "kutil/heap_allocator.h"
 #include "kutil/vm_space.h"
@@ -16,232 +18,98 @@ using memory::kernel_max_heap;
 using memory::kernel_offset;
 using memory::page_offset;
 
-kutil::vm_space g_kspace;
+using namespace kernel;
+
+kutil::vm_space g_kernel_space;
 kutil::heap_allocator g_kernel_heap;
-bool g_memory_initialized = false;
 
 void * operator new(size_t size) { return g_kernel_heap.allocate(size); }
 void * operator new [] (size_t size) { return g_kernel_heap.allocate(size); }
 void operator delete (void *p) noexcept { return g_kernel_heap.free(p); }
 void operator delete [] (void *p) noexcept { return g_kernel_heap.free(p); }
 
-enum class efi_memory_type : uint32_t
+void walk_page_table(
+	page_table *table,
+	page_table::level level,
+	uintptr_t &current_start,
+	size_t &current_bytes,
+	kutil::vm_space &kspace)
 {
-	reserved,
-	loader_code,
-	loader_data,
-	boot_services_code,
-	boot_services_data,
-	runtime_services_code,
-	runtime_services_data,
-	available,
-	unusable,
-	acpi_reclaim,
-	acpi_nvs,
-	mmio,
-	mmio_port,
-	pal_code,
-	persistent,
-
-	efi_max,
-
-	jsix_kernel = 0x80000000,
-	jsix_data,
-	jsix_initrd,
-	jsix_scratch,
-
-	jsix_max
-};
-
-struct efi_memory_descriptor
-{
-	efi_memory_type type;
-	uint32_t pad;
-	uint64_t physical_start;
-	uint64_t virtual_start;
-	uint64_t pages;
-	uint64_t flags;
-};
-
-struct memory_map
-{
-	memory_map(const void *efi_map, size_t map_length, size_t desc_length) :
-		efi_map(efi_map), map_length(map_length), desc_length(desc_length) {}
-
-	class iterator
-	{
-	public:
-		iterator(const memory_map &map, efi_memory_descriptor const *item) :
-			map(map), item(item) {}
-
-		inline efi_memory_descriptor const * operator*() const { return item; }
-		inline bool operator!=(const iterator &other) { return item != other.item; }
-		inline iterator & operator++() {
-			item = kutil::offset_pointer(item, map.desc_length);
-			return *this;
-		}
-
-	private:
-		const memory_map &map;
-		efi_memory_descriptor const *item;
-	};
-
-	iterator begin() const {
-		return iterator(*this, reinterpret_cast<efi_memory_descriptor const *>(efi_map));
-	}
-
-	iterator end() const {
-		const void *end = kutil::offset_pointer(efi_map, map_length);
-		return iterator(*this, reinterpret_cast<efi_memory_descriptor const *>(end));
-	}
-
-	const void *efi_map;
-	size_t map_length;
-	size_t desc_length;
-};
-
-class memory_bootstrap
-{
-public:
-	memory_bootstrap(const void *memory_map, size_t map_length, size_t desc_length) :
-		map(memory_map, map_length, desc_length) {}
-
-	void add_free_frames(frame_allocator &fa) {
-		for (auto *desc : map) {
-			if (desc->type == efi_memory_type::loader_code ||
-				desc->type == efi_memory_type::loader_data ||
-				desc->type == efi_memory_type::boot_services_code ||
-				desc->type == efi_memory_type::boot_services_data ||
-				desc->type == efi_memory_type::available)
-			{
-				fa.free(desc->physical_start, desc->pages);
-			}
-		}
-	}
-
-	void add_used_frames(kutil::vm_space &vm) {
-		for (auto *desc : map) {
-			if (desc->type == efi_memory_type::jsix_data ||
-				desc->type == efi_memory_type::jsix_initrd ||
-				desc->type == efi_memory_type::jsix_kernel)
-			{
-				uintptr_t virt_addr = desc->physical_start + kernel_offset;
-				vm.commit(virt_addr, desc->pages * frame_size);
-			}
-		}
-	}
-
-	void page_in_kernel(page_manager &pm, page_table *pml4) {
-		for (auto *desc : map) {
-			if (desc->type == efi_memory_type::jsix_kernel ||
-				desc->type == efi_memory_type::jsix_data ||
-				desc->type == efi_memory_type::jsix_initrd)
-			{
-				uintptr_t virt_addr = desc->physical_start + kernel_offset;
-				pm.page_in(pml4, desc->physical_start, virt_addr, desc->pages);
-			}
-
-			if (desc->type == efi_memory_type::acpi_reclaim) {
-				pm.page_in(pml4, desc->physical_start, desc->physical_start, desc->pages);
-			}
-		}
-
-		// Put our new PML4 into CR3 to start using it
-		page_manager::set_pml4(pml4);
-		pm.m_kernel_pml4 = pml4;
-	}
-
-private:
-	const memory_map map;
-};
+	constexpr size_t huge_page_size = (1ull<<30);
+	constexpr size_t large_page_size = (1ull<<21);
+
+	for (unsigned i = 0; i < 512; ++i) {
+		page_table *next = table->get(i);
+		if (!next) {
+			kspace.commit(current_start, current_bytes);
+			current_start = 0;
+			current_bytes = 0;
+			continue;
+		} else if (table->is_page(level, i)) {
+			if (!current_bytes)
+				current_start = reinterpret_cast<uintptr_t>(next);
+			current_bytes +=
+				(level == page_table::level::pt
+				 ? frame_size
+				 : level == page_table::level::pd
+				 ? large_page_size
+				 : huge_page_size);
+		} else {
+			page_table::level deeper =
+				static_cast<page_table::level>(
+					static_cast<unsigned>(level) + 1);
+
+			walk_page_table(
+				next, deeper, current_start, current_bytes, kspace);
+		}
+	}
+}
 
 void
-memory_initialize(uint16_t scratch_pages, const void *memory_map, size_t map_length, size_t desc_length)
+memory_initialize(args::header *kargs)
 {
-	g_memory_initialized = false;
-
-	// make sure the options we want in CR4 are set
-	uint64_t cr4;
-	__asm__ __volatile__ ( "mov %%cr4, %0" : "=r" (cr4) );
-	cr4 |=
-		0x000080 | // Enable global pages
-		0x000200 | // Enable FXSAVE/FXRSTOR
-		0x010000 | // Enable FSGSBASE
-		0x020000 | // Enable PCIDs
-		0;
-	__asm__ __volatile__ ( "mov %0, %%cr4" :: "r" (cr4) );
-
-	// The bootloader reserved "scratch_pages" pages for page tables and
-	// scratch space, which we'll use to bootstrap. The first one is the
-	// already-installed PML4, so grab it from CR3.
-	uint64_t scratch_phys;
-	__asm__ __volatile__ ( "mov %%cr3, %0" : "=r" (scratch_phys) );
-	scratch_phys &= ~0xfffull;
-
-	// The tables are ident-mapped currently, so the cr3 physical address works. But let's
-	// get them into the offset-mapped area asap.
-	page_table *tables = reinterpret_cast<page_table *>(scratch_phys);
-
-	page_table *id_pml4 = &tables[0];
-	page_table *id_pdp = &tables[1];
-
-	// Flags: 0 0 0 0 1 1 0 0 0 0 0 1 1 = 0x0183
-	// | IGN | | | | | | | | +- Present
-	// | | | | | | | | +--- Writeable
-	// | | | | | | | +----- Supervisor only
-	// | | | | | | +------- PWT (determining memory type for page)
-	// | | | | | +---------- PCD (determining memory type for page)
-	// | | | | +------------ Accessed flag (not accessed yet)
-	// | | | +-------------- Dirty (not dirtied yet)
-	// | | +---------------- Page size (1GiB page)
-	// | +------------------- Global
-	// +---------------------------- PAT (determining memory type for page)
-	for (int i=0; i<512; ++i)
-		id_pdp->entries[i] = (static_cast<uintptr_t>(i) << 30) | 0x0183;
-
-	// Flags: 0 0 0 0 0 0 0 0 0 0 1 1 = 0x0003
-	// IGNORED | | | | | | | +- Present
-	//         | | | | | | +--- Writeable
-	//         | | | | | +----- Supervisor only
-	//         | | | | +------- PWT (determining memory type for pdpt)
-	//         | | | +---------- PCD (determining memory type for pdpt)
-	//         | | +------------ Accessed flag (not accessed yet)
-	//         | +-------------- Ignored
-	//         +---------------- Reserved 0
-	id_pml4->entries[511] = reinterpret_cast<uintptr_t>(id_pdp) | 0x003;
-
-	// Make sure the page table is finished updating before we write to memory
-	__sync_synchronize();
-	io_wait();
-
-	uintptr_t scratch_virt = scratch_phys + page_offset;
-	memory_bootstrap bootstrap {memory_map, map_length, desc_length};
-
-	// Now tell the frame allocator what's free
+	args::mem_entry *entries = kargs->mem_map;
+	size_t entry_count = kargs->num_map_entries;
+
+	new (&g_kernel_heap) kutil::heap_allocator {heap_start, kernel_max_heap};
+
 	frame_allocator *fa = new (&g_frame_allocator) frame_allocator;
-	bootstrap.add_free_frames(*fa);
-
-	new (&g_kernel_heap) kutil::heap_allocator(heap_start, kernel_max_heap);
-	new (&g_kspace) kutil::vm_space(kernel_offset, (page_offset-kernel_offset), g_kernel_heap);
-	bootstrap.add_used_frames(g_kspace);
+	for (unsigned i = 0; i < entry_count; ++i) {
+		// TODO: use entry attributes
+		args::mem_entry &e = entries[i];
+		if (e.type == args::mem_type::free)
+			fa->free(e.start, e.pages);
+	}
 
 	// Create the page manager
 	page_manager *pm = new (&g_page_manager) page_manager(*fa);
 
-	// Give the frame_allocator back the rest of the scratch pages
-	fa->free(scratch_phys + (3 * frame_size), scratch_pages - 3);
-
-	// Finally, build an acutal set of kernel page tables where we'll only add
-	// what the kernel actually has mapped, but making everything writable
-	// (especially the page tables themselves)
-	page_table *pml4 = &tables[2];
-	pml4 = kutil::offset_pointer(pml4, page_offset);
-
-	kutil::memset(pml4, 0, sizeof(page_table));
-	pml4->entries[511] = reinterpret_cast<uintptr_t>(id_pdp) | 0x003;
-
-	bootstrap.page_in_kernel(*pm, pml4);
-
-	// Reclaim the old PML4
-	fa->free(scratch_phys, 1);
+	new (&g_kernel_space) kutil::vm_space {
+		kernel_offset,
+		(page_offset-kernel_offset),
+		g_kernel_heap};
+
+	page_table *kpml4 = reinterpret_cast<page_table*>(kargs->pml4);
+
+	uintptr_t current_start = 0;
+	size_t current_bytes = 0;
+
+	// TODO: Should we exclude the top of this area? (eg, buffers, stacks, etc)
+	for (unsigned i = 256; i < 384; ++i) {
+		page_table *pdp = kpml4->get(i);
+
+		if (!pdp) {
+			g_kernel_space.commit(current_start, current_bytes);
+			current_start = 0;
+			current_bytes = 0;
+			continue;
+		}
+
+		walk_page_table(
+			pdp, page_table::level::pdp,
+			current_start, current_bytes,
+			g_kernel_space);
+	}
+
+	fa->free(reinterpret_cast<uintptr_t>(kargs->page_table_cache), kargs->num_free_tables);
 }
@@ -15,7 +15,7 @@ using memory::page_offset;
 using memory::page_mappable;
 
 page_manager g_page_manager(g_frame_allocator);
-extern kutil::vm_space g_kspace;
+extern kutil::vm_space g_kernel_space;
 
 // NB: in 4KiB page table entries, bit 7 isn't pagesize but PAT. Currently this
 // doesn't matter, becasue in the default PAT table, both 000 and 100 are WB.
@@ -47,8 +47,7 @@ struct free_page_header
 
 page_manager::page_manager(frame_allocator &frames) :
 	m_page_cache(nullptr),
-	m_frames(frames),
-	m_memory_initialized(false)
+	m_frames(frames)
 {
 }
 
@@ -322,8 +321,11 @@ page_manager::fault_handler(uintptr_t addr)
 	if (!addr)
 		return false;
 
-	if (m_memory_initialized &&
-		g_kspace.get(addr) != kutil::vm_state::committed)
+	bool is_heap = addr >= ::memory::heap_start &&
+		addr < ::memory::heap_start + ::memory::kernel_max_heap;
+
+	if (!is_heap &&
+		g_kernel_space.get(addr) != kutil::vm_state::committed)
 		return false;
 
 	uintptr_t page = addr & ~0xfffull;
@@ -175,8 +175,6 @@ private:
 
 	frame_allocator &m_frames;
 
-	bool m_memory_initialized;
-
 	friend class memory_bootstrap;
 	page_manager(const page_manager &) = delete;
 };
@@ -208,10 +206,11 @@ page_table_align(T p)
 	return ((p - 1) & ~0x1fffffull) + 0x200000;
 }
 
+namespace kernel {
+namespace args {
+	struct header;
+}
+}
+
 /// Bootstrap the memory managers.
-void memory_initialize(
-	uint16_t scratch_pages,
-	const void *memory_map,
-	size_t map_length,
-	size_t desc_length);
+void memory_initialize(kernel::args::header *mem_map);
@@ -36,6 +36,10 @@ struct page_table
 			(entries[i] & 0x80) == 0x80;
 	}
 
+	inline bool is_page(level l, int i) const {
+		return (l == level::pt) || is_large_page(l, i);
+	}
+
 	void dump(
 		level lvl = level::pml4,
 		bool recurse = true);