[memory] Rework memory_initialize for new loader

Created a new `memory_initialize()` function that uses the new-style
kernel args structure from the new bootloader.

Additionally:
* Fixed a hard-coded interrupt EOI address that didn't work with new
  memory locations
* Made the `page_manager::fault_handler()` automatically grant pages
  in the kernel heap

Tags: boot page fault
This commit is contained in:
Justin C. Miller
2020-05-24 16:27:48 -07:00
parent fc3d919f25
commit 35b1d37df0
6 changed files with 95 additions and 222 deletions

View File

@@ -1,5 +1,7 @@
#include <stdint.h>
#include "kernel_memory.h"
#include "apic.h"
#include "console.h"
#include "cpu.h"
@@ -15,6 +17,8 @@
static const uint16_t PIC1 = 0x20;
static const uint16_t PIC2 = 0xa0;
constexpr uintptr_t apic_eoi_addr = 0xfee000b0 + ::memory::page_offset;
extern "C" {
void _halt();
@@ -273,7 +277,7 @@ isr_handler(cpu_state *regs)
print_stacktrace(2);
_halt();
}
*reinterpret_cast<uint32_t *>(0xffffff80fee000b0) = 0;
*reinterpret_cast<uint32_t *>(apic_eoi_addr) = 0;
}
void
@@ -290,5 +294,5 @@ irq_handler(cpu_state *regs)
_halt();
}
*reinterpret_cast<uint32_t *>(0xffffff80fee000b0) = 0;
*reinterpret_cast<uint32_t *>(apic_eoi_addr) = 0;
}

View File

@@ -84,13 +84,9 @@ kernel_main(args::header *header)
cpu_id cpu;
cpu.validate();
/*
memory_initialize(
header->scratch_pages,
header->memory_map,
header->memory_map_length,
header->memory_map_desc_size);
memory_initialize(header);
/*
kutil::allocator &heap = g_kernel_heap;
if (header->frame_buffer && header->frame_buffer_length) {

View File

@@ -1,6 +1,8 @@
#include <algorithm>
#include <utility>
#include "kernel_args.h"
#include "kutil/assert.h"
#include "kutil/heap_allocator.h"
#include "kutil/vm_space.h"
@@ -16,232 +18,98 @@ using memory::kernel_max_heap;
using memory::kernel_offset;
using memory::page_offset;
kutil::vm_space g_kspace;
using namespace kernel;
kutil::vm_space g_kernel_space;
kutil::heap_allocator g_kernel_heap;
bool g_memory_initialized = false;
void * operator new(size_t size) { return g_kernel_heap.allocate(size); }
void * operator new [] (size_t size) { return g_kernel_heap.allocate(size); }
void operator delete (void *p) noexcept { return g_kernel_heap.free(p); }
void operator delete [] (void *p) noexcept { return g_kernel_heap.free(p); }
enum class efi_memory_type : uint32_t
void walk_page_table(
page_table *table,
page_table::level level,
uintptr_t &current_start,
size_t &current_bytes,
kutil::vm_space &kspace)
{
reserved,
loader_code,
loader_data,
boot_services_code,
boot_services_data,
runtime_services_code,
runtime_services_data,
available,
unusable,
acpi_reclaim,
acpi_nvs,
mmio,
mmio_port,
pal_code,
persistent,
constexpr size_t huge_page_size = (1ull<<30);
constexpr size_t large_page_size = (1ull<<21);
efi_max,
for (unsigned i = 0; i < 512; ++i) {
page_table *next = table->get(i);
if (!next) {
kspace.commit(current_start, current_bytes);
current_start = 0;
current_bytes = 0;
continue;
} else if (table->is_page(level, i)) {
if (!current_bytes)
current_start = reinterpret_cast<uintptr_t>(next);
current_bytes +=
(level == page_table::level::pt
? frame_size
: level == page_table::level::pd
? large_page_size
: huge_page_size);
} else {
page_table::level deeper =
static_cast<page_table::level>(
static_cast<unsigned>(level) + 1);
jsix_kernel = 0x80000000,
jsix_data,
jsix_initrd,
jsix_scratch,
jsix_max
};
struct efi_memory_descriptor
{
efi_memory_type type;
uint32_t pad;
uint64_t physical_start;
uint64_t virtual_start;
uint64_t pages;
uint64_t flags;
};
struct memory_map
{
memory_map(const void *efi_map, size_t map_length, size_t desc_length) :
efi_map(efi_map), map_length(map_length), desc_length(desc_length) {}
class iterator
{
public:
iterator(const memory_map &map, efi_memory_descriptor const *item) :
map(map), item(item) {}
inline efi_memory_descriptor const * operator*() const { return item; }
inline bool operator!=(const iterator &other) { return item != other.item; }
inline iterator & operator++() {
item = kutil::offset_pointer(item, map.desc_length);
return *this;
}
private:
const memory_map &map;
efi_memory_descriptor const *item;
};
iterator begin() const {
return iterator(*this, reinterpret_cast<efi_memory_descriptor const *>(efi_map));
}
iterator end() const {
const void *end = kutil::offset_pointer(efi_map, map_length);
return iterator(*this, reinterpret_cast<efi_memory_descriptor const *>(end));
}
const void *efi_map;
size_t map_length;
size_t desc_length;
};
class memory_bootstrap
{
public:
memory_bootstrap(const void *memory_map, size_t map_length, size_t desc_length) :
map(memory_map, map_length, desc_length) {}
void add_free_frames(frame_allocator &fa) {
for (auto *desc : map) {
if (desc->type == efi_memory_type::loader_code ||
desc->type == efi_memory_type::loader_data ||
desc->type == efi_memory_type::boot_services_code ||
desc->type == efi_memory_type::boot_services_data ||
desc->type == efi_memory_type::available)
{
fa.free(desc->physical_start, desc->pages);
walk_page_table(
next, deeper, current_start, current_bytes, kspace);
}
}
}
void add_used_frames(kutil::vm_space &vm) {
for (auto *desc : map) {
if (desc->type == efi_memory_type::jsix_data ||
desc->type == efi_memory_type::jsix_initrd ||
desc->type == efi_memory_type::jsix_kernel)
{
uintptr_t virt_addr = desc->physical_start + kernel_offset;
vm.commit(virt_addr, desc->pages * frame_size);
}
}
}
void page_in_kernel(page_manager &pm, page_table *pml4) {
for (auto *desc : map) {
if (desc->type == efi_memory_type::jsix_kernel ||
desc->type == efi_memory_type::jsix_data ||
desc->type == efi_memory_type::jsix_initrd)
{
uintptr_t virt_addr = desc->physical_start + kernel_offset;
pm.page_in(pml4, desc->physical_start, virt_addr, desc->pages);
}
if (desc->type == efi_memory_type::acpi_reclaim) {
pm.page_in(pml4, desc->physical_start, desc->physical_start, desc->pages);
}
}
// Put our new PML4 into CR3 to start using it
page_manager::set_pml4(pml4);
pm.m_kernel_pml4 = pml4;
}
private:
const memory_map map;
};
void
memory_initialize(uint16_t scratch_pages, const void *memory_map, size_t map_length, size_t desc_length)
memory_initialize(args::header *kargs)
{
g_memory_initialized = false;
args::mem_entry *entries = kargs->mem_map;
size_t entry_count = kargs->num_map_entries;
// make sure the options we want in CR4 are set
uint64_t cr4;
__asm__ __volatile__ ( "mov %%cr4, %0" : "=r" (cr4) );
cr4 |=
0x000080 | // Enable global pages
0x000200 | // Enable FXSAVE/FXRSTOR
0x010000 | // Enable FSGSBASE
0x020000 | // Enable PCIDs
0;
__asm__ __volatile__ ( "mov %0, %%cr4" :: "r" (cr4) );
new (&g_kernel_heap) kutil::heap_allocator {heap_start, kernel_max_heap};
// The bootloader reserved "scratch_pages" pages for page tables and
// scratch space, which we'll use to bootstrap. The first one is the
// already-installed PML4, so grab it from CR3.
uint64_t scratch_phys;
__asm__ __volatile__ ( "mov %%cr3, %0" : "=r" (scratch_phys) );
scratch_phys &= ~0xfffull;
// The tables are ident-mapped currently, so the cr3 physical address works. But let's
// get them into the offset-mapped area asap.
page_table *tables = reinterpret_cast<page_table *>(scratch_phys);
page_table *id_pml4 = &tables[0];
page_table *id_pdp = &tables[1];
// Flags: 0 0 0 0 1 1 0 0 0 0 0 1 1 = 0x0183
// | IGN | | | | | | | | +- Present
// | | | | | | | | +--- Writeable
// | | | | | | | +----- Supervisor only
// | | | | | | +------- PWT (determining memory type for page)
// | | | | | +---------- PCD (determining memory type for page)
// | | | | +------------ Accessed flag (not accessed yet)
// | | | +-------------- Dirty (not dirtied yet)
// | | +---------------- Page size (1GiB page)
// | +------------------- Global
// +---------------------------- PAT (determining memory type for page)
for (int i=0; i<512; ++i)
id_pdp->entries[i] = (static_cast<uintptr_t>(i) << 30) | 0x0183;
// Flags: 0 0 0 0 0 0 0 0 0 0 1 1 = 0x0003
// IGNORED | | | | | | | +- Present
// | | | | | | +--- Writeable
// | | | | | +----- Supervisor only
// | | | | +------- PWT (determining memory type for pdpt)
// | | | +---------- PCD (determining memory type for pdpt)
// | | +------------ Accessed flag (not accessed yet)
// | +-------------- Ignored
// +---------------- Reserved 0
id_pml4->entries[511] = reinterpret_cast<uintptr_t>(id_pdp) | 0x003;
// Make sure the page table is finished updating before we write to memory
__sync_synchronize();
io_wait();
uintptr_t scratch_virt = scratch_phys + page_offset;
memory_bootstrap bootstrap {memory_map, map_length, desc_length};
// Now tell the frame allocator what's free
frame_allocator *fa = new (&g_frame_allocator) frame_allocator;
bootstrap.add_free_frames(*fa);
new (&g_kernel_heap) kutil::heap_allocator(heap_start, kernel_max_heap);
new (&g_kspace) kutil::vm_space(kernel_offset, (page_offset-kernel_offset), g_kernel_heap);
bootstrap.add_used_frames(g_kspace);
for (unsigned i = 0; i < entry_count; ++i) {
// TODO: use entry attributes
args::mem_entry &e = entries[i];
if (e.type == args::mem_type::free)
fa->free(e.start, e.pages);
}
// Create the page manager
page_manager *pm = new (&g_page_manager) page_manager(*fa);
// Give the frame_allocator back the rest of the scratch pages
fa->free(scratch_phys + (3 * frame_size), scratch_pages - 3);
new (&g_kernel_space) kutil::vm_space {
kernel_offset,
(page_offset-kernel_offset),
g_kernel_heap};
// Finally, build an actual set of kernel page tables where we'll only add
// what the kernel actually has mapped, but making everything writable
// (especially the page tables themselves)
page_table *pml4 = &tables[2];
pml4 = kutil::offset_pointer(pml4, page_offset);
page_table *kpml4 = reinterpret_cast<page_table*>(kargs->pml4);
kutil::memset(pml4, 0, sizeof(page_table));
pml4->entries[511] = reinterpret_cast<uintptr_t>(id_pdp) | 0x003;
uintptr_t current_start = 0;
size_t current_bytes = 0;
bootstrap.page_in_kernel(*pm, pml4);
// TODO: Should we exclude the top of this area? (eg, buffers, stacks, etc)
for (unsigned i = 256; i < 384; ++i) {
page_table *pdp = kpml4->get(i);
// Reclaim the old PML4
fa->free(scratch_phys, 1);
if (!pdp) {
g_kernel_space.commit(current_start, current_bytes);
current_start = 0;
current_bytes = 0;
continue;
}
walk_page_table(
pdp, page_table::level::pdp,
current_start, current_bytes,
g_kernel_space);
}
fa->free(reinterpret_cast<uintptr_t>(kargs->page_table_cache), kargs->num_free_tables);
}

View File

@@ -15,7 +15,7 @@ using memory::page_offset;
using memory::page_mappable;
page_manager g_page_manager(g_frame_allocator);
extern kutil::vm_space g_kspace;
extern kutil::vm_space g_kernel_space;
// NB: in 4KiB page table entries, bit 7 isn't pagesize but PAT. Currently this
// doesn't matter, because in the default PAT table, both 000 and 100 are WB.
@@ -47,8 +47,7 @@ struct free_page_header
page_manager::page_manager(frame_allocator &frames) :
m_page_cache(nullptr),
m_frames(frames),
m_memory_initialized(false)
m_frames(frames)
{
}
@@ -322,8 +321,11 @@ page_manager::fault_handler(uintptr_t addr)
if (!addr)
return false;
if (m_memory_initialized &&
g_kspace.get(addr) != kutil::vm_state::committed)
bool is_heap = addr >= ::memory::heap_start &&
addr < ::memory::heap_start + ::memory::kernel_max_heap;
if (!is_heap &&
g_kernel_space.get(addr) != kutil::vm_state::committed)
return false;
uintptr_t page = addr & ~0xfffull;

View File

@@ -175,8 +175,6 @@ private:
frame_allocator &m_frames;
bool m_memory_initialized;
friend class memory_bootstrap;
page_manager(const page_manager &) = delete;
};
@@ -208,10 +206,11 @@ page_table_align(T p)
return ((p - 1) & ~0x1fffffull) + 0x200000;
}
namespace kernel {
namespace args {
struct header;
}
}
/// Bootstrap the memory managers.
void memory_initialize(
uint16_t scratch_pages,
const void *memory_map,
size_t map_length,
size_t desc_length);
void memory_initialize(kernel::args::header *mem_map);

View File

@@ -36,6 +36,10 @@ struct page_table
(entries[i] & 0x80) == 0x80;
}
/// Check whether entry `i` of this table at level `l` maps a page
/// directly (i.e. is a leaf), rather than pointing at a lower-level
/// table. Entries in a PT always map pages; at higher levels the entry
/// is a page only when the large-page bit is set (see is_large_page).
inline bool is_page(level l, int i) const {
return (l == level::pt) || is_large_page(l, i);
}
void dump(
level lvl = level::pml4,
bool recurse = true);