Mirror of https://github.com/justinian/jsix.git, synced 2025-12-10 08:24:32 -08:00
[boot][kernel] Replace frame allocator with bitmap-based one
The previous frame allocator involved a lot of splitting and merging linked lists and lost all information about frames while they were allocated. The new allocator is based on an array of descriptor structures and a bitmap. Each memory map region of allocatable memory becomes one or more descriptors, each mapping up to 1GiB of physical memory. The descriptors implement two levels of a bitmap tree, and have a pointer into the large contiguous bitmap to track individual pages.
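To illustrate the descriptor layout and the tree walk described above, here is a small standalone sketch. It is not code from this commit: the names demo_block and first_free_frame are made up for the example, and __builtin_ctzll stands in for the tzcnt-based bsf() helper the kernel itself uses.

    #include <cstdint>
    #include <cstdio>

    // One descriptor covers up to 64*64*64 = 262144 frames (1GiB of 4KiB pages).
    struct demo_block {
        uint64_t map1;            // level 1: bit i set => map2[i] contains a free frame
        uint64_t map2[64];        // level 2: bit j set => bitmap word i*64+j contains a free frame
        uint64_t bitmap[64 * 64]; // level 3: one bit per frame, set = free
    };

    // Walk the two summary levels, then the bitmap, to find the first free frame.
    long first_free_frame(const demo_block &blk) {
        if (!blk.map1) return -1; // block is completely allocated
        unsigned o1 = __builtin_ctzll(blk.map1);
        unsigned o2 = __builtin_ctzll(blk.map2[o1]);
        unsigned o3 = __builtin_ctzll(blk.bitmap[(o1 << 6) + o2]);
        return (static_cast<long>(o1) << 12) + (o2 << 6) + o3;
    }

    int main() {
        demo_block blk {};
        // Mark frame 70000 free by setting its bit at every level of the tree.
        unsigned f = 70000;
        unsigned o1 = (f >> 12) & 0x3f, o2 = (f >> 6) & 0x3f, o3 = f & 0x3f;
        blk.bitmap[(o1 << 6) + o2] |= 1ull << o3;
        blk.map2[o1] |= 1ull << o2;
        blk.map1 |= 1ull << o1;
        std::printf("first free frame: %ld\n", first_free_frame(blk)); // prints 70000
        return 0;
    }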
@@ -55,7 +55,7 @@ console::console(uefi::boot_services *bs, uefi::protos::simple_text_output *out)
     m_out->output_string(GIT_VERSION_WIDE);
 
     m_out->set_attribute(uefi::attribute::light_gray);
-    m_out->output_string(L" booting...\r\n\n");
+    m_out->output_string(L" booting...\r\n");
 
     if (m_fb.type != kernel::args::fb_type::none) {
         wchar_t const * type = nullptr;
@@ -27,7 +27,7 @@ load_file(
     fs::file file = disk.open(path);
     buffer b = file.load(type);
 
-    console::print(L" Loaded at: 0x%lx, %d bytes\r\n", b.data, b.size);
+    //console::print(L" Loaded at: 0x%lx, %d bytes\r\n", b.data, b.size);
 
     return b;
 }
@@ -191,11 +191,14 @@ efi_main(uefi::handle image, uefi::system_table *st)
     args->video = con.fb();
     status_bar status {con.fb()}; // Switch to fb status display
 
     // Map the kernel to the appropriate address
     args::program &kernel = args->programs[0];
     for (auto &section : kernel.sections)
         if (section.size)
             paging::map_section(args, section);
 
+    memory::fix_frame_blocks(args);
+
     kernel::entrypoint kentry =
         reinterpret_cast<kernel::entrypoint>(kernel.entrypoint);
     status.next();
@@ -14,6 +14,8 @@ namespace memory {
 
 using mem_entry = kernel::args::mem_entry;
 using mem_type = kernel::args::mem_type;
+using frame_block = kernel::args::frame_block;
+using kernel::args::frames_per_block;
 
 size_t fixup_pointer_index = 0;
 void **fixup_pointers[64];
@@ -107,30 +109,144 @@ can_merge(mem_entry &prev, mem_type type, uefi::memory_descriptor *next)
 }
 
 void
-get_uefi_mappings(efi_mem_map *map, bool allocate, uefi::boot_services *bs)
+get_uefi_mappings(efi_mem_map &map, uefi::boot_services *bs)
 {
-    size_t length = 0;
+    size_t length = map.total;
     uefi::status status = bs->get_memory_map(
-        &length, nullptr, &map->key, &map->size, &map->version);
+        &length, map.entries, &map.key, &map.size, &map.version);
+    map.length = length;
+
+    if (status == uefi::status::success)
+        return;
+
     if (status != uefi::status::buffer_too_small)
         error::raise(status, L"Error getting memory map size");
 
-    map->length = length;
+    if (map.entries) {
+        try_or_raise(
+            bs->free_pool(reinterpret_cast<void*>(map.entries)),
+            L"Freeing previous memory map space");
+    }
 
-    if (allocate) {
-        map->length += 10*map->size;
+    map.total = length + 10*map.size;
 
     try_or_raise(
         bs->allocate_pool(
-            uefi::memory_type::loader_data, map->length,
-            reinterpret_cast<void**>(&map->entries)),
+            uefi::memory_type::loader_data, map.total,
+            reinterpret_cast<void**>(&map.entries)),
         L"Allocating space for memory map");
 
+    map.length = map.total;
     try_or_raise(
-        bs->get_memory_map(&map->length, map->entries, &map->key, &map->size, &map->version),
+        bs->get_memory_map(&map.length, map.entries, &map.key, &map.size, &map.version),
         L"Getting UEFI memory map");
 }
 
+inline size_t bitmap_size(size_t frames) { return (frames + 63) / 64; }
+inline size_t num_blocks(size_t frames) { return (frames + (frames_per_block-1)) / frames_per_block; }
+
+void
+build_kernel_frame_blocks(const mem_entry *map, size_t nent, kernel::args::header *args, uefi::boot_services *bs)
+{
+    status_line status {L"Creating kernel frame accounting map"};
+
+    size_t block_count = 0;
+    size_t total_bitmap_size = 0;
+    for (size_t i = 0; i < nent; ++i) {
+        const mem_entry &ent = map[i];
+        if (ent.type != mem_type::free)
+            continue;
+
+        block_count += num_blocks(ent.pages);
+        total_bitmap_size += bitmap_size(ent.pages) * sizeof(uint64_t);
+    }
+
+    size_t total_size = block_count * sizeof(frame_block) + total_bitmap_size;
+
+    frame_block *blocks = nullptr;
+    try_or_raise(
+        bs->allocate_pages(
+            uefi::allocate_type::any_pages,
+            uefi::memory_type::loader_data,
+            bytes_to_pages(total_size),
+            reinterpret_cast<void**>(&blocks)),
+        L"Error allocating kernel frame block space");
+
+    frame_block *next_block = blocks;
+    for (size_t i = 0; i < nent; ++i) {
+        const mem_entry &ent = map[i];
+        if (ent.type != mem_type::free)
+            continue;
+
+        size_t page_count = ent.pages;
+        uintptr_t base_addr = ent.start;
+        while (page_count) {
+            frame_block *blk = next_block++;
+            bs->set_mem(blk, sizeof(frame_block), 0);
+
+            blk->attrs = ent.attr;
+            blk->base = base_addr;
+            base_addr += frames_per_block * page_size;
+
+            if (page_count >= frames_per_block) {
+                page_count -= frames_per_block;
+                blk->count = frames_per_block;
+                blk->map1 = ~0ull;
+                bs->set_mem(blk->map2, sizeof(blk->map2), 0xff);
+            } else {
+                blk->count = page_count;
+                unsigned i = 0;
+
+                uint64_t b1 = (page_count + 4095) / 4096;
+                blk->map1 = (1 << b1) - 1;
+
+                uint64_t b2 = (page_count + 63) / 64;
+                uint64_t b2q = b2 / 64;
+                uint64_t b2r = b2 % 64;
+                bs->set_mem(blk->map2, b2q, 0xff);
+                blk->map2[b2q] = (1 << b2r) - 1;
+                break;
+            }
+        }
+    }
+
+    uint64_t *bitmap = reinterpret_cast<uint64_t*>(next_block);
+    bs->set_mem(bitmap, total_bitmap_size, 0);
+    for (unsigned i = 0; i < block_count; ++i) {
+        frame_block &blk = blocks[i];
+        blk.bitmap = bitmap;
+
+        size_t b = blk.count / 64;
+        size_t r = blk.count % 64;
+        bs->set_mem(blk.bitmap, b*8, 0xff);
+        blk.bitmap[b] = (1 << r) - 1;
+
+        bitmap += bitmap_size(blk.count);
+    }
+
+    args->frame_block_count = block_count;
+    args->frame_block_pages = bytes_to_pages(total_size);
+    args->frame_blocks = blocks;
+}
+
+void
+fix_frame_blocks(kernel::args::header *args)
+{
+    // Map the frame blocks to the appropriate address
+    paging::map_pages(args,
+        reinterpret_cast<uintptr_t>(args->frame_blocks),
+        ::memory::bitmap_start,
+        args->frame_block_pages,
+        true, false);
+
+    uintptr_t offset = ::memory::bitmap_start -
+        reinterpret_cast<uintptr_t>(args->frame_blocks);
+
+    for (unsigned i = 0; i < args->frame_block_count; ++i) {
+        frame_block &blk = args->frame_blocks[i];
+        blk.bitmap = reinterpret_cast<uint64_t*>(
+            reinterpret_cast<uintptr_t>(blk.bitmap) + offset);
+    }
+}
 
 efi_mem_map
@@ -139,7 +255,7 @@ build_kernel_mem_map(kernel::args::header *args, uefi::boot_services *bs)
     status_line status {L"Creating kernel memory map"};
 
     efi_mem_map map;
-    get_uefi_mappings(&map, false, bs);
+    get_uefi_mappings(map, bs);
 
     size_t map_size = map.num_entries() * sizeof(mem_entry);
@@ -153,9 +269,9 @@ build_kernel_mem_map(kernel::args::header *args, uefi::boot_services *bs)
         L"Error allocating kernel memory map module space");
 
     bs->set_mem(kernel_map, map_size, 0);
-    get_uefi_mappings(&map, true, bs);
+    get_uefi_mappings(map, bs);
 
-    size_t i = 0;
+    size_t nent = 0;
     bool first = true;
     for (auto desc : map) {
         /*
@@ -176,11 +292,8 @@ build_kernel_mem_map(kernel::args::header *args, uefi::boot_services *bs)
         case uefi::memory_type::boot_services_code:
         case uefi::memory_type::boot_services_data:
         case uefi::memory_type::conventional_memory:
             type = mem_type::free;
             break;
 
         case uefi::memory_type::loader_data:
-            type = mem_type::pending;
+            type = mem_type::free;
             break;
 
         case uefi::memory_type::runtime_services_code:
@@ -210,18 +323,18 @@ build_kernel_mem_map(kernel::args::header *args, uefi::boot_services *bs)
         // TODO: validate uefi's map is sorted
         if (first) {
             first = false;
-            kernel_map[i].start = desc->physical_start;
-            kernel_map[i].pages = desc->number_of_pages;
-            kernel_map[i].type = type;
-            kernel_map[i].attr = (desc->attribute & 0xffffffff);
+            kernel_map[nent].start = desc->physical_start;
+            kernel_map[nent].pages = desc->number_of_pages;
+            kernel_map[nent].type = type;
+            kernel_map[nent].attr = (desc->attribute & 0xffffffff);
             continue;
         }
 
-        mem_entry &prev = kernel_map[i];
+        mem_entry &prev = kernel_map[nent];
         if (can_merge(prev, type, desc)) {
             prev.pages += desc->number_of_pages;
         } else {
-            mem_entry &next = kernel_map[++i];
+            mem_entry &next = kernel_map[++nent];
             next.start = desc->physical_start;
             next.pages = desc->number_of_pages;
             next.type = type;
@@ -231,17 +344,19 @@ build_kernel_mem_map(kernel::args::header *args, uefi::boot_services *bs)
 
     // Give just the actually-set entries in the header
     args->mem_map = kernel_map;
-    args->map_count = i;
+    args->map_count = nent;
 
     /*
     // kernel map dump
-    for (unsigned i = 0; i < args->map_count; ++i) {
+    for (unsigned i = 0; i < nent; ++i) {
         const kernel::args::mem_entry &e = kernel_map[i];
         console::print(L" Range %lx (%lx) %x(%s) [%lu]\r\n",
             e.start, e.attr, e.type, kernel_memory_type_name(e.type), e.pages);
     }
     */
 
+    build_kernel_frame_blocks(kernel_map, nent, args, bs);
+    get_uefi_mappings(map, bs);
     return map;
 }
@@ -41,12 +41,13 @@ struct efi_mem_map
     using iterator = offset_iterator<desc>;
 
     size_t length;    ///< Total length of the map data
+    size_t total;     ///< Total allocated space for map data
     size_t size;      ///< Size of an entry in the array
     size_t key;       ///< Key for detecting changes
     uint32_t version; ///< Version of the `memory_descriptor` struct
     desc *entries;    ///< The array of UEFI descriptors
 
-    efi_mem_map() : length(0), size(0), key(0), version(0), entries(nullptr) {}
+    efi_mem_map() : length(0), total(0), size(0), key(0), version(0), entries(nullptr) {}
 
     /// Get the count of entries in the array
     inline size_t num_entries() const { return length / size; }
@@ -62,6 +63,14 @@ struct efi_mem_map
 /// \returns The uefi memory map used to build the kernel map
 efi_mem_map build_kernel_mem_map(kernel::args::header *args, uefi::boot_services *bs);
 
+/// Create the kernel frame allocation maps
+void build_kernel_frame_blocks(
+    const kernel::args::mem_entry *map, size_t nent,
+    kernel::args::header *args, uefi::boot_services *bs);
+
+/// Map the frame allocation maps to the right spot and fix up pointers
+void fix_frame_blocks(kernel::args::header *args);
+
 /// Activate the given memory mappings. Sets the given page tables live as well
 /// as informs UEFI runtime services of the new mappings.
 /// \arg pml4 The root page table for the new mappings
@@ -212,6 +212,7 @@ allocate_tables(kernel::args::header *args, uefi::boot_services *bs)
     page_table *pml4 = reinterpret_cast<page_table*>(addr);
 
     args->pml4 = pml4;
+    args->table_pages = tables_needed;
     args->table_count = tables_needed - 1;
     args->page_tables = offset_ptr<void>(addr, page_size);
@@ -220,7 +221,7 @@ allocate_tables(kernel::args::header *args, uefi::boot_services *bs)
     add_kernel_pds(pml4, args->page_tables, args->table_count);
     add_offset_mappings(pml4, args->page_tables, args->table_count);
 
-    console::print(L" Set up initial mappings, %d spare tables.\r\n", args->table_count);
+    //console::print(L" Set up initial mappings, %d spare tables.\r\n", args->table_count);
 }
 
 template <typename E>
@@ -231,32 +232,31 @@ constexpr bool has_flag(E set, E flag) {
 }
 
 void
-map_section(
+map_pages(
     kernel::args::header *args,
-    const kernel::args::program_section &section)
+    uintptr_t phys, uintptr_t virt,
+    size_t count, bool write_flag, bool exe_flag)
 {
+    if (!count)
+        return;
+
     paging::page_table *pml4 =
         reinterpret_cast<paging::page_table*>(args->pml4);
 
-    size_t pages = memory::bytes_to_pages(section.size);
     page_entry_iterator<4> iterator{
-        section.virt_addr, pml4,
+        virt, pml4,
         args->page_tables,
         args->table_count};
 
-    using kernel::args::section_flags;
-
     uint64_t flags = page_flags;
-    if (!has_flag(section.type, section_flags::execute))
+    if (!exe_flag)
         flags |= (1ull << 63); // set NX bit
 
-    if (has_flag(section.type, section_flags::write))
+    if (write_flag)
         flags |= 2;
 
-    uintptr_t phys = section.phys_addr;
     while (true) {
         *iterator = phys | flags;
-        if (--pages == 0)
+        if (--count == 0)
             break;
 
         iterator.increment();
@@ -264,6 +264,24 @@ map_section(
     }
 }
 
+void
+map_section(
+    kernel::args::header *args,
+    const kernel::args::program_section &section)
+{
+    using kernel::args::section_flags;
+
+    size_t pages = memory::bytes_to_pages(section.size);
+
+    map_pages(
+        args,
+        section.phys_addr,
+        section.virt_addr,
+        pages,
+        has_flag(section.type, section_flags::write),
+        has_flag(section.type, section_flags::execute));
+}
+
 } // namespace paging
 } // namespace boot
@@ -38,6 +38,14 @@ void allocate_tables(
 /// tables in the current PML4.
 void add_current_mappings(page_table *new_pml4);
 
+/// Map physical memory pages to virtual addresses in the given page tables.
+/// \arg args The kernel args header, used for the page table cache and pml4
+/// \arg section The program section to load
+void map_pages(
+    kernel::args::header *args,
+    uintptr_t phys, uintptr_t virt,
+    size_t count, bool write_flag, bool exe_flag);
+
 /// Map a program section in physical memory to its virtual address in the
 /// given page tables.
 /// \arg args The kernel args header, used for the page table cache and pml4
@@ -75,6 +75,18 @@ struct mem_entry
     uint32_t attr;
 };
 
+constexpr size_t frames_per_block = 64 * 64 * 64;
+
+struct frame_block
+{
+    uintptr_t base;
+    uint32_t count;
+    uint32_t attrs;
+    uint64_t map1;
+    uint64_t map2[64];
+    uint64_t *bitmap;
+};
+
 enum class boot_flags : uint16_t {
     none = 0x0000,
     debug = 0x0001
@@ -88,6 +100,7 @@ struct header {
     void *pml4;
     void *page_tables;
     size_t table_count;
+    size_t table_pages;
 
     program *programs;
     size_t num_programs;
@@ -98,6 +111,10 @@ struct header {
     mem_entry *mem_map;
     size_t map_count;
 
+    frame_block *frame_blocks;
+    size_t frame_block_count;
+    size_t frame_block_pages;
+
     void *runtime_services;
     void *acpi_table;
@@ -35,11 +35,17 @@ namespace memory {
 constexpr uintptr_t stacks_start = heap_start - kernel_max_stacks;
 
 /// Max size of kernel buffers area
-constexpr size_t kernel_max_buffers = 0x10000000000ull; // 1TiB
+constexpr size_t kernel_max_buffers = 0x8000000000ull; // 512GiB
 
 /// Start of kernel buffers
 constexpr uintptr_t buffers_start = stacks_start - kernel_max_buffers;
 
+/// Max size of kernel bitmap area
+constexpr size_t kernel_max_bitmap = 0x8000000000ull; // 512GiB
+
+/// Start of kernel bitmap
+constexpr uintptr_t bitmap_start = buffers_start - kernel_max_bitmap;
+
 /// First kernel space PML4 entry
 constexpr unsigned pml4e_kernel = 256;
@@ -2,20 +2,11 @@
 #include "kutil/assert.h"
 #include "kutil/memory.h"
 #include "frame_allocator.h"
+#include "kernel_args.h"
 #include "kernel_memory.h"
 #include "log.h"
 
 using memory::frame_size;
 using memory::page_offset;
-using frame_block_node = kutil::list_node<frame_block>;
-
-int
-frame_block::compare(const frame_block &rhs) const
-{
-    if (address < rhs.address)
-        return -1;
-    else if (address > rhs.address)
-        return 1;
-    return 0;
-}
 
 frame_allocator &
@@ -25,54 +16,142 @@ frame_allocator::get()
     return g_frame_allocator;
 }
 
-frame_allocator::frame_allocator() {}
+frame_allocator::frame_allocator(kernel::args::frame_block *frames, size_t count) :
+    m_blocks(frames),
+    m_count(count)
+{
+}
+
+inline unsigned
+bsf(uint64_t v)
+{
+    asm ("tzcntq %q0, %q1" : "=r"(v) : "r"(v) : "cc");
+    return v;
+}
 
 size_t
 frame_allocator::allocate(size_t count, uintptr_t *address)
 {
-    kassert(!m_free.empty(), "frame_allocator::pop_frames ran out of free frames!");
-    if (m_free.empty())
-        return 0;
+    for (long i = m_count - 1; i >= 0; ++i) {
+        frame_block &block = m_blocks[i];
 
-    auto *first = m_free.front();
+        if (!block.map1)
+            continue;
+
+        // Tree walk to find the first available page
+        unsigned o1 = bsf(block.map1);
+
+        uint64_t m2 = block.map2[o1];
+        unsigned o2 = bsf(m2);
+
+        uint64_t m3 = block.bitmap[(o1 << 6) + o2];
+        unsigned o3 = bsf(m3);
+
+        unsigned frame = (o1 << 12) + (o2 << 6) + o3;
+
+        // See how many contiguous pages are here
+        unsigned n = bsf(~m3 >> o3);
+        if (n > count)
+            n = count;
+
+        *address = block.base + frame * frame_size;
+
+        // Clear the bits to mark these pages allocated
+        m3 &= ~(((1 << n) - 1) << o3);
+        block.bitmap[(o1 << 6) + o2] = m3;
+        if (!m3) {
+            // if that was it for this group, clear the next level bit
+            m2 &= ~(1 << o2);
+            block.map2[o1] = m2;
+
+            if (!m2) {
+                // if that was cleared too, update the top level
+                block.map1 &= ~(1 << o1);
+            }
+        }
 
-    if (count >= first->count) {
-        *address = first->address;
-        m_free.remove(first);
-        return first->count;
-    } else {
-        first->count -= count;
-        *address = first->address + (first->count * frame_size);
-        return count;
-    }
-}
+    }
 
-inline uintptr_t end(frame_block *node) { return node->address + node->count * frame_size; }
+    kassert(false, "frame_allocator ran out of free frames!");
+    return 0;
 }
 
 void
 frame_allocator::free(uintptr_t address, size_t count)
 {
     kassert(address % frame_size == 0, "Trying to free a non page-aligned frame!");
 
-    frame_block_node *node =
-        reinterpret_cast<frame_block_node*>(address + page_offset);
+    if (!count)
+        return;
 
-    kutil::memset(node, 0, sizeof(frame_block_node));
-    node->address = address;
-    node->count = count;
+    for (long i = 0; i < m_count; ++i) {
+        frame_block &block = m_blocks[i];
+        uintptr_t end = block.base + block.count * frame_size;
 
-    m_free.sorted_insert(node);
+        if (address < block.base || address >= end)
+            continue;
 
-    frame_block_node *next = node->next();
-    if (next && end(node) == next->address) {
-        node->count += next->count;
-        m_free.remove(next);
+        uint64_t frame = (address - block.base) >> 12;
+        unsigned o1 = (frame >> 12) & 0x3f;
+        unsigned o2 = (frame >> 6) & 0x3f;
+        unsigned o3 = frame & 0x3f;
+
+        while (count--) {
+            block.map1 |= (1 << o1);
+            block.map2[o1] |= (1 << o2);
+            block.bitmap[o2] |= (1 << o3);
+            if (++o3 == 64) {
+                o3 = 0;
+                if (++o2 == 64) {
+                    o2 = 0;
+                    ++o1;
+                    kassert(o1 < 64, "Tried to free pages past the end of a block");
+                }
+            }
+        }
     }
 }
 
+void
+frame_allocator::used(uintptr_t address, size_t count)
+{
+    kassert(address % frame_size == 0, "Trying to mark a non page-aligned frame!");
+
+    if (!count)
+        return;
+
+    for (long i = 0; i < m_count; ++i) {
+        frame_block &block = m_blocks[i];
+        uintptr_t end = block.base + block.count * frame_size;
+
+        if (address < block.base || address >= end)
+            continue;
+
+        uint64_t frame = (address - block.base) >> 12;
+        unsigned o1 = (frame >> 12) & 0x3f;
+        unsigned o2 = (frame >> 6) & 0x3f;
+        unsigned o3 = frame & 0x3f;
+
+        while (count--) {
+            block.bitmap[o2] &= ~(1 << o3);
+            if (!block.bitmap[o2]) {
+                block.map2[o1] &= ~(1 << o2);
+
+                if (!block.map2[o1]) {
+                    block.map1 &= ~(1 << o1);
+                }
+            }
+
+            if (++o3 == 64) {
+                o3 = 0;
+                if (++o2 == 64) {
+                    o2 = 0;
+                    ++o1;
+                    kassert(o1 < 64, "Tried to mark pages past the end of a block");
+                }
+            }
+        }
+    }
 
-    frame_block_node *prev = node->prev();
-    if (prev && end(prev) == address) {
-        prev->count += node->count;
-        m_free.remove(node);
-    }
 }
@@ -4,17 +4,21 @@
 
 #include <stdint.h>
 
-#include "kutil/linked_list.h"
+namespace kernel {
+namespace args {
 struct frame_block;
-using frame_block_list = kutil::linked_list<frame_block>;
+}}
 
 /// Allocator for physical memory frames
 class frame_allocator
 {
 public:
-    /// Default constructor
-    frame_allocator();
+    using frame_block = kernel::args::frame_block;
+
+    /// Constructor
+    /// \arg blocks The bootloader-supplied frame bitmap block list
+    /// \arg count Number of entries in the block list
+    frame_allocator(frame_block *frames, size_t count);
 
     /// Get free frames from the free list. Only frames from the first free block
     /// are returned, so the number may be less than requested, but they will
@@ -29,26 +33,18 @@ public:
     /// \arg count The number of frames to be freed
     void free(uintptr_t address, size_t count);
 
+    /// Mark frames as used
+    /// \arg address The physical address of the first frame to free
+    /// \arg count The number of frames to be freed
+    void used(uintptr_t address, size_t count);
+
     /// Get the global frame allocator
     static frame_allocator & get();
 
 private:
-    frame_block_list m_free; ///< Free frames list
+    frame_block *m_blocks;
+    long m_count;
 
+    frame_allocator() = delete;
     frame_allocator(const frame_allocator &) = delete;
 };
-
-
-/// A block of contiguous frames. Each `frame_block` represents contiguous
-/// physical frames with the same attributes.
-struct frame_block
-{
-    uintptr_t address;
-    uint32_t count;
-
-    /// Compare two blocks by address.
-    /// \arg rhs The right-hand comparator
-    /// \returns <0 if this is sorts earlier, >0 if this sorts later, 0 for equal
-    int compare(const frame_block &rhs) const;
-};
@@ -37,8 +37,8 @@ extern void __kernel_assert(const char *, unsigned, const char *);
 
 /// Bootstrap the memory managers.
 void setup_pat();
-void memory_initialize_pre_ctors(kernel::args::header *kargs);
-void memory_initialize_post_ctors(kernel::args::header *kargs);
+void memory_initialize_pre_ctors(kernel::args::header &kargs);
+void memory_initialize_post_ctors(kernel::args::header &kargs);
 
 using namespace kernel;
@@ -92,9 +92,9 @@ kernel_main(args::header *header)
     gdt_init();
     interrupts_init();
 
-    memory_initialize_pre_ctors(header);
+    memory_initialize_pre_ctors(*header);
     run_constructors();
-    memory_initialize_post_ctors(header);
+    memory_initialize_post_ctors(*header);
 
     for (size_t i = 0; i < header->num_modules; ++i) {
         args::module &mod = header->modules[i];
@@ -14,15 +14,8 @@
 #include "objects/vm_area.h"
 #include "vm_space.h"
 
 using memory::frame_size;
-using memory::heap_start;
-using memory::kernel_max_heap;
-using memory::kernel_offset;
+using memory::heap_start;
 using memory::page_offset;
-using memory::pml4e_kernel;
-using memory::pml4e_offset;
-using memory::table_entries;
 
 using namespace kernel;
@@ -61,45 +54,64 @@ void * kalloc(size_t size) { return g_kernel_heap.allocate(size); }
 void kfree(void *p) { return g_kernel_heap.free(p); }
 }
 
-/*
-void walk_page_table(
-    page_table *table,
-    page_table::level level,
-    uintptr_t &current_start,
-    size_t &current_bytes,
-    vm_area &karea)
+void
+memory_initialize_pre_ctors(args::header &kargs)
 {
-    constexpr size_t huge_page_size = (1ull<<30);
-    constexpr size_t large_page_size = (1ull<<21);
+    using kernel::args::frame_block;
 
-    for (unsigned i = 0; i < table_entries; ++i) {
-        page_table *next = table->get(i);
-        if (!next) {
-            if (current_bytes)
-                karea.commit(current_start, current_bytes);
-            current_start = 0;
-            current_bytes = 0;
-            continue;
-        } else if (table->is_page(level, i)) {
-            if (!current_bytes)
-                current_start = reinterpret_cast<uintptr_t>(next);
-            current_bytes +=
-                (level == page_table::level::pt
-                    ? frame_size
-                    : level == page_table::level::pd
-                        ? large_page_size
-                        : huge_page_size);
-        } else {
-            page_table::level deeper =
-                static_cast<page_table::level>(
-                    static_cast<unsigned>(level) + 1);
+    new (&g_kernel_heap) kutil::heap_allocator {heap_start, kernel_max_heap};
 
-            walk_page_table(
-                next, deeper, current_start, current_bytes, kspace);
+    frame_block *blocks = reinterpret_cast<frame_block*>(memory::bitmap_start);
+    new (&g_frame_allocator) frame_allocator {blocks, kargs.frame_block_count};
+
+    // Mark all the things the bootloader allocated for us as used
+    g_frame_allocator.used(
+        reinterpret_cast<uintptr_t>(kargs.frame_blocks),
+        kargs.frame_block_pages);
+
+    g_frame_allocator.used(
+        reinterpret_cast<uintptr_t>(kargs.pml4),
+        kargs.table_pages);
+
+    for (unsigned i = 0; i < kargs.num_modules; ++i) {
+        const kernel::args::module &mod = kargs.modules[i];
+        g_frame_allocator.used(
+            reinterpret_cast<uintptr_t>(mod.location),
+            memory::page_count(mod.size));
+    }
+
+    for (unsigned i = 0; i < kargs.num_programs; ++i) {
+        const kernel::args::program &prog = kargs.programs[i];
+        for (auto &sect : prog.sections) {
+            if (!sect.size) continue;
+            g_frame_allocator.used(
+                sect.phys_addr,
+                memory::page_count(sect.size));
+        }
+    }
+
+    page_table *kpml4 = reinterpret_cast<page_table*>(kargs.pml4);
+    process *kp = process::create_kernel_process(kpml4);
+    vm_space &vm = kp->space();
+
+    vm_area *heap = new (&g_kernel_heap_area)
+        vm_area_open(kernel_max_heap, vm, vm_flags::write);
+
+    vm.add(heap_start, heap);
+}
+
+void
+memory_initialize_post_ctors(args::header &kargs)
+{
+    vm_space &vm = vm_space::kernel_space();
+    vm.add(memory::stacks_start, &g_kernel_stacks);
+    vm.add(memory::buffers_start, &g_kernel_buffers);
+
+    g_frame_allocator.free(
+        reinterpret_cast<uintptr_t>(kargs.page_tables),
+        kargs.table_count);
 
 }
-*/
 
 static void
 log_mtrrs()
@@ -166,60 +178,3 @@ setup_pat()
 }
 
 
-void
-memory_initialize_pre_ctors(args::header *kargs)
-{
-    new (&g_kernel_heap) kutil::heap_allocator {heap_start, kernel_max_heap};
-    new (&g_frame_allocator) frame_allocator;
-
-    args::mem_entry *entries = kargs->mem_map;
-    const size_t count = kargs->map_count;
-    for (unsigned i = 0; i < count; ++i) {
-        // TODO: use entry attributes
-        // TODO: copy anything we need from "pending" memory and free it
-        args::mem_entry &e = entries[i];
-        if (e.type == args::mem_type::free)
-            g_frame_allocator.free(e.start, e.pages);
-    }
-
-    page_table *kpml4 = reinterpret_cast<page_table*>(kargs->pml4);
-    process *kp = process::create_kernel_process(kpml4);
-    vm_space &vm = kp->space();
-
-    vm_area *heap = new (&g_kernel_heap_area)
-        vm_area_open(memory::kernel_max_heap, vm, vm_flags::write);
-
-    vm.add(memory::heap_start, heap);
-}
-
-void
-memory_initialize_post_ctors(args::header *kargs)
-{
-    /*
-    uintptr_t current_start = 0;
-    size_t current_bytes = 0;
-
-    // TODO: Should we exclude the top of this area? (eg, buffers, stacks, etc)
-    page_table *kpml4 = reinterpret_cast<page_table*>(kargs->pml4);
-    for (unsigned i = pml4e_kernel; i < pml4e_offset; ++i) {
-        page_table *pdp = kpml4->get(i);
-        kassert(pdp, "Bootloader did not create all kernelspace PDs");
-
-        walk_page_table(
-            pdp, page_table::level::pdp,
-            current_start, current_bytes,
-            g_kernel_space);
-    }
-
-    if (current_bytes)
-        g_kernel_space.commit(current_start, current_bytes);
-    */
-    vm_space &vm = vm_space::kernel_space();
-    vm.add(memory::stacks_start, &g_kernel_stacks);
-    vm.add(memory::buffers_start, &g_kernel_buffers);
-
-    g_frame_allocator.free(
-        reinterpret_cast<uintptr_t>(kargs->page_tables),
-        kargs->table_count);
-
-}
@@ -13,6 +13,7 @@ CPU_FEATURE_REQ(pat, 0x00000001, 0, edx, 16)
 CPU_FEATURE_REQ(fxsr, 0x00000001, 0, edx, 24)
 
 CPU_FEATURE_OPT(fsgsbase, 0x00000007, 0, ebx, 0)
+CPU_FEATURE_OPT(bmi1, 0x00000007, 0, ebx, 3)
 CPU_FEATURE_OPT(invpcid, 0x00000007, 0, ebx, 10)
 
 CPU_FEATURE_OPT(pku, 0x00000007, 0, ecx, 3)