jsix/src/kernel/page_tree.cpp
Justin C. Miller c1d9b35e7c [bootproto] Create new bootproto lib
This is a rather large commit, broadly focused on cleaning things
out of the 'junk drawer' that is src/include. Most notably, several
things that were put in there because they needed somewhere the
kernel, boot, and init could all read them have been moved to a new
lib, 'bootproto'.

- Moved kernel_args.h and init_args.h to bootproto as kernel.h and
  init.h, respectively.

- Moved counted.h and pointer_manipulation.h into util, renaming the
  latter to util/pointers.h.

- Created a new src/include/arch for very arch-dependent definitions,
  and moved some kernel_memory.h constants like frame size, page table
  entry count, etc. to arch/amd64/memory.h. Also created arch/memory.h,
  which detects the platform and includes the former.

- Got rid of kernel_memory.h entirely in favor of a new, cog-based
  approach. The new definitions/memory_layout.csv lists memory regions
  in descending order from the top of memory, their sizes, and whether
  they are shared outside the kernel (i.e., boot needs to know them). The
  new header bootproto/memory.h exposes the addresses of the shared
  regions, while the kernel's memory.h gains the start and size of all
  the regions. Also renamed the badly-named page-offset area to the
  linear area. (A rough placeholder sketch of the generated shape
  follows below.)

- The Python build scripts got a few new features: the ability to parse
  the csv mentioned above in a new memory.py module; the ability to add
  dependencies to existing source files (the list of files I had to
  pull out of the main list just to add them with a dependency on
  memory.h was getting too large, so I put them back into the sources
  list and added the dependency post-hoc); and the ability to
  reference 'source_root', 'build_root', and 'module_root' variables in
  .module files.

- Some utility functions that were in the kernel's memory.h got moved to
  util/pointers.h and util/misc.h, and misc.h's byteswap was renamed
  byteswap32 to be more specific.
2022-01-03 17:44:13 -08:00
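
For a rough sense of the shape of the cog-generated output described in the
commit message, the placeholder sketch below shows one possible form; the
region name, address, and size here are hypothetical and are not taken from
the actual definitions/memory_layout.csv.

#include <cstddef>
#include <cstdint>

// Hypothetical illustration only. Each row of definitions/memory_layout.csv
// (region name, size, shared flag) would become a start address counted down
// from the top of memory plus a size constant; shared rows would also be
// exposed through bootproto/memory.h.
namespace mem {
    // Placeholder values: a 64 TiB "linear" region at the very top of memory.
    constexpr uintptr_t linear_start = 0xffff'c000'0000'0000ull;
    constexpr size_t    linear_size  = 0x0000'4000'0000'0000ull;
}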


#include <string.h>
#include <arch/memory.h>
#include "assert.h"
#include "frame_allocator.h"
#include "page_tree.h"

// Page tree levels map the following parts of an offset. Note that the xxx
// part of the offset represents the bits of the actual sub-page virtual
// address. (Also note that level 0's entries are physical page addresses;
// the other levels' entries point to other page_tree nodes.)
//
// Level 0: 0000 0000 0003 fxxx 64 pages / 256 KiB
// Level 1: 0000 0000 00fc 0xxx 4K pages / 16 MiB -- 24-bit addressing
// Level 2: 0000 0000 3f00 0xxx 256K pages / 1 GiB
// Level 3: 0000 000f c000 0xxx 16M pages / 64 GiB -- 36-bit addressing
// Level 4: 0000 03f0 0000 0xxx 1G pages / 4 TiB
// Level 5: 0000 fc00 0000 0xxx 64G pages / 256 TiB -- 48-bit addressing
//
// Not supported until 5-level paging:
// Level 6: 003f 0000 0000 0xxx 4T pages / 16 PiB -- 54-bit addressing
// Level 7: 0fc0 0000 0000 0xxx 256T pages / 1 EiB -- 60-bit addressing
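//
// For example (illustrative, assuming 4 KiB frames so arch::frame_bits == 12,
// as the table above implies): the offset 0x0000'0000'0abc'd000 breaks down
// into frame offset 0x000 (the xxx bits), level 0 index 0x0d (bits 12-17),
// level 1 index 0x2f (bits 18-23), level 2 index 0x0a (bits 24-29), and
// index 0 at every higher level.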
static_assert(sizeof(page_tree) == 66 * sizeof(uintptr_t));
static constexpr unsigned max_level = 5;
static constexpr unsigned bits_per_level = 6;
inline int level_shift(uint8_t level) { return level * bits_per_level + arch::frame_bits; }
inline uint64_t level_mask(uint8_t level) { return ~0x3full << level_shift(level); }
inline int index_for(uint64_t off, uint8_t level) { return (off >> level_shift(level)) & 0x3full; }
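// Illustrative values for the helpers above (again assuming arch::frame_bits
// == 12): level_shift(1) == 18, level_mask(1) == 0xffff'ffff'ff00'0000, and
// index_for(0x00fc'0000, 1) == 0x3f.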

page_tree::page_tree(uint64_t base, uint8_t level) :
    m_base {base & level_mask(level)},
    m_level {level}
{
    memset(m_entries, 0, sizeof(m_entries));
}

page_tree::~page_tree()
{
    if (m_level) {
        for (auto &e : m_entries)
            delete e.child;
    } else {
        auto &fa = frame_allocator::get();
        for (auto &e : m_entries) {
            if (e.entry & 1)
                fa.free(e.entry & ~0xfffull, 1);
        }
    }
}

bool
page_tree::contains(uint64_t offset, uint8_t &index) const
{
    // Tell the caller which slot this offset falls into at this node's
    // level, and report whether the offset is within this node's range.
    index = index_for(offset, m_level);
    return (offset & level_mask(m_level)) == m_base;
}

bool
page_tree::find(const page_tree *root, uint64_t offset, uintptr_t &page)
{
    page_tree const *node = root;
    while (node) {
        uint8_t level = node->m_level;

        uint8_t index = 0;
        if (!node->contains(offset, index))
            return false;

        if (!level) {
            uintptr_t entry = node->m_entries[index].entry;
            page = entry & ~0xfffull;
            return (entry & 1); // bit 0 marks 'present'
        }

        node = node->m_entries[index].child;
    }

    return false;
}

bool
page_tree::find_or_add(page_tree * &root, uint64_t offset, uintptr_t &page)
{
    page_tree *level0 = nullptr;

    if (!root) {
        // There's no root yet, just make a level0 and make it
        // the root.
        level0 = new page_tree(offset, 0);
        root = level0;
    } else {
        // Find or insert an existing level0
        page_tree **parent = &root;
        page_tree *node = root;
        uint8_t parent_level = max_level + 1;

        while (node) {
            uint8_t level = node->m_level;

            uint8_t index = 0;
            if (!node->contains(offset, index)) {
                // We found a valid parent but the slot where this node should
                // go contains another node. Insert an intermediate parent of
                // this node and a new level0 into the parent.
                uint64_t other = node->m_base;

                uint8_t lcl = parent_level;
                while (index_for(offset, lcl) == index_for(other, lcl)) --lcl;

                page_tree *inter = new page_tree(offset, lcl);
                inter->m_entries[index_for(other, lcl)].child = node;
                *parent = inter;

                level0 = new page_tree(offset, 0);
                inter->m_entries[index_for(offset, lcl)].child = level0;
                break;
            }

            if (!level) {
                level0 = node;
                break;
            }

            parent = &node->m_entries[index].child;
            node = *parent;
        }

        kassert(node || parent, "Both node and parent were null in find_or_add");

        if (!node) {
            // We found a parent with an empty spot where this node should
            // be. Insert a new level0 there.
            level0 = new page_tree(offset, 0);
            *parent = level0;
        }
    }

    kassert(level0, "Got through find_or_add without a level0");

    uint8_t index = index_for(offset, 0);
    uint64_t &ent = level0->m_entries[index].entry;
    if (!(ent & 1)) {
        // No entry for this page exists, so make one
        if (!frame_allocator::get().allocate(1, &ent))
            return false;
        ent |= 1;
    }

    page = ent & ~0xfffull;
    return true;
}
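
For context, here is a minimal sketch of how a caller might drive this
structure; it is illustrative only, not part of the jsix source. It assumes a
kernel context where frame_allocator is usable, and the names touch_page and
backing are hypothetical.

// Hypothetical caller sketch: demand-populate a page for a sparse region,
// then look it up again.
void touch_page(page_tree *&backing, uint64_t offset)
{
    uintptr_t phys = 0;

    // Find the physical frame backing this offset, allocating one on first use.
    if (!page_tree::find_or_add(backing, offset, phys))
        return; // out of physical frames

    // Any offset within the same frame should now resolve to the same page.
    uintptr_t again = 0;
    bool found = page_tree::find(backing, offset, again);
    kassert(found && again == phys, "lookup after insert should succeed");
}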