[kernel] Fix frame allocation for multiple pages

The boolean logic that determined how many consecutive pages were
available was inverted.

Also add some memory debugging tools I used to track down the recent
memory bugs:

- A direct debugcon::write call, for logging to the debugcon without the
  possible page faults with the logger.
- A new vm_space::lock call, to make a page not fillable in memory
  debugging mode
- A mode in heap_allocator to always allocate fresh pages, and lock freed
  pages so that use-after-free bugs trigger page faults
- Logging in kobject on creation and deletion
- Page table cache structs are now page-sized for easy pointer math
This commit is contained in:
Justin C. Miller
2023-02-19 01:07:13 -08:00
parent 55c88dd943
commit d2a6113fb7
11 changed files with 131 additions and 20 deletions

View File

@@ -15,9 +15,11 @@
using obj::vm_flags;
// The initial memory for the array of areas for the kernel space
constexpr size_t num_kernel_areas = 8;
static constexpr size_t num_kernel_areas = 8;
static uint64_t kernel_areas[num_kernel_areas * 2];
static constexpr uint64_t locked_page_tag = 0xbadfe11a;
int
vm_space::area::compare(const vm_space::area &o) const
{
@@ -115,7 +117,7 @@ vm_space::can_resize(const obj::vm_area &vma, size_t size) const
{
uintptr_t base = 0;
unsigned n = m_areas.count();
for (unsigned i = 0; i < n - 1; ++i) {
for (unsigned i = 1; i < n - 1; ++i) {
const area &prev = m_areas[i - 1];
if (prev.area != &vma)
continue;
@@ -251,6 +253,38 @@ vm_space::clear(const obj::vm_area &vma, uintptr_t offset, size_t count, bool fr
fa.free(free_start, free_count);
}
/// Poison `count` pages of `vma`, starting at page offset `offset`, so that
/// any later access to them page-faults. For each present PTE the entry is
/// replaced with `locked_page_tag` (a non-present poison value) and the
/// backing frame is returned to the frame allocator. Used by the heap
/// debugging mode to catch use-after-free bugs.
///
/// \arg vma     The area whose pages should be locked
/// \arg offset  Byte offset into the area of the first page to lock
/// \arg count   Number of pages to lock
void
vm_space::lock(const obj::vm_area &vma, uintptr_t offset, size_t count)
{
    util::scoped_lock lock {m_lock};

    // Find where this VMA is mapped in this space; if it isn't
    // mapped here there is nothing to lock.
    uintptr_t base = 0;
    if (!find_vma(vma, base))
        return;

    uintptr_t addr = base + offset;
    frame_allocator &fa = frame_allocator::get();

    page_table::iterator it {addr, m_pml4};
    while (count--) {
        uint64_t &e = it.entry(page_table::level::pt);
        uintptr_t phys = e & ~0xfffull;

        if (e & page_table::flag::present) {
            uint64_t orig = e;
            e = locked_page_tag;

            // Only shoot down the TLB entry if the CPU may have cached
            // the translation: the accessed bit is set whenever the
            // entry has been used for a translation.
            if (orig & page_table::flag::accessed) {
                // Renamed from `addr` to avoid shadowing the outer local.
                auto *vaddr = reinterpret_cast<const uint8_t *>(it.vaddress());
                asm ( "invlpg %0" :: "m"(*vaddr) : "memory" );
            }

            fa.free(phys, 1);
        }
        ++it;
    }
}
uintptr_t
vm_space::lookup(const obj::vm_area &vma, uintptr_t offset)
{
@@ -298,6 +332,14 @@ vm_space::handle_fault(uintptr_t addr, fault_type fault)
if (!area)
return false;
if constexpr (__debug_heap_allocation) {
page_table::iterator it {addr, m_pml4};
uint64_t &e = it.entry(page_table::level::pt);
kassert(e != locked_page_tag, "Use-after-free");
if (e == locked_page_tag)
return false;
}
uintptr_t offset = page - base;
uintptr_t phys_page = 0;
if (!area->get_page(offset, phys_page))