[kernel] Fix frame allocation for multiple pages

The logic determining how many consecutive pages were available was
inverted: the free-frame bitmap was complemented before being shifted,
instead of after.
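
For context, a minimal standalone sketch of the precedence bug, assuming
the convention that a set bit in the bitmap word means a free frame;
bsf() here stands in for the kernel's bit-scan helper:

    #include <cstdint>
    #include <cstdio>

    // Index of the lowest set bit; undefined for v == 0, like x86 BSF.
    static unsigned bsf(uint64_t v) { return __builtin_ctzll(v); }

    int main() {
        uint64_t m3 = ~0ull << 4; // frames 4..63 free, 0..3 used
        unsigned o3 = 4;          // bit offset of the first free frame

        // Buggy: ~ binds tighter than >>, so zeros are shifted in at
        // the top; when the free run reaches the top of the word the
        // operand becomes 0 and bsf() is undefined.
        uint64_t buggy = ~m3 >> o3;   // == 0 for this bitmap
        // Fixed: shift first, then complement, so the shifted-in zeros
        // become ones and bsf() finds the true end of the free run.
        uint64_t fixed = ~(m3 >> o3); // lowest set bit at 60

        printf("buggy operand: %016llx\n", (unsigned long long)buggy);
        printf("free run: %u frames\n", bsf(fixed)); // prints 60
        return 0;
    }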

Also included are some memory debugging tools I added to track down the
recent memory bugs:

- A direct debugcon::write call, for logging to the debugcon without the
  page faults the logger path can trigger.
- A new vm_space::lock call, to make a page non-fillable in memory
  debugging mode
- A mode in heap_allocator to always allocate new pages, and lock freed
  pages to cause page faults for use-after-free bugs (see the user-space
  sketch after this list).
- Logging in kobject on creation and deletion
- Page table cache structs are now page-sized for easy pointer math
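
The lock-freed-pages mode works by unmapping freed heap pages so a stale
pointer faults instead of silently reusing memory. A user-space sketch of
the same idea using mmap/mprotect (illustrative only; none of the kernel
names above appear here, and error checking is omitted):

    #include <sys/mman.h>
    #include <unistd.h>

    int main() {
        size_t page = sysconf(_SC_PAGESIZE);
        char *p = static_cast<char *>(mmap(nullptr, page,
            PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
        p[0] = 42; // fine while "allocated"

        // "free": keep the range reserved but revoke all access, much
        // like vm_space::lock replacing the PTE with a trap value.
        mprotect(p, page, PROT_NONE);

        p[0] = 43; // use-after-free: faults here, by design
        return 0;
    }
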
Justin C. Miller
2023-02-19 01:07:13 -08:00
parent 55c88dd943
commit d2a6113fb7
11 changed files with 131 additions and 20 deletions

View File

@@ -26,6 +26,20 @@ namespace {
}
} // anon namespace
void
write(const char *fmt, ...)
{
char buffer[256];
va_list va;
va_start(va, fmt);
size_t n = util::vformat({buffer, sizeof(buffer)}, fmt, va);
va_end(va);
debug_out(buffer, n);
debug_out("\r\n", 2);
}
void
output(j6_log_entry *entry)
{

View File

@@ -16,6 +16,7 @@ static constexpr bool enable =
static constexpr uint16_t port = 0x6600;
void logger_task();
void write(const char *fmt, ...);
inline void
init_logger()

View File

@@ -1,6 +1,7 @@
#include <bootproto/kernel.h>
#include "assert.h"
#include "debugcon.h"
#include "frame_allocator.h"
#include "logger.h"
#include "memory.h"
@@ -51,7 +52,7 @@ frame_allocator::allocate(size_t count, uintptr_t *address)
unsigned frame = (o1 << 12) + (o2 << 6) + o3;
// See how many contiguous pages are here
-unsigned n = bsf(~m3 >> o3);
+unsigned n = bsf(~(m3 >> o3));
if (n > count)
n = count;
@@ -71,6 +72,7 @@ frame_allocator::allocate(size_t count, uintptr_t *address)
}
}
//debugcon::write("Allocated %2d frames at %016lx - %016lx", n, *address, *address + n * frame_size);
return n;
}
@@ -88,6 +90,8 @@ frame_allocator::free(uintptr_t address, size_t count)
if (!count)
return;
//debugcon::write("Freeing %2d frames at %016lx - %016lx", count, address, address + count * frame_size);
for (long i = 0; i < m_count; ++i) {
frame_block &block = m_blocks[i];
uintptr_t end = block.base + block.count * frame_size;

View File

@@ -8,6 +8,8 @@
#include "assert.h"
#include "heap_allocator.h"
#include "memory.h"
#include "objects/vm_area.h"
#include "vm_space.h"
uint32_t & get_map_key(heap_allocator::block_info &info) { return info.offset; }
@@ -46,7 +48,6 @@ heap_allocator::heap_allocator(uintptr_t start, size_t size, uintptr_t heapmap)
m_maxsize {size},
m_allocated_size {0},
m_map (reinterpret_cast<block_info*>(heapmap), 512)
{
memset(m_free, 0, sizeof(m_free));
}
@@ -57,6 +58,11 @@ heap_allocator::allocate(size_t length)
if (length == 0)
return nullptr;
static constexpr unsigned min_order =
__debug_heap_allocation ?
12 : // allocating full pages in debug mode
heap_allocator::min_order;
unsigned order = util::log2(length);
if (order < min_order)
order = min_order;
@@ -98,6 +104,15 @@ heap_allocator::free(void *p)
size_t size = (1ull << info->order);
m_allocated_size -= size;
if constexpr (__debug_heap_allocation) {
extern obj::vm_area_untracked &g_kernel_heap_area;
size_t offset = reinterpret_cast<uintptr_t>(p) - mem::heap_offset;
size_t pages = mem::bytes_to_pages(size);
vm_space::kernel_space().lock(g_kernel_heap_area, offset, pages);
return;
}
block->clear(info->order);
block = merge_block(block);
register_free_block(block, block->order);

View File

@@ -34,8 +34,9 @@ public:
/// allocation with the contents copied over.
void * reallocate(void *p, size_t old_length, size_t new_length);
-/// Minimum block size is (2^min_order). Must be at least 6.
-static const unsigned min_order = 6; // 2^6 == 64 B
+/// Minimum block size is (2^min_order). Must be at least 5 in
+/// order to hold a free_header.
+static const unsigned min_order = 5; // 2^5 == 32 B
/// Maximum block size is (2^max_order). Must be less than 32 + min_order.
static const unsigned max_order = 22; // 2^22 == 4 MiB

View File

@@ -88,3 +88,6 @@ constexpr uintptr_t page_align_up(uintptr_t a) { return page_align_down(a-1) + f
void initialize(bootproto::args &args);
} // namespace mem
static constexpr bool __debug_heap_allocation = false;

View File

@@ -2,6 +2,7 @@
#include <j6/types.h>
#include "assert.h"
#include "logger.h"
#include "objects/kobject.h"
#include "objects/thread.h"
@@ -14,6 +15,13 @@ static uint32_t next_oids [types_count] = { 0 };
static_assert(types_count <= (1 << kobject::koid_type_bits),
"kobject::koid_type_bits cannot represent all kobject types");
static const char *type_names[] = {
#define OBJECT_TYPE( name, val ) #name ,
#include <j6/tables/object_types.inc>
#undef OBJECT_TYPE
nullptr
};
static uint32_t
oid_generate(kobject::type t)
{
@@ -26,13 +34,25 @@ kobject::kobject(type t) :
m_handle_count {0},
m_type {t},
m_obj_id {oid_generate(t)}
-{}
+{
+    log::spam(logs::objs, "%s[%02lx] created @ 0x%lx", type_name(m_type), m_obj_id, this);
+}
-kobject::~kobject() {}
+kobject::~kobject()
+{
+    log::spam(logs::objs, "%s[%02lx] deleted", type_name(m_type), m_obj_id);
+}
const char *
kobject::type_name(type t)
{
return type_names[static_cast<int>(t)];
}
void
kobject::on_no_handles()
{
log::verbose(logs::objs, "Deleting %s[%02lx] on no handles", type_name(m_type), m_obj_id);
delete this;
}
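
The type_names table above is built with an X-macro include. A
hypothetical sketch of the expansion (the entry names below are invented,
not the real contents of j6/tables/object_types.inc):

    #define OBJECT_TYPE( name, val ) #name ,
    static const char *type_names[] = {
        OBJECT_TYPE( thread,  1 )  // expands to "thread",
        OBJECT_TYPE( process, 2 )  // expands to "process",
        nullptr
    };
    #undef OBJECT_TYPE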

View File

@@ -38,6 +38,8 @@ public:
return static_cast<type>((koid >> koid_type_shift) & koid_type_mask);
}
static const char * type_name(type t);
/// Get this object's type
inline type get_type() const { return m_type; }

View File

@@ -171,7 +171,13 @@ page_table::set(int i, page_table *p, uint16_t flags)
(flags & 0xfff);
}
-struct free_page_header { free_page_header *next; };
+struct free_page_header
+{
+    free_page_header *next;
+    static constexpr size_t x_count = (frame_size - sizeof(next)) / sizeof(uint64_t);
+    uint64_t x [x_count];
+};
page_table *
page_table::get_table_page()
@@ -213,18 +219,14 @@ page_table::fill_table_page_cache()
uintptr_t phys = 0;
size_t n = fa.allocate(request_pages, &phys);
-free_page_header *start =
+free_page_header *pages =
     mem::to_virtual<free_page_header>(phys);
-for (int i = 0; i < n - 1; ++i)
-    util::offset_pointer(start, i * frame_size)
-        ->next = util::offset_pointer(start, (i+1) * frame_size);
-free_page_header *end =
-    util::offset_pointer(start, (n-1) * frame_size);
-end->next = s_page_cache;
-s_page_cache = start;
+for (int i = 0; i < n - 1; ++i) {
+    pages[i].next = &pages[i + 1];
+}
+pages[n - 1].next = s_page_cache;
+s_page_cache = pages;
}
void
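
Because free_page_header is now padded out to exactly one frame, the
cache-fill loop can use plain array indexing instead of offset_pointer
math. A small sketch of the invariant, assuming frame_size is 4096:

    #include <cstddef>
    #include <cstdint>

    constexpr size_t frame_size = 4096;

    struct free_page_header
    {
        free_page_header *next;
        static constexpr size_t x_count =
            (frame_size - sizeof(next)) / sizeof(uint64_t);
        uint64_t x [x_count];
    };

    // sizeof == frame_size, so &pages[i + 1] lands exactly one page past
    // &pages[i], matching the old i * frame_size pointer arithmetic.
    static_assert(sizeof(free_page_header) == frame_size,
                  "cache header must be exactly page-sized");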

View File

@@ -15,9 +15,11 @@
using obj::vm_flags;
// The initial memory for the array of areas for the kernel space
-constexpr size_t num_kernel_areas = 8;
+static constexpr size_t num_kernel_areas = 8;
static uint64_t kernel_areas[num_kernel_areas * 2];
static constexpr uint64_t locked_page_tag = 0xbadfe11a;
int
vm_space::area::compare(const vm_space::area &o) const
{
@@ -115,7 +117,7 @@ vm_space::can_resize(const obj::vm_area &vma, size_t size) const
{
uintptr_t base = 0;
unsigned n = m_areas.count();
-for (unsigned i = 0; i < n - 1; ++i) {
+for (unsigned i = 1; i < n - 1; ++i) {
const area &prev = m_areas[i - 1];
if (prev.area != &vma)
continue;
@@ -251,6 +253,38 @@ vm_space::clear(const obj::vm_area &vma, uintptr_t offset, size_t count, bool fr
fa.free(free_start, free_count);
}
void
vm_space::lock(const obj::vm_area &vma, uintptr_t offset, size_t count)
{
using mem::frame_size;
util::scoped_lock lock {m_lock};
uintptr_t base = 0;
if (!find_vma(vma, base))
return;
uintptr_t addr = base + offset;
frame_allocator &fa = frame_allocator::get();
page_table::iterator it {addr, m_pml4};
while (count--) {
uint64_t &e = it.entry(page_table::level::pt);
uintptr_t phys = e & ~0xfffull;
if (e & page_table::flag::present) {
uint64_t orig = e;
e = locked_page_tag;
if (orig & page_table::flag::accessed) {
auto *addr = reinterpret_cast<const uint8_t *>(it.vaddress());
asm ( "invlpg %0" :: "m"(*addr) : "memory" );
}
fa.free(phys, 1);
}
++it;
}
}
uintptr_t
vm_space::lookup(const obj::vm_area &vma, uintptr_t offset)
{
@@ -298,6 +332,14 @@ vm_space::handle_fault(uintptr_t addr, fault_type fault)
if (!area)
return false;
if constexpr (__debug_heap_allocation) {
page_table::iterator it {addr, m_pml4};
uint64_t &e = it.entry(page_table::level::pt);
kassert(e != locked_page_tag, "Use-after-free");
if (e == locked_page_tag)
return false;
}
uintptr_t offset = page - base;
uintptr_t phys_page = 0;
if (!area->get_page(offset, phys_page))

View File

@@ -65,7 +65,14 @@ public:
/// \arg offset Offset of the starting virtual address from the VMA base
/// \arg count The number of pages worth of mappings to clear
/// \arg free If true, free the pages back to the system
-void clear(const obj::vm_area &vma, uintptr_t start, size_t count, bool free = false);
+void clear(const obj::vm_area &vma, uintptr_t offset, size_t count, bool free = false);
/// Clear mappings from the given region, and mark it as locked. Used for
/// debugging heap allocation reuse.
/// \arg vma The VMA these mappings apply to
/// \arg offset Offset of the starting virtual address from the VMA base
/// \arg count The number of pages worth of mappings to clear
void lock(const obj::vm_area &vma, uintptr_t offset, size_t count);
/// Look up the address of a given VMA's offset
uintptr_t lookup(const obj::vm_area &vma, uintptr_t offset);