Clean up process memory on exit.

Additionally, there were several bug fixes needed to allow this:
- frame_allocator was allocating its frame_blocks from the heap, causing
  a circular dependency (the heap allocator itself needs frames). Now it
  maps a page for itself directly when needed.
- frame_allocator::free was re-inserting any trailing pages of a block
  into the list immediately after the current block, so they became the
  very next block visited by the iteration.
- frame_allocator::free was updating the address it was looking for
  after freeing some pages, but not the count it was looking for, so it
  would eventually free all pages after the initial address.
This commit is contained in:
Justin C. Miller
2019-04-06 11:19:38 -07:00
parent c605793a9d
commit 863555ec6b
8 changed files with 110 additions and 31 deletions

View File

@@ -1,26 +0,0 @@
#pragma once
/// \file kernel_memory.h
/// Constants related to the kernel's memory layout
namespace memory {
/// Size of a single page frame, in bytes (0x1000 = 4 KiB).
static const size_t frame_size = 0x1000;
/// Start of kernel memory. Higher-half virtual address; presumably
/// matches the linker script's kernel base — confirm there.
static const uintptr_t kernel_offset = 0xffffff0000000000;
/// Offset from physical where page tables are mapped.
/// NOTE(review): looks like a linear physical-memory window, i.e.
/// virtual (page_offset + P) maps physical P — confirm in page_manager.
static const uintptr_t page_offset = 0xffffff8000000000;
/// Initial process thread's stack address
static const uintptr_t initial_stack = 0x0000800000000000;
/// Initial process thread's stack size, in pages
static const unsigned initial_stack_pages = 1;
/// Helper to determine if a physical address can be accessed
/// through the page_offset area: true iff the address shares no set
/// bits with page_offset, i.e. it fits below the mapped window.
inline bool page_mappable(uintptr_t a) { return (a & page_offset) == 0; }
} // namespace memory

View File

@@ -96,6 +96,8 @@ page_manager::copy_page(uintptr_t orig)
paged_copy = true;
}
// TODO: multiple page copies at a time, so that we don't have to keep
// paying this mapping penalty
if (paged_orig || paged_copy) {
set_pml4(get_pml4());
__sync_synchronize();
@@ -265,17 +267,15 @@ page_manager::map_pages(uintptr_t address, size_t count, bool user, page_table *
}
void
page_manager::unmap_table(page_table *table, page_table::level lvl, bool free)
page_manager::unmap_table(page_table *table, page_table::level lvl, bool free, page_table_indices index)
{
log::debug(logs::paging, "Unmapping%s lv %d table at %016lx",
free ? " (and freeing)" : "", lvl, table);
const int max =
lvl == page_table::level::pml4 ?
510 :
512;
uintptr_t free_start = 0;
uintptr_t free_start_virt = 0;
uintptr_t free_count = 0;
size_t size =
@@ -287,6 +287,8 @@ page_manager::unmap_table(page_table *table, page_table::level lvl, bool free)
for (int i = 0; i < max; ++i) {
if (!table->is_present(i)) continue;
index[lvl] = i;
bool is_page =
lvl == page_table::level::pt ||
table->is_large_page(lvl, i);
@@ -295,24 +297,40 @@ page_manager::unmap_table(page_table *table, page_table::level lvl, bool free)
uintptr_t frame = table->entries[i] & ~0xfffull;
if (!free_count || frame != free_start + free_count * size) {
if (free_count && free) {
log::debug(logs::paging,
" freeing v:%016lx-%016lx p:%016lx-%016lx",
free_start_virt, free_start_virt + free_count * frame_size,
free_start, free_start + free_count * frame_size);
m_frames.free(free_start, (free_count * size) / frame_size);
free_count = 0;
}
if (!free_count)
if (!free_count) {
free_start = frame;
free_start_virt = index.addr();
}
}
free_count += 1;
} else {
page_table *next = table->get(i);
unmap_table(next, page_table::deeper(lvl), free);
unmap_table(next, page_table::deeper(lvl), free, index);
}
}
if (free_count && free)
if (free_count && free) {
log::debug(logs::paging,
" freeing v:%016lx-%016lx p:%016lx-%016lx",
free_start_virt, free_start_virt + free_count * frame_size,
free_start, free_start + free_count * frame_size);
m_frames.free(free_start, (free_count * size) / frame_size);
}
free_table_pages(table, 1);
log::debug(logs::paging, "Unmapped%s lv %d table at %016lx",
free ? " (and freed)" : "", lvl, table);
}
void

View File

@@ -162,7 +162,8 @@ private:
bool free = false);
/// Low-level routine for unmapping an entire table of memory at once
void unmap_table(page_table *table, page_table::level lvl, bool free);
void unmap_table(page_table *table, page_table::level lvl, bool free,
page_table_indices index = {});
page_table *m_kernel_pml4; ///< The PML4 of just kernel pages
free_page_header *m_page_cache; ///< Cache of free pages to use for tables

View File

@@ -12,6 +12,7 @@ process::exit(uint32_t code)
{
return_code = code;
flags -= process_flags::running;
page_manager::get()->delete_process_map(pml4);
}
pid_t