Clean up process memory on exit.

Additionally, several bug fixes were needed to make this work:
- frame_allocator was allocating its frame_blocks from the heap, causing
  a circular dependency. Now it carves a page off its own free list when
  its node cache runs dry (a user-space sketch of the idea follows this
  list).
- frame_allocator::free was inserting the trailing pages split off a
  block back into the list after the current block, so they would be the
  very next block visited by the iteration.
- frame_allocator::free was updating the address it was searching for
  after freeing part of a range, but not the remaining count, so it
  would eventually free every page after the initial address.
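To make the first fix concrete, here is a minimal user-space sketch of
the refill idea behind the new get_block_node(): when the cache of list
nodes runs dry, one frame is sliced into nodes instead of asking the
heap. A malloc'd buffer stands in for the frame the real allocator pops
off m_free, and the intrusive list here is a simplified stand-in, not
the kutil API.

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    constexpr size_t frame_size = 0x1000; // 4 KiB, as in memory::frame_size

    struct frame_block {
        uintptr_t address;
        size_t count;
        frame_block *next; // simplified intrusive singly-linked list
    };

    static frame_block *cache = nullptr; // stand-in for m_cache

    // Slice one frame into list nodes and push them onto the cache, so
    // bookkeeping never needs the heap (and thus never needs the
    // allocator that the heap itself depends on).
    static void refill_cache(void *frame)
    {
        auto *node = static_cast<frame_block*>(frame);
        auto *end = reinterpret_cast<frame_block*>(
                static_cast<char*>(frame) + frame_size);
        while (node + 1 <= end) { // only nodes that fit entirely
            node->next = cache;
            cache = node;
            ++node;
        }
    }

    static frame_block * get_block_node()
    {
        if (!cache) {
            // The real get_block_node() takes this frame off m_free;
            // malloc stands in so the sketch runs in user space. The
            // frame is never returned -- the node cache keeps it.
            refill_cache(std::malloc(frame_size));
        }
        frame_block *node = cache;
        cache = cache->next;
        return node;
    }

    int main()
    {
        frame_block *a = get_block_node(); // triggers one refill
        frame_block *b = get_block_node(); // served from the cache
        std::printf("%zu nodes per frame; a=%p b=%p\n",
                frame_size / sizeof(frame_block),
                static_cast<void*>(a), static_cast<void*>(b));
    }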
Justin C. Miller committed 2019-04-06 11:19:38 -07:00
parent c605793a9d, commit 863555ec6b
8 changed files with 110 additions and 31 deletions

@@ -1,7 +1,12 @@
+#include "kutil/assert.h"
 #include "kutil/frame_allocator.h"
+#include "kutil/memory.h"
 
 namespace kutil {
 
+using memory::frame_size;
+using memory::page_offset;
+
 int
 frame_block::compare(const frame_block *rhs) const
 {
@@ -53,7 +58,7 @@ frame_block::copy(frame_block *other)
 frame_allocator::frame_allocator(
     frame_block_list cache)
 {
-    m_block_slab.append(cache);
+    m_cache.append(cache);
 }
 
 void
@@ -65,11 +70,37 @@ frame_allocator::init(
     m_used.append(used);
 }
 
+list_node<frame_block> *
+frame_allocator::get_block_node()
+{
+    if (m_cache.empty()) {
+        auto *first = m_free.front();
+
+        frame_block_node * start =
+            reinterpret_cast<frame_block_node*>(first->address + page_offset);
+        frame_block_node * end = offset_pointer(start, frame_size);
+
+        if (first->count == 1) {
+            m_free.remove(first);
+        } else {
+            first->count--;
+            first->address += frame_size;
+        }
+
+        while (start < end) {
+            m_cache.push_back(start);
+            start++;
+        }
+    }
+
+    return m_cache.pop_front();
+}
+
 void
 frame_allocator::consolidate_blocks()
 {
-    m_block_slab.append(frame_block::consolidate(m_free));
-    m_block_slab.append(frame_block::consolidate(m_used));
+    m_cache.append(frame_block::consolidate(m_free));
+    m_cache.append(frame_block::consolidate(m_used));
 }
 
 size_t
@@ -86,7 +117,7 @@ frame_allocator::allocate(size_t count, uintptr_t *address)
         m_free.remove(first);
         m_used.sorted_insert(first);
     } else {
-        auto *used = m_block_slab.pop();
+        auto *used = get_block_node();
         used->copy(first);
         used->count = n;
         m_used.sorted_insert(used);
@@ -95,7 +126,7 @@ frame_allocator::allocate(size_t count, uintptr_t *address)
         first->count -= n;
     }
 
-    m_block_slab.append(frame_block::consolidate(m_used));
+    consolidate_blocks();
 
     return n;
 }
@@ -118,7 +149,7 @@ frame_allocator::free(uintptr_t address, size_t count)
     if (leading) {
         size_t frames = leading / frame_size;
-        auto *lead_block = m_block_slab.pop();
+        auto *lead_block = get_block_node();
         lead_block->copy(block);
         lead_block->count = frames;
 
@@ -132,26 +163,28 @@ frame_allocator::free(uintptr_t address, size_t count)
         if (trailing) {
             size_t frames = trailing / frame_size;
-            auto *trail_block = m_block_slab.pop();
+            auto *trail_block = get_block_node();
             trail_block->copy(block);
             trail_block->count = frames;
             trail_block->address += size;
             block->count -= frames;
-            m_used.insert_after(block, trail_block);
+            m_used.insert_before(block, trail_block);
         }
 
-        address += block->count * frame_size;
         m_used.remove(block);
         m_free.sorted_insert(block);
         ++block_count;
+        address += block->count * frame_size;
+        count -= block->count;
 
         if (!count)
             break;
     }
 
     kassert(block_count, "Couldn't find existing allocated frames to free");
+    consolidate_blocks();
 }
 
 } // namespace kutil