Clean up process memory on exit.

Additionally, several bug fixes were needed to make this possible:
- frame_allocator was allocating its frame_blocks from the heap, causing
  a circular dependency. Now it carves a page out of its own free list
  when it needs more block structs (see the sketch after this list).
- frame_allocator::free was inserting a block's trailing pages back into
  the used list after the current block, so they would be the next block
  the loop visited.
- frame_allocator::free was advancing the address it was looking for
  after freeing some pages, but not decrementing the remaining count, so
  it would eventually free every page after the initial address.
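
The first fix works roughly like the sketch below. This is a user-space toy, not the kernel code: frame_size, block, and toy_allocator are illustrative stand-ins, std::list replaces kutil's intrusive list, and the physical-to-virtual page_offset mapping is omitted. The idea is that when the allocator runs out of spare block structs, it slices one of its own free frames into structs instead of calling into the heap.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <list>

constexpr size_t frame_size = 0x1000;

struct block {
    uintptr_t address; // first frame in the range
    size_t count;      // number of frames in the range
};

struct toy_allocator {
    std::list<block>  free_list; // ranges of free frames
    std::list<block*> cache;     // spare block structs carved from our own frames

    // When the cache runs dry, take one frame off the front of the free list
    // and slice it into block structs -- no heap, so no circular dependency.
    block *get_block_struct() {
        if (cache.empty()) {
            block &first = free_list.front();
            auto *start = reinterpret_cast<block*>(first.address);
            auto *end   = reinterpret_cast<block*>(first.address + frame_size);

            if (first.count == 1)
                free_list.pop_front();   // consumed the whole range
            else {
                first.count -= 1;        // shrink the range by one frame
                first.address += frame_size;
            }

            for (block *b = start; b < end; ++b)
                cache.push_back(b);
        }
        block *b = cache.front();
        cache.pop_front();
        return b;
    }
};

int main() {
    alignas(frame_size) static char frames[4 * frame_size]; // stand-in for physical memory
    toy_allocator alloc;
    alloc.free_list.push_back({ reinterpret_cast<uintptr_t>(frames), 4 });

    block *b = alloc.get_block_struct(); // triggers a refill from the first free frame
    std::printf("spare structs: %zu, free frames left: %zu\n",
                alloc.cache.size(), alloc.free_list.front().count);
    (void)b;
}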
Justin C. Miller
2019-04-06 11:19:38 -07:00
parent c605793a9d
commit 863555ec6b
8 changed files with 110 additions and 31 deletions


@@ -1,7 +1,12 @@
#include "kutil/assert.h"
#include "kutil/frame_allocator.h"
#include "kutil/memory.h"
namespace kutil {
using memory::frame_size;
using memory::page_offset;
int
frame_block::compare(const frame_block *rhs) const
{
@@ -53,7 +58,7 @@ frame_block::copy(frame_block *other)
frame_allocator::frame_allocator(
frame_block_list cache)
{
m_block_slab.append(cache);
m_cache.append(cache);
}
void
@@ -65,11 +70,37 @@ frame_allocator::init(
m_used.append(used);
}
list_node<frame_block> *
frame_allocator::get_block_node()
{
if (m_cache.empty()) {
auto *first = m_free.front();
frame_block_node * start =
reinterpret_cast<frame_block_node*>(first->address + page_offset);
frame_block_node * end = offset_pointer(start, frame_size);
if (first->count == 1) {
m_free.remove(first);
} else {
first->count--;
first->address += frame_size;
}
while (start < end) {
m_cache.push_back(start);
start++;
}
}
return m_cache.pop_front();
}
void
frame_allocator::consolidate_blocks()
{
m_block_slab.append(frame_block::consolidate(m_free));
m_block_slab.append(frame_block::consolidate(m_used));
m_cache.append(frame_block::consolidate(m_free));
m_cache.append(frame_block::consolidate(m_used));
}
size_t
@@ -86,7 +117,7 @@ frame_allocator::allocate(size_t count, uintptr_t *address)
m_free.remove(first);
m_used.sorted_insert(first);
} else {
auto *used = m_block_slab.pop();
auto *used = get_block_node();
used->copy(first);
used->count = n;
m_used.sorted_insert(used);
@@ -95,7 +126,7 @@ frame_allocator::allocate(size_t count, uintptr_t *address)
first->count -= n;
}
m_block_slab.append(frame_block::consolidate(m_used));
consolidate_blocks();
return n;
}
@@ -118,7 +149,7 @@ frame_allocator::free(uintptr_t address, size_t count)
if (leading) {
size_t frames = leading / frame_size;
auto *lead_block = m_block_slab.pop();
auto *lead_block = get_block_node();
lead_block->copy(block);
lead_block->count = frames;
@@ -132,26 +163,28 @@ frame_allocator::free(uintptr_t address, size_t count)
if (trailing) {
size_t frames = trailing / frame_size;
auto *trail_block = m_block_slab.pop();
auto *trail_block = get_block_node();
trail_block->copy(block);
trail_block->count = frames;
trail_block->address += size;
trail_block->address += size;
block->count -= frames;
m_used.insert_after(block, trail_block);
m_used.insert_before(block, trail_block);
}
address += block->count * frame_size;
m_used.remove(block);
m_free.sorted_insert(block);
++block_count;
address += block->count * frame_size;
count -= block->count;
if (!count)
break;
}
kassert(block_count, "Couldn't find existing allocated frames to free");
consolidate_blocks();
}
} // namespace kutil
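
The free() hunk above is easier to follow with the third fix isolated. Below is a user-space toy that keeps only the counting logic (the leading/trailing splits and the real block lists are omitted; range and free_frames are illustrative names, not kernel code): the target address and the remaining count advance together, so the loop stops once the requested frames are released instead of walking every later range.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

constexpr size_t frame_size = 0x1000;

struct range { uintptr_t address; size_t count; };

// Release `count` frames starting at `address` from a list of used ranges.
// The key detail from the fix: advance the target address *and* decrement
// the remaining count; advancing only the address (the old bug) would keep
// matching and freeing every range after the starting point.
size_t free_frames(std::vector<range> &used, uintptr_t address, size_t count)
{
    size_t freed = 0;
    while (count) {
        bool found = false;
        for (auto it = used.begin(); it != used.end(); ++it) {
            if (it->address != address)
                continue;
            size_t n = it->count < count ? it->count : count;
            // (splitting a partially-freed range is omitted for brevity)
            freed   += n;
            address += n * frame_size;
            count   -= n;
            used.erase(it);
            found = true;
            break;
        }
        if (!found) break; // no range starts at the address we're looking for
    }
    return freed;
}

int main()
{
    std::vector<range> used = { {0x10000, 4}, {0x14000, 4}, {0x18000, 4} };
    size_t freed = free_frames(used, 0x10000, 8); // frees the first two ranges only
    std::printf("freed %zu frames, %zu used range(s) remain\n", freed, used.size());
}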


@@ -4,23 +4,19 @@
#include <stdint.h>
#include "kernel_memory.h"
#include "kutil/enum_bitfields.h"
#include "kutil/linked_list.h"
#include "kutil/slab_allocator.h"
namespace kutil {
struct frame_block;
using frame_block_list = kutil::linked_list<frame_block>;
using frame_block_slab = kutil::slab_allocator<frame_block>;
using frame_block_list = linked_list<frame_block>;
/// Allocator for physical memory frames
class frame_allocator
{
public:
/// Size of a single page frame.
static const size_t frame_size = 0x1000;
/// Default constructor
frame_allocator() = default;
@@ -53,9 +49,13 @@ public:
void consolidate_blocks();
private:
using frame_block_node = list_node<frame_block>;
frame_block_list m_free; ///< Free frames list
frame_block_list m_used; ///< In-use frames list
frame_block_slab m_block_slab; ///< frame_block slab allocator
frame_block_list m_cache; ///< Spare frame-block structs
frame_block_node *get_block_node();
frame_allocator(const frame_allocator &) = delete;
};
@@ -97,7 +97,7 @@ struct frame_block
frame_block_flags flags;
inline bool has_flag(frame_block_flags f) const { return bitfield_has(flags, f); }
inline uintptr_t end() const { return address + (count * frame_allocator::frame_size); }
inline uintptr_t end() const { return address + (count * memory::frame_size); }
inline bool contains(uintptr_t addr) const { return addr >= address && addr < end(); }
/// Helper to zero out a block and optionally set the next pointer.


@@ -116,27 +116,35 @@ public:
/// Constructor. Creates an empty list.
linked_list() :
m_head(nullptr),
m_tail(nullptr)
m_tail(nullptr),
m_count(0)
{}
/// Move constructor. Takes ownership of list elements.
linked_list(linked_list<T> &&other) :
m_head(other.m_head),
m_tail(other.m_tail)
m_tail(other.m_tail),
m_count(other.m_count)
{
other.m_head = other.m_tail = nullptr;
other.m_count = 0;
}
/// Check if the list is empty.
/// \returns true if the list is empty
bool empty() const { return m_head == nullptr; }
/// Get the cached length of the list.
/// \returns The number of entries in the list.
size_t length() const { return m_count; }
/// Count the items in the list.
/// \returns The number of entries in the list.
size_t length() const
size_t count_length() const
{
size_t len = 0;
for (item_type *cur = m_head; cur; cur = cur->m_next) ++len;
m_count = len;
return len;
}
@@ -164,6 +172,8 @@ public:
item->m_prev = nullptr;
m_head = item;
}
m_count += 1;
}
/// Append an item to the end of this list.
@@ -182,6 +192,8 @@ public:
item->m_next = nullptr;
m_tail = item;
}
m_count += 1;
}
/// Remove an item from the front of this list.
@@ -217,6 +229,8 @@ public:
m_tail = list.m_tail;
}
m_count += list.m_count;
list.m_count = 0;
list.m_head = list.m_tail = nullptr;
}
@@ -235,6 +249,8 @@ public:
m_tail = list.m_tail;
}
m_count += list.m_count;
list.m_count = 0;
list.m_head = list.m_tail = nullptr;
}
@@ -248,6 +264,7 @@ public:
if (item == m_tail)
m_tail = item->m_prev;
item->remove();
m_count -= 1;
}
/// Inserts an item into the list before another given item.
@@ -263,6 +280,8 @@ public:
push_front(item);
else
existing->insert_before(item);
m_count += 1;
}
/// Inserts an item into the list after another given item.
@@ -278,6 +297,8 @@ public:
push_back(item);
else
existing->insert_after(item);
m_count += 1;
}
/// Insert an item into the list in a sorted position. Depends on T
@@ -309,6 +330,7 @@ public:
private:
item_type *m_head;
item_type *m_tail;
size_t m_count;
};
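
The linked_list changes above cache the element count so length() is O(1), while count_length() still walks the list and refreshes the cache. A minimal sketch of that pattern, using a toy singly linked list rather than kutil's intrusive linked_list:

#include <cstddef>

struct node { node *next = nullptr; };

// Toy list that maintains a cached count on every mutation,
// in the spirit of the m_count changes above.
class counted_list {
public:
    void push_front(node *n) { n->next = m_head; m_head = n; ++m_count; }

    // O(1): return the count kept up to date by every mutation.
    size_t length() const { return m_count; }

    // O(n): re-walk the list and refresh the cached count.
    size_t count_length() {
        size_t len = 0;
        for (node *cur = m_head; cur; cur = cur->next) ++len;
        m_count = len;
        return len;
    }

private:
    node *m_head = nullptr;
    size_t m_count = 0;
};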


@@ -15,6 +15,7 @@ class slab_allocator :
{
public:
using item_type = list_node<T>;
using alloc_type = Alloc;
/// Default constructor.
/// \arg chunk_size The size of chunk to allocate, in bytes. 0 means default.