diff --git a/src/include/j6/flags.h b/src/include/j6/flags.h index 3d1bb93..d4afee5 100644 --- a/src/include/j6/flags.h +++ b/src/include/j6/flags.h @@ -6,5 +6,5 @@ enum j6_vm_flags { #define VM_FLAG(name, v) j6_vm_flag_ ## name = v, #include "j6/tables/vm_flags.inc" #undef VM_FLAG - j6_vm_flags_MAX + j6_vm_flag_MAX }; diff --git a/src/include/j6/tables/vm_flags.inc b/src/include/j6/tables/vm_flags.inc index 3621f36..dbb9df1 100644 --- a/src/include/j6/tables/vm_flags.inc +++ b/src/include/j6/tables/vm_flags.inc @@ -1,7 +1,6 @@ VM_FLAG( none, 0x00000000) VM_FLAG( write, 0x00000001) VM_FLAG( exec, 0x00000002) -VM_FLAG( zero, 0x00000010) VM_FLAG( contiguous, 0x00000020) VM_FLAG( large_pages, 0x00000100) VM_FLAG( huge_pages, 0x00000200) diff --git a/src/include/kernel_memory.h b/src/include/kernel_memory.h index 2899a3b..f3b1b36 100644 --- a/src/include/kernel_memory.h +++ b/src/include/kernel_memory.h @@ -10,6 +10,9 @@ namespace memory { /// Size of a single page frame. constexpr size_t frame_size = 0x1000; + /// Number of bits of addressing within a page + constexpr size_t frame_bits = 12; + /// Start of kernel memory. 
constexpr uintptr_t kernel_offset = 0xffff800000000000ull; diff --git a/src/kernel/objects/vm_area.cpp b/src/kernel/objects/vm_area.cpp index 01f4dfa..32b76e3 100644 --- a/src/kernel/objects/vm_area.cpp +++ b/src/kernel/objects/vm_area.cpp @@ -27,20 +27,21 @@ vm_area::add_to(vm_space *space) return true; } -bool +void vm_area::remove_from(vm_space *space) { m_spaces.remove_swap(space); - return - !m_spaces.count() && - !(m_flags && vm_flags::mmio); + if (!m_spaces.count() && + check_signal(j6_signal_no_handles)) + delete this; } void vm_area::on_no_handles() { kobject::on_no_handles(); - delete this; + if (!m_spaces.count()) + delete this; } size_t @@ -66,15 +67,24 @@ vm_area_fixed::vm_area_fixed(uintptr_t start, size_t size, vm_flags flags) : { } -vm_area_fixed::~vm_area_fixed() {} +vm_area_fixed::~vm_area_fixed() +{ + if (m_flags && vm_flags::mmio) + return; -size_t vm_area_fixed::resize(size_t size) + size_t pages = memory::page_count(m_size); + frame_allocator::get().free(m_start, pages); +} + +size_t +vm_area_fixed::resize(size_t size) { // Not resizable return m_size; } -bool vm_area_fixed::get_page(uintptr_t offset, uintptr_t &phys) +bool +vm_area_fixed::get_page(uintptr_t offset, uintptr_t &phys) { if (offset > m_size) return false; @@ -83,13 +93,15 @@ bool vm_area_fixed::get_page(uintptr_t offset, uintptr_t &phys) return true; } - vm_area_untracked::vm_area_untracked(size_t size, vm_flags flags) : vm_area {size, flags} { } -vm_area_untracked::~vm_area_untracked() {} +vm_area_untracked::~vm_area_untracked() +{ + kassert(false, "An untracked VMA's pages cannot be reclaimed, leaking memory"); +} bool vm_area_untracked::get_page(uintptr_t offset, uintptr_t &phys) @@ -115,7 +127,11 @@ vm_area_open::vm_area_open(size_t size, vm_flags flags) : { } -vm_area_open::~vm_area_open() {} +vm_area_open::~vm_area_open() +{ + // the page_tree will free its pages when deleted + delete m_mapped; +} bool vm_area_open::get_page(uintptr_t offset, uintptr_t &phys) @@ -128,7 
+144,7 @@ vm_area_guarded::vm_area_guarded(uintptr_t start, size_t buf_pages, size_t size, m_start {start}, m_pages {buf_pages}, m_next {memory::frame_size}, - vm_area_untracked {size, flags} + vm_area_open {size, flags} { } @@ -164,6 +180,5 @@ vm_area_guarded::get_page(uintptr_t offset, uintptr_t &phys) if ((offset >> 12) % (m_pages+1) == 0) return false; - return vm_area_untracked::get_page(offset, phys); + return vm_area_open::get_page(offset, phys); } - diff --git a/src/kernel/objects/vm_area.h b/src/kernel/objects/vm_area.h index 4d0ee56..43399aa 100644 --- a/src/kernel/objects/vm_area.h +++ b/src/kernel/objects/vm_area.h @@ -52,9 +52,7 @@ public: /// Track that this area was removed frm a vm_space /// \arg space The space that is removing this area - /// \returns True if the removing space should free the pages - /// mapped for this area - virtual bool remove_from(vm_space *space); + virtual void remove_from(vm_space *space); /// Change the virtual size of the memory area. This may cause /// deallocation if the new size is smaller than the current size. @@ -141,9 +139,8 @@ public: /// Area split into standard-sized segments, separated by guard pages. -/// Based on vm_area_untracked, can not be shared. class vm_area_guarded : - public vm_area_untracked + public vm_area_open { public: /// Constructor. diff --git a/src/kernel/page_tree.cpp b/src/kernel/page_tree.cpp index d1b492c..7188e88 100644 --- a/src/kernel/page_tree.cpp +++ b/src/kernel/page_tree.cpp @@ -4,10 +4,10 @@ #include "kernel_memory.h" #include "page_tree.h" -// Page tree levels map the following parts of a pagewise offset. Note the xxx -// are not part of the offset but represent the bits added for the actual virtual -// address. (Also note that level 0's entries are physical page addrs, the rest -// map other page_tree nodes) +// Page tree levels map the following parts of an offset. Note the xxx are not part of +// the offset but represent the bits of the actual sub-page virtual address. 
+// (Also note that level 0's entries are physical page addrs, the rest map +// other page_tree nodes) // // Level 0: 0000 0000 0003 fxxx 64 pages / 256 KiB // Level 1: 0000 0000 00fc 0xxx 4K pages / 16 MiB -- 24-bit addressing @@ -36,6 +36,20 @@ page_tree::page_tree(uint64_t base, uint8_t level) : kutil::memset(m_entries, 0, sizeof(m_entries)); } +page_tree::~page_tree() +{ + if (m_level) { + for (auto &e : m_entries) + delete e.child; + } else { + auto &fa = frame_allocator::get(); + for (auto &e : m_entries) { + if (e.entry & 1) + fa.free(e.entry & ~0xfffull, 1); + } + } +} + bool page_tree::contains(uint64_t offset, uint8_t &index) const { diff --git a/src/kernel/page_tree.h b/src/kernel/page_tree.h index e7a5df6..bf31350 100644 --- a/src/kernel/page_tree.h +++ b/src/kernel/page_tree.h @@ -23,6 +23,8 @@ public: /// \returns True if a page was found static bool find_or_add(page_tree * &root, uint64_t offset, uintptr_t &page); + ~page_tree(); + private: page_tree(uint64_t base, uint8_t level); diff --git a/src/kernel/vm_space.cpp b/src/kernel/vm_space.cpp index ae4d369..84b9988 100644 --- a/src/kernel/vm_space.cpp +++ b/src/kernel/vm_space.cpp @@ -45,11 +45,8 @@ vm_space::vm_space() : vm_space::~vm_space() { - for (auto &a : m_areas) { - bool free = a.area->remove_from(this); - clear(*a.area, 0, memory::page_count(a.area->size()), free); - a.area->handle_release(); - } + for (auto &a : m_areas) + remove_area(a.area); kassert(!is_kernel(), "Kernel vm_space destructor!"); if (active()) @@ -76,15 +73,21 @@ vm_space::add(uintptr_t base, vm_area *area) return true; } +void +vm_space::remove_area(vm_area *area) +{ + area->remove_from(this); + clear(*area, 0, memory::page_count(area->size())); + area->handle_release(); +} + bool vm_space::remove(vm_area *area) { for (auto &a : m_areas) { if (a.area == area) { - bool free = area->remove_from(this); - clear(*area, 0, memory::page_count(area->size()), free); + remove_area(area); m_areas.remove(a); - area->handle_release(); 
return true; } } @@ -277,10 +280,6 @@ vm_space::handle_fault(uintptr_t addr, fault_type fault) if (!area->get_page(offset, phys_page)) return false; - void *mem = memory::to_virtual(phys_page); - if (area->flags() && vm_flags::zero) - kutil::memset(mem, 0, memory::frame_size); - page_in(*area, offset, phys_page, 1); return true; } diff --git a/src/kernel/vm_space.h b/src/kernel/vm_space.h index abdfd49..c61ac8d 100644 --- a/src/kernel/vm_space.h +++ b/src/kernel/vm_space.h @@ -107,7 +107,6 @@ public: private: friend class vm_area; - friend class vm_mapper_multi; /// Find a given VMA in this address space bool find_vma(const vm_area &vma, uintptr_t &base) const; @@ -118,6 +117,9 @@ private: /// Copy a range of mappings from the given address space void copy_from(const vm_space &source, const vm_area &vma); + /// Remove an area's mappings from this space + void remove_area(vm_area *area); + bool m_kernel; page_table *m_pml4;