diff --git a/src/include/kernel_memory.h b/src/include/kernel_memory.h
index 92cb2d9..fe8d0c2 100644
--- a/src/include/kernel_memory.h
+++ b/src/include/kernel_memory.h
@@ -58,4 +58,10 @@ namespace memory {
     return reinterpret_cast(a|page_offset);
 }
 
+/// Get the number of pages needed for a given number of bytes.
+/// \arg bytes The number of bytes desired
+/// \returns The number of pages needed to contain the desired bytes
+inline size_t page_count(size_t bytes) {
+    return ((bytes - 1) / frame_size) + 1;
+}
 } // namespace memory
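As a quick reference for the rounding page_count is documented to perform, here is a compile-time sketch. It assumes frame_size is 0x1000 (4 KiB), matching the 12-bit page offsets used throughout the patch, and re-declares the helper locally instead of including kernel_memory.h:

// Sketch only: local stand-ins, not part of the patch.
#include <cstddef>

constexpr size_t frame_size = 0x1000;   // assumed 4 KiB frames

// Mirror of memory::page_count: round bytes up to whole pages.
constexpr size_t page_count(size_t bytes) {
    return ((bytes - 1) / frame_size) + 1;
}

static_assert(page_count(1) == 1,      "anything non-empty needs one page");
static_assert(page_count(0x1000) == 1, "an exact page fits in one page");
static_assert(page_count(0x1001) == 2, "one byte over spills into a second page");
static_assert(page_count(0x4000) == 4, "a 16 KiB stack is four pages");

The last case matches process::stack_size below: a 16 KiB user stack occupies four frames.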
diff --git a/src/kernel/objects/endpoint.cpp b/src/kernel/objects/endpoint.cpp
index 2450574..f74801c 100644
--- a/src/kernel/objects/endpoint.cpp
+++ b/src/kernel/objects/endpoint.cpp
@@ -1,7 +1,9 @@
 #include "objects/endpoint.h"
+#include "objects/process.h"
 #include "objects/thread.h"
 #include "page_manager.h"
 #include "scheduler.h"
+#include "vm_space.h"
 
 endpoint::endpoint() :
     kobject(kobject::type::endpoint)
@@ -86,9 +88,9 @@ endpoint::do_message_copy(const endpoint::thread_data &sender, endpoint::thread_
         return j6_err_insufficient;
 
     page_manager *pm = page_manager::get();
-    void *send_data = pm->get_offset_from_mapped(sender.data, sender.th->tcb()->pml4);
-    void *recv_data = pm->get_offset_from_mapped(receiver.data, receiver.th->tcb()->pml4);
-    kutil::memcpy(recv_data, send_data, sender.len);
+    vm_space &source = sender.th->parent().space();
+    vm_space &dest = receiver.th->parent().space();
+    vm_space::copy(source, dest, sender.data, receiver.data, sender.len);
     *receiver.len_p = sender.len;
 
     // TODO: this will not work if non-contiguous pages are mapped!!
diff --git a/src/kernel/objects/process.cpp b/src/kernel/objects/process.cpp
index f8333ea..020779b 100644
--- a/src/kernel/objects/process.cpp
+++ b/src/kernel/objects/process.cpp
@@ -7,10 +7,8 @@
 
 kutil::vector process::s_processes;
 
-process::process(page_table *pml4) :
+process::process() :
     kobject(kobject::type::process),
-    m_pml4(pml4),
-    m_space(pml4),
     m_next_handle(0),
     m_state(state::running)
 {
@@ -41,7 +39,6 @@ process::exit(unsigned code)
         thread->exit(code);
     }
     m_return_code = code;
-    page_manager::get()->delete_process_map(m_pml4);
     assert_signal(j6_signal_process_exit);
 }
 
@@ -76,13 +73,7 @@ process::create_thread(uint8_t priority, bool user)
 
     if (user) {
         uintptr_t stack_top = stacks_top - (m_threads.count() * stack_size);
-        auto *pm = page_manager::get();
-        pm->map_pages(
-            stack_top - stack_size,
-            page_manager::page_count(stack_size),
-            true, // user stack
-            m_pml4);
-
+        m_space.allow(stack_top - stack_size, stack_size, true);
         th->tcb()->rsp3 = stack_top;
     }
 
diff --git a/src/kernel/objects/process.h b/src/kernel/objects/process.h
index c4216c0..064a0e9 100644
--- a/src/kernel/objects/process.h
+++ b/src/kernel/objects/process.h
@@ -19,8 +19,7 @@ public:
     constexpr static size_t stack_size = 0x4000;
 
     /// Constructor.
-    /// \args pml4 Root of the process' page tables
-    process(page_table *pml4);
+    process();
 
     /// Destructor.
     virtual ~process();
@@ -35,9 +34,6 @@ public:
     /// Update internal bookkeeping about threads.
     void update();
 
-    /// Get the process' page table root
-    page_table * pml4() { return m_pml4; }
-
     /// Get the process' virtual memory space
     vm_space & space() { return m_space; }
 
@@ -75,7 +71,6 @@ public:
 private:
     uint32_t m_return_code;
 
-    page_table *m_pml4;
     vm_space m_space;
 
     kutil::vector m_threads;
diff --git a/src/kernel/objects/thread.cpp b/src/kernel/objects/thread.cpp
index f23dc8c..f51079e 100644
--- a/src/kernel/objects/thread.cpp
+++ b/src/kernel/objects/thread.cpp
@@ -17,7 +17,7 @@ thread::thread(process &parent, uint8_t pri, uintptr_t rsp0) :
     m_wait_data(0),
     m_wait_obj(0)
 {
-    m_tcb.pml4 = parent.pml4();
+    parent.space().initialize_tcb(m_tcb);
     m_tcb.priority = pri;
 
     if (!rsp0)
diff --git a/src/kernel/objects/thread.h b/src/kernel/objects/thread.h
index 542521b..49f6e8e 100644
--- a/src/kernel/objects/thread.h
+++ b/src/kernel/objects/thread.h
@@ -15,7 +15,7 @@ struct TCB
     uintptr_t rsp;
     uintptr_t rsp0;
     uintptr_t rsp3;
-    page_table *pml4;
+    uintptr_t pml4;
     uint8_t priority;
 
     // note: 3 bytes padding
diff --git a/src/kernel/page_manager.cpp b/src/kernel/page_manager.cpp
index afadc35..f48df9e 100644
--- a/src/kernel/page_manager.cpp
+++ b/src/kernel/page_manager.cpp
@@ -45,55 +45,6 @@ page_manager::page_manager(frame_allocator &frames, page_table *pml4) :
 {
 }
 
-page_table *
-page_manager::create_process_map()
-{
-    page_table *table = page_table::get_table_page();
-
-    kutil::memset(table, 0, frame_size/2);
-    for (unsigned i = pml4e_kernel; i < table_entries; ++i)
-        table->entries[i] = m_kernel_pml4->entries[i];
-
-    return table;
-}
-
-void
-page_manager::delete_process_map(page_table *pml4)
-{
-    bool was_pml4 = (pml4 == get_pml4());
-    if (was_pml4)
-        set_pml4(m_kernel_pml4);
-
-    log::info(logs::paging, "Deleting process pml4 at %016lx%s",
-        pml4, was_pml4 ? " (was current)" : "");
-
-    unmap_table(pml4, page_table::level::pml4, true);
-}
-
-void *
-page_manager::get_offset_from_mapped(void *p, page_table *pml4)
-{
-    if (!pml4) pml4 = get_pml4();
-    uintptr_t v = reinterpret_cast<uintptr_t>(p);
-
-    page_table_indices idx{v};
-    page_table *tables[4] = {pml4, nullptr, nullptr, nullptr};
-
-    for (int i = 1; i < 4; ++i) {
-        tables[i] = tables[i-1]->get(idx[i-1]);
-        if (!tables[i])
-            return nullptr;
-    }
-
-    uintptr_t a = tables[3]->entries[idx[3]];
-    if (!(a & 1))
-        return nullptr;
-
-    return memory::to_virtual(
-        (a & ~0xfffull) |
-        (v & 0xfffull));
-}
-
 void
 page_manager::dump_pml4(page_table *pml4, bool recurse)
 {
diff --git a/src/kernel/page_manager.h b/src/kernel/page_manager.h
index 28b46d9..ba37274 100644
--- a/src/kernel/page_manager.h
+++ b/src/kernel/page_manager.h
@@ -49,15 +49,6 @@ public:
         __asm__ __volatile__ ( "mov %0, %%cr3" :: "r" (p) );
     }
 
-    /// Allocate but don't switch to a new PML4 table. This table
-    /// should only have global kernel pages mapped.
-    /// \returns A pointer to the PML4 table
-    page_table * create_process_map();
-
-    /// Deallocate a process' PML4 table and entries.
-    /// \arg pml4 The process' PML4 table
-    void delete_process_map(page_table *pml4);
-
     /// Allocate and map pages into virtual memory.
     /// \arg address The virtual address at which to map the pages
     /// \arg count The number of pages to map
@@ -72,11 +63,6 @@ public:
     /// \arg pml4 The pml4 to unmap from - null for the current one
     void unmap_pages(void *address, size_t count, page_table *pml4 = nullptr);
 
-    /// Get the offet-mapped virtual address of a normal virtual address
-    /// \arg p Virtual address
-    /// \returns Virtual address in offset-mapped linear space
-    void * get_offset_from_mapped(void *p, page_table *pml4 = nullptr);
-
     /// Dump the given or current PML4 to the console
     /// \arg pml4 The page table to use, null for the current one
    /// \arg recurse Whether to print sub-tables
diff --git a/src/kernel/page_table.cpp b/src/kernel/page_table.cpp
index 1094adc..5087e49 100644
--- a/src/kernel/page_table.cpp
+++ b/src/kernel/page_table.cpp
@@ -12,6 +12,7 @@ extern frame_allocator &g_frame_allocator;
 
 free_page_header * page_table::s_page_cache = nullptr;
 size_t page_table::s_cache_count = 0;
+constexpr size_t page_table::entry_sizes[4];
 
 // Flags: 0 0 0 0 0 0 0 0 0 0 1 1 = 0x0003
 // IGNORED  | | | | | | | +- Present
@@ -88,7 +89,7 @@ page_table::iterator::align() const
 page_table::level
 page_table::iterator::depth() const
 {
-    for (level i = level::pml4; i < level::pt; ++i)
+    for (level i = level::pml4; i < level::page; ++i)
        if (!(entry(i) & 1)) return i;
     return level::pt;
 }
@@ -248,6 +249,26 @@ page_table::fill_table_page_cache()
     }
 }
 
+void
+page_table::free(page_table::level l)
+{
+    unsigned last = l == level::pml4
+        ? memory::pml4e_kernel
+        : memory::table_entries;
+
+    for (unsigned i = 0; i < last; ++i) {
+        if (!is_present(i)) continue;
+        if (is_page(l, i)) {
+            size_t count = memory::page_count(entry_sizes[unsigned(l)]);
+            g_frame_allocator.free(entries[i] & ~0xfffull, count);
+        } else {
+            get(i)->free(l + 1);
+        }
+    }
+
+    free_table_page(this);
+}
+
 void
 page_table::dump(page_table::level lvl, bool recurse)
 {
diff --git a/src/kernel/page_table.h b/src/kernel/page_table.h
index 9f428eb..4477849 100644
--- a/src/kernel/page_table.h
+++ b/src/kernel/page_table.h
@@ -76,7 +76,7 @@ struct page_table
     /// Get a *non-const* reference to the current table entry of
     /// the table at the given level.
     inline uint64_t & entry(level l) {
-        for (unsigned i = 1; i < unsigned(l); ++i) ensure_table(level(i));
+        for (unsigned i = 1; i <= unsigned(l); ++i) ensure_table(level(i));
         return table(l)->entries[index(l)];
     }
 
@@ -149,6 +149,10 @@ struct page_table
     /// Check if the given entry represents a page (of any size)
     inline bool is_page(level l, int i) const { return (l == level::pt) || is_large_page(l, i); }
 
+    /// Free this page table and all resources it references
+    /// \arg l The level of this page table
+    void free(level l);
+
     /// Print this table to the debug console.
     void dump(level lvl = level::pml4, bool recurse = true);
diff --git a/src/kernel/scheduler.cpp b/src/kernel/scheduler.cpp
index 4cd7196..61f2de1 100644
--- a/src/kernel/scheduler.cpp
+++ b/src/kernel/scheduler.cpp
@@ -42,7 +42,7 @@ scheduler::scheduler(lapic *apic) :
     s_instance = this;
 
     page_table *pml4 = page_manager::get_pml4();
-    process *kp = new process(pml4);
+    process *kp = new process;
     m_kernel_process = kp;
 
     log::debug(logs::task, "Kernel process koid %llx", kp->koid());
@@ -136,9 +136,9 @@ load_process_image(const void *image_start, size_t bytes, TCB *tcb)
 }
 
 thread *
-scheduler::create_process(page_table *pml4, bool user)
+scheduler::create_process(bool user)
 {
-    process *p = new process(pml4);
+    process *p = new process;
     thread *th = p->create_thread(default_priority, user);
 
     auto *tcb = th->tcb();
@@ -160,10 +160,7 @@ scheduler::load_process(const char *name, const void *data, size_t size)
     uint16_t kss = (2 << 3) | 0; // Kernel SS is GDT entry 2, ring 0
     uint16_t ss = (4 << 3) | 3;  // User SS is GDT entry 4, ring 3
 
-    // Set up the page tables - this also allocates an initial user stack
-    page_table *pml4 = page_manager::get()->create_process_map();
-
-    thread* th = create_process(pml4, true);
+    thread* th = create_process(true);
     auto *tcb = th->tcb();
 
     // Create an initial kernel stack space
@@ -348,10 +345,11 @@ scheduler::schedule()
     m_apic->reset_timer(next->time_left);
 
     if (next != m_current) {
+        thread *next_thread = thread::from_tcb(next);
+
+        bsp_cpu_data.t = next_thread;
+        bsp_cpu_data.p = &next_thread->parent();
         m_current = next;
-        bsp_cpu_data.t = thread::from_tcb(m_current);
-        bsp_cpu_data.p = &th->parent();
-        thread *next_thread = thread::from_tcb(m_current);
 
         log::debug(logs::task, "Scheduler switching threads %llx->%llx",
             th->koid(), next_thread->koid());
diff --git a/src/kernel/scheduler.h b/src/kernel/scheduler.h
index f67bf2a..2551148 100644
--- a/src/kernel/scheduler.h
+++ b/src/kernel/scheduler.h
@@ -83,10 +83,9 @@ private:
     /// Create a new process object. This process will have its pid
     /// set but nothing else.
-    /// \arg pml4 The root page table of the process
     /// \arg user True if this thread will enter userspace
     /// \returns The new process' main thread
-    thread * create_process(page_table *pml4, bool user);
+    thread * create_process(bool user);
 
     void prune(uint64_t now);
     void check_promotions(uint64_t now);
diff --git a/src/kernel/vm_space.cpp b/src/kernel/vm_space.cpp
index f0c6c9c..90d8d5b 100644
--- a/src/kernel/vm_space.cpp
+++ b/src/kernel/vm_space.cpp
@@ -1,5 +1,5 @@
 #include "log.h"
-#include "objects/process.h"
+#include "objects/thread.h"
 #include "objects/vm_area.h"
 #include "page_manager.h"
 #include "vm_space.h"
@@ -24,12 +24,29 @@ vm_space::vm_space(page_table *p) :
     m_kernel(true), m_pml4(p) {}
 
 vm_space::vm_space() :
     m_kernel(false)
 {
+    m_pml4 = page_table::get_table_page();
+    page_table *kpml4 = kernel_space().m_pml4;
+
+    kutil::memset(m_pml4, 0, memory::frame_size/2);
+    for (unsigned i = memory::pml4e_kernel; i < memory::table_entries; ++i)
+        m_pml4->entries[i] = kpml4->entries[i];
 }
 
 vm_space::~vm_space()
 {
     for (auto &a : m_areas)
         a.area->remove_from(this);
+
+    kassert(!is_kernel(), "Kernel vm_space destructor!");
+
+    vm_space &kernel = kernel_space();
+
+    if (active())
+        kernel.activate();
+
+    // All VMAs have been removed by now, so just
+    // free all remaining pages and tables
+    m_pml4->free(page_table::level::pml4);
 }
 
 vm_space &
@@ -104,6 +121,30 @@ vm_space::allow(uintptr_t start, size_t length, bool allow)
     }
 }
 
+bool
+vm_space::active() const
+{
+    uintptr_t pml4 = 0;
+    __asm__ __volatile__ ( "mov %%cr3, %0" : "=r" (pml4) );
+    return memory::to_virtual(pml4 & ~0xfffull) == m_pml4;
+}
+
+void
+vm_space::activate() const
+{
+    constexpr uint64_t phys_mask = ~memory::page_offset & ~0xfffull;
+    uintptr_t p = reinterpret_cast<uintptr_t>(m_pml4) & phys_mask;
+    __asm__ __volatile__ ( "mov %0, %%cr3" :: "r" (p) );
+}
+
+void
+vm_space::initialize_tcb(TCB &tcb)
+{
+    tcb.pml4 =
+        reinterpret_cast<uintptr_t>(m_pml4) &
+        ~memory::page_offset;
+}
+
 bool
 vm_space::handle_fault(uintptr_t addr, fault_type fault)
 {
@@ -124,3 +165,22 @@
 
     return true;
 }
+
+size_t
+vm_space::copy(vm_space &source, vm_space &dest, void *from, void *to, size_t length)
+{
+    uintptr_t ifrom = reinterpret_cast<uintptr_t>(from);
+    uintptr_t ito = reinterpret_cast<uintptr_t>(to);
+
+    page_table::iterator sit {ifrom, source.m_pml4};
+    page_table::iterator dit {ito, dest.m_pml4};
+
+    // TODO: iterate page mappings and continue copying. For now i'm blindly
+    // assuming both buffers are fully contained within single pages
+    kutil::memcpy(
+        memory::to_virtual((*dit & ~0xfffull) | (ito & 0xfffull)),
+        memory::to_virtual((*sit & ~0xfffull) | (ifrom & 0xfffull)),
+        length);
+
+    return length;
+}
diff --git a/src/kernel/vm_space.h b/src/kernel/vm_space.h
index 1c984e2..44ed32f 100644
--- a/src/kernel/vm_space.h
+++ b/src/kernel/vm_space.h
@@ -8,6 +8,7 @@
 
 struct page_table;
 class process;
+struct TCB;
 class vm_area;
 
 /// Tracks a region of virtual memory address space
@@ -64,6 +65,12 @@ public:
     /// \arg allow True if allocation should be allowed
     void allow(uintptr_t start, size_t length, bool allow);
 
+    /// Check if this space is the current active space
+    bool active() const;
+
+    /// Set this space as the current active space
+    void activate() const;
+
     enum class fault_type : uint8_t {
         none = 0x00,
         present = 0x01,
@@ -79,11 +86,22 @@ public:
     /// \returns True if the fault was successfully handled
     bool handle_fault(uintptr_t addr, fault_type fault);
 
+    /// Set up a TCB to operate in this address space.
+    void initialize_tcb(TCB &tcb);
+
+    /// Copy data from one address space to another
+    /// \arg source The address space data is being copied from
+    /// \arg dest The address space data is being copied to
+    /// \arg from Pointer to the data in the source address space
+    /// \arg to Pointer to the destination in the dest address space
+    /// \arg length Amount of data to copy, in bytes
+    /// \returns The number of bytes copied
+    static size_t copy(vm_space &source, vm_space &dest, void *from, void *to, size_t length);
+
 private:
     bool m_kernel;
     page_table *m_pml4;
-
     struct area {
         uintptr_t base;
         vm_area *area;
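For reference, a standalone sketch of the address arithmetic vm_space::copy relies on: the physical frame from a leaf page-table entry is combined with the low 12 bits of the original virtual address, then read through the kernel's offset-mapped window. The page_offset value and the offset_mapped helper below are assumptions for illustration only, not part of the patch:

// Sketch only: models the (*pte & ~0xfff) | (vaddr & 0xfff) computation
// from vm_space::copy with an assumed offset-map base.
#include <cassert>
#include <cstdint>

constexpr uint64_t page_offset = 0xffff800000000000ull;  // assumed offset-map base

// Hypothetical helper: locate the offset-mapped byte a leaf PTE points at.
inline uint64_t offset_mapped(uint64_t pte, uint64_t vaddr) {
    uint64_t phys = (pte & ~0xfffull) | (vaddr & 0xfffull);  // frame base + in-page offset
    return phys | page_offset;  // same trick memory::to_virtual uses
}

int main() {
    uint64_t pte = 0x000000000042d003ull;    // frame 0x42d000, present + writable
    uint64_t vaddr = 0x00007fff0000abcdull;  // user virtual address

    assert(offset_mapped(pte, vaddr) == 0xffff80000042dbcdull);
    return 0;
}

As the TODO comments in do_message_copy and vm_space::copy note, this only holds while each buffer sits within a single page; crossing a page boundary requires advancing to the next mapping.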