From 8c0d52d0feea3b990e4e19e65133b33b537ff88f Mon Sep 17 00:00:00 2001
From: "Justin C. Miller"
Date: Wed, 10 Feb 2021 23:57:51 -0800
Subject: [PATCH] [kernel] Add spinlocks to vm_space, frame_allocator

Also updated spinlock interface to be an object, and added a scoped
lock object that uses it as well.
---
 src/kernel/cpu.h                             |  2 -
 src/kernel/frame_allocator.cpp               | 12 ++++--
 src/kernel/frame_allocator.h                 |  5 ++-
 src/kernel/vm_space.cpp                      |  4 +-
 src/kernel/vm_space.h                        |  3 ++
 src/libraries/kutil/include/kutil/spinlock.h | 42 ++++++++++++++++----
 src/libraries/kutil/spinlock.cpp             | 29 +++++++-------
 7 files changed, 68 insertions(+), 29 deletions(-)

diff --git a/src/kernel/cpu.h b/src/kernel/cpu.h
index b1f2f05..a5afa35 100644
--- a/src/kernel/cpu.h
+++ b/src/kernel/cpu.h
@@ -32,8 +32,6 @@ struct cpu_data
     TSS *tss;
     GDT *gdt;
 
-    // Values from here on don't need to be in the asm version
-    kutil::spinlock::node spinner;
 };
 
 extern "C" cpu_data * _current_gsbase();
diff --git a/src/kernel/frame_allocator.cpp b/src/kernel/frame_allocator.cpp
index c68b957..169859f 100644
--- a/src/kernel/frame_allocator.cpp
+++ b/src/kernel/frame_allocator.cpp
@@ -1,6 +1,6 @@
-#include "kernel_memory.h"
 #include "kutil/assert.h"
 #include "kutil/memory.h"
+
 #include "frame_allocator.h"
 #include "kernel_args.h"
 #include "kernel_memory.h"
@@ -17,8 +17,8 @@ frame_allocator::get()
 }
 
 frame_allocator::frame_allocator(kernel::args::frame_block *frames, size_t count) :
-    m_blocks(frames),
-    m_count(count)
+    m_blocks {frames},
+    m_count {count}
 {
 }
 
@@ -32,6 +32,8 @@ bsf(uint64_t v)
 size_t
 frame_allocator::allocate(size_t count, uintptr_t *address)
 {
+    kutil::scoped_lock lock {m_lock};
+
     for (long i = m_count - 1; i >= 0; --i) {
         frame_block &block = m_blocks[i];
 
@@ -80,6 +82,8 @@ frame_allocator::allocate(size_t count, uintptr_t *address)
 void
 frame_allocator::free(uintptr_t address, size_t count)
 {
+    kutil::scoped_lock lock {m_lock};
+
     kassert(address % frame_size == 0, "Trying to free a non page-aligned frame!");
 
     if (!count)
@@ -116,6 +120,8 @@ frame_allocator::free(uintptr_t address, size_t count)
 void
 frame_allocator::used(uintptr_t address, size_t count)
 {
+    kutil::scoped_lock lock {m_lock};
+
     kassert(address % frame_size == 0, "Trying to mark a non page-aligned frame!");
 
     if (!count)
diff --git a/src/kernel/frame_allocator.h b/src/kernel/frame_allocator.h
index 1060ff4..78f1411 100644
--- a/src/kernel/frame_allocator.h
+++ b/src/kernel/frame_allocator.h
@@ -3,6 +3,7 @@
 /// Allocator for physical memory frames
 
 #include
+#include "kutil/spinlock.h"
 
 namespace kernel {
 namespace args {
@@ -43,7 +44,9 @@ public:
 
 private:
     frame_block *m_blocks;
-    long m_count;
+    size_t m_count;
+
+    kutil::spinlock m_lock;
 
     frame_allocator() = delete;
     frame_allocator(const frame_allocator &) = delete;
diff --git a/src/kernel/vm_space.cpp b/src/kernel/vm_space.cpp
index c8f0136..6c045db 100644
--- a/src/kernel/vm_space.cpp
+++ b/src/kernel/vm_space.cpp
@@ -33,7 +33,7 @@ vm_space::vm_space(page_table *p) :
 {}
 
 vm_space::vm_space() :
-    m_kernel(false)
+    m_kernel {false}
 {
     m_pml4 = page_table::get_table_page();
     page_table *kpml4 = kernel_space().m_pml4;
@@ -163,6 +163,7 @@ void
 vm_space::page_in(const vm_area &vma, uintptr_t offset, uintptr_t phys, size_t count)
 {
     using memory::frame_size;
+    kutil::scoped_lock lock {m_lock};
 
     uintptr_t base = 0;
     if (!find_vma(vma, base))
@@ -190,6 +191,7 @@ void
 vm_space::clear(const vm_area &vma, uintptr_t offset, size_t count, bool free)
 {
     using memory::frame_size;
+    kutil::scoped_lock lock {m_lock};
 
     uintptr_t base = 0;
     if (!find_vma(vma, base))
diff --git a/src/kernel/vm_space.h b/src/kernel/vm_space.h
index de82c69..4437332 100644
--- a/src/kernel/vm_space.h
+++ b/src/kernel/vm_space.h
@@ -4,6 +4,7 @@
 #include
 
 #include "kutil/enum_bitfields.h"
+#include "kutil/spinlock.h"
 #include "kutil/vector.h"
 #include "page_table.h"
 
@@ -127,6 +128,8 @@ private:
         bool operator==(const struct area &o) const;
     };
     kutil::vector m_areas;
+
+    kutil::spinlock m_lock;
 };
 
 IS_BITFIELD(vm_space::fault_type);
diff --git a/src/libraries/kutil/include/kutil/spinlock.h b/src/libraries/kutil/include/kutil/spinlock.h
index a41e35e..68a059c 100644
--- a/src/libraries/kutil/include/kutil/spinlock.h
+++ b/src/libraries/kutil/include/kutil/spinlock.h
@@ -4,17 +4,43 @@
 #pragma once
 
 namespace kutil {
-namespace spinlock {
 
-/// An MCS based spinlock node
-struct node
+/// An MCS based spinlock
+class spinlock
 {
-    bool locked;
-    node *next;
+public:
+    spinlock();
+    ~spinlock();
+
+    /// A node in the wait queue.
+    struct waiter
+    {
+        bool locked;
+        waiter *next;
+    };
+
+    void acquire(waiter *w);
+    void release(waiter *w);
+
+private:
+    waiter *m_lock;
 };
 
-void aquire(node *lock, node *waiter);
-void release(node *lock, node *waiter);
+/// Scoped lock that owns a spinlock::waiter
+class scoped_lock
+{
+public:
+    inline scoped_lock(spinlock &lock) : m_lock(lock) {
+        m_lock.acquire(&m_waiter);
+    }
+
+    inline ~scoped_lock() {
+        m_lock.release(&m_waiter);
+    }
+
+private:
+    spinlock &m_lock;
+    spinlock::waiter m_waiter;
+};
 
-} // namespace spinlock
 } // namespace kutil
diff --git a/src/libraries/kutil/spinlock.cpp b/src/libraries/kutil/spinlock.cpp
index b6daba9..6c549b5 100644
--- a/src/libraries/kutil/spinlock.cpp
+++ b/src/libraries/kutil/spinlock.cpp
@@ -1,48 +1,49 @@
 #include "kutil/spinlock.h"
 
 namespace kutil {
-namespace spinlock {
 
 static constexpr int memorder = __ATOMIC_SEQ_CST;
 
+spinlock::spinlock() : m_lock {nullptr} {}
+spinlock::~spinlock() {}
+
 void
-aquire(node * &lock, node *waiter)
+spinlock::acquire(waiter *w)
 {
-    waiter->next = nullptr;
-    waiter->locked = true;
+    w->next = nullptr;
+    w->locked = true;
 
     // Point the lock at this waiter
-    node *prev = __atomic_exchange_n(&lock, waiter, memorder);
+    waiter *prev = __atomic_exchange_n(&m_lock, w, memorder);
 
     if (prev) {
         // If there was a previous waiter, wait for them to
         // unblock us
-        prev->next = waiter;
-        while (waiter->locked) {
+        prev->next = w;
+        while (w->locked) {
             asm ("pause");
         }
     } else {
-        waiter->locked = false;
+        w->locked = false;
    }
 }
 
 void
-release(node * &lock, node *waiter)
+spinlock::release(waiter *w)
 {
-    if (!waiter->next) {
+    if (!w->next) {
         // If we're still the last waiter, we're done
-        if(__atomic_compare_exchange_n(&lock, &waiter, nullptr, false, memorder, memorder))
+        if(__atomic_compare_exchange_n(&m_lock, &w, nullptr, false, memorder, memorder))
             return;
     }
 
     // Wait for the subseqent waiter to tell us who they are
-    while (!waiter->next) {
+    while (!w->next) {
         asm ("pause");
     }
 
     // Unblock the subseqent waiter
-    waiter->next->locked = false;
+    w->next->locked = false;
 }
 
-} // namespace spinlock
 } // namespace kutil
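
For reference, a minimal usage sketch of the new interface (not part of the
patch; the ticket_counter type and its members are invented for illustration,
and only the kutil::spinlock / kutil::scoped_lock API comes from this change).
It shows the preferred RAII form alongside the explicit waiter form:

    #include <stdint.h>
    #include "kutil/spinlock.h"

    class ticket_counter
    {
    public:
        // Preferred form: scoped_lock owns its spinlock::waiter and
        // releases the lock when it goes out of scope, even on early
        // return.
        uint64_t take()
        {
            kutil::scoped_lock lock {m_lock};
            return m_next++;
        }

        // Explicit form, for callers that cannot use RAII: the queue
        // node lives on the caller's stack and is passed to both
        // acquire() and release().
        uint64_t take_explicit()
        {
            kutil::spinlock::waiter w;
            m_lock.acquire(&w);
            uint64_t t = m_next++;
            m_lock.release(&w);
            return t;
        }

    private:
        uint64_t m_next = 0;
        kutil::spinlock m_lock;
    };

Because each contender spins on the locked flag of its own waiter node rather
than on the shared lock word, each CPU spins on a cache line local to itself;
that MCS property is why the waiter object remains part of the interface even
with the scoped wrapper available.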