[kernel] Add spinlocks to vm_space, frame_allocator

Also updated the spinlock interface to be an object, and added a
scoped lock object built on top of it.
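
The resulting caller pattern is a stack-allocated RAII guard. A minimal
sketch (the example class is hypothetical; the kutil names match the
diffs below):

    #include "kutil/spinlock.h"

    // Hypothetical type standing in for frame_allocator / vm_space,
    // which follow the same pattern in the diffs below.
    class example
    {
    public:
        void touch_shared_state()
        {
            // scoped_lock embeds a spinlock::waiter on this stack
            // frame, acquires m_lock in its constructor, and releases
            // it in its destructor on any exit from this scope.
            kutil::scoped_lock lock {m_lock};
            ++m_counter;
        }

    private:
        kutil::spinlock m_lock;
        unsigned m_counter = 0;
    };
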
Justin C. Miller
2021-02-10 23:57:51 -08:00
parent 793bba95b5
commit 8c0d52d0fe
7 changed files with 68 additions and 29 deletions

View File

@@ -32,8 +32,6 @@ struct cpu_data
     TSS *tss;
     GDT *gdt;
-    // Values from here on don't need to be in the asm version
-    kutil::spinlock::node spinner;
 };
 
 extern "C" cpu_data * _current_gsbase();

View File

@@ -1,6 +1,6 @@
-#include "kernel_memory.h"
 #include "kutil/assert.h"
 #include "kutil/memory.h"
 
 #include "frame_allocator.h"
 #include "kernel_args.h"
+#include "kernel_memory.h"
@@ -17,8 +17,8 @@ frame_allocator::get()
 }
 
 frame_allocator::frame_allocator(kernel::args::frame_block *frames, size_t count) :
-    m_blocks(frames),
-    m_count(count)
+    m_blocks {frames},
+    m_count {count}
 {
 }
@@ -32,6 +32,8 @@ bsf(uint64_t v)
 size_t
 frame_allocator::allocate(size_t count, uintptr_t *address)
 {
+    kutil::scoped_lock lock {m_lock};
+
     for (long i = m_count - 1; i >= 0; --i) {
         frame_block &block = m_blocks[i];
@@ -80,6 +82,8 @@ frame_allocator::allocate(size_t count, uintptr_t *address)
 void
 frame_allocator::free(uintptr_t address, size_t count)
 {
+    kutil::scoped_lock lock {m_lock};
+
     kassert(address % frame_size == 0, "Trying to free a non page-aligned frame!");
 
     if (!count)
@@ -116,6 +120,8 @@ frame_allocator::free(uintptr_t address, size_t count)
 void
 frame_allocator::used(uintptr_t address, size_t count)
 {
+    kutil::scoped_lock lock {m_lock};
+
     kassert(address % frame_size == 0, "Trying to mark a non page-aligned frame!");
 
     if (!count)

View File

@@ -3,6 +3,7 @@
 /// Allocator for physical memory frames
 
 #include <stdint.h>
+#include "kutil/spinlock.h"
 
 namespace kernel {
 namespace args {
@@ -43,7 +44,9 @@ public:
 private:
     frame_block *m_blocks;
-    long m_count;
+    size_t m_count;
+
+    kutil::spinlock m_lock;
 
     frame_allocator() = delete;
     frame_allocator(const frame_allocator &) = delete;

View File

@@ -33,7 +33,7 @@ vm_space::vm_space(page_table *p) :
 {}
 
 vm_space::vm_space() :
-    m_kernel(false)
+    m_kernel {false}
 {
     m_pml4 = page_table::get_table_page();
     page_table *kpml4 = kernel_space().m_pml4;
@@ -163,6 +163,7 @@ void
 vm_space::page_in(const vm_area &vma, uintptr_t offset, uintptr_t phys, size_t count)
 {
     using memory::frame_size;
+    kutil::scoped_lock lock {m_lock};
 
     uintptr_t base = 0;
     if (!find_vma(vma, base))
@@ -190,6 +191,7 @@ void
 vm_space::clear(const vm_area &vma, uintptr_t offset, size_t count, bool free)
 {
     using memory::frame_size;
+    kutil::scoped_lock lock {m_lock};
 
     uintptr_t base = 0;
     if (!find_vma(vma, base))

View File

@@ -4,6 +4,7 @@
 #include <stdint.h>
 
 #include "kutil/enum_bitfields.h"
+#include "kutil/spinlock.h"
 #include "kutil/vector.h"
 #include "page_table.h"
@@ -127,6 +128,8 @@ private:
         bool operator==(const struct area &o) const;
     };
 
     kutil::vector<area> m_areas;
+
+    kutil::spinlock m_lock;
 };
 
 IS_BITFIELD(vm_space::fault_type);

View File

@@ -4,17 +4,43 @@
 #pragma once
 
 namespace kutil {
-namespace spinlock {
 
-/// An MCS based spinlock node
-struct node
+/// An MCS based spinlock
+class spinlock
 {
+public:
+    spinlock();
+    ~spinlock();
+
+    /// A node in the wait queue.
+    struct waiter
+    {
         bool locked;
-        node *next;
+        waiter *next;
     };
 
-void aquire(node * &lock, node *waiter);
-void release(node * &lock, node *waiter);
+    void acquire(waiter *w);
+    void release(waiter *w);
+
+private:
+    waiter *m_lock;
+};
+
+/// Scoped lock that owns a spinlock::waiter
+class scoped_lock
+{
+public:
+    inline scoped_lock(spinlock &lock) : m_lock(lock) {
+        m_lock.acquire(&m_waiter);
+    }
+
+    inline ~scoped_lock() {
+        m_lock.release(&m_waiter);
+    }
+
+private:
+    spinlock &m_lock;
+    spinlock::waiter m_waiter;
+};
 
-} // namespace spinlock
 } // namespace kutil

View File

@@ -1,48 +1,49 @@
 #include "kutil/spinlock.h"
 
 namespace kutil {
-namespace spinlock {
 
 static constexpr int memorder = __ATOMIC_SEQ_CST;
 
+spinlock::spinlock() : m_lock {nullptr} {}
+spinlock::~spinlock() {}
+
 void
-aquire(node * &lock, node *waiter)
+spinlock::acquire(waiter *w)
 {
-    waiter->next = nullptr;
-    waiter->locked = true;
+    w->next = nullptr;
+    w->locked = true;
 
     // Point the lock at this waiter
-    node *prev = __atomic_exchange_n(&lock, waiter, memorder);
+    waiter *prev = __atomic_exchange_n(&m_lock, w, memorder);
 
     if (prev) {
         // If there was a previous waiter, wait for them to
         // unblock us
-        prev->next = waiter;
-        while (waiter->locked) {
+        prev->next = w;
+        while (w->locked) {
             asm ("pause");
         }
     } else {
-        waiter->locked = false;
+        w->locked = false;
     }
 }
 
 void
-release(node * &lock, node *waiter)
+spinlock::release(waiter *w)
 {
-    if (!waiter->next) {
+    if (!w->next) {
         // If we're still the last waiter, we're done
-        if (__atomic_compare_exchange_n(&lock, &waiter, nullptr, false, memorder, memorder))
+        if (__atomic_compare_exchange_n(&m_lock, &w, nullptr, false, memorder, memorder))
             return;
     }
 
     // Wait for the subsequent waiter to tell us who they are
-    while (!waiter->next) {
+    while (!w->next) {
         asm ("pause");
     }
 
     // Unblock the subsequent waiter
-    waiter->next->locked = false;
+    w->next->locked = false;
 }
 
-} // namespace spinlock
 } // namespace kutil
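
A quick way to sanity-check the MCS handoff above is to exercise the
lock from two threads in a hosted build. The sketch below is
hypothetical (std::thread does not exist in the kernel), assuming the
kutil sources also compile under an ordinary x86 host toolchain:

    #include <thread>
    #include "kutil/spinlock.h"

    int main()
    {
        kutil::spinlock lock;
        long counter = 0;

        auto work = [&] {
            for (int i = 0; i < 100000; ++i) {
                // Each iteration queues its own waiter node; a
                // contending thread spins only on its own node's
                // locked flag until its predecessor unblocks it.
                kutil::scoped_lock guard {lock};
                ++counter;
            }
        };

        std::thread a {work};
        std::thread b {work};
        a.join();
        b.join();

        // With a correct acquire/release handoff, every increment is
        // serialized and counter == 200000.
        return counter == 200000 ? 0 : 1;
    }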