revise spinlocks to support the lock_api interface
stlankes committed Nov 17, 2024
1 parent d0500a9 commit 6f5540c
Showing 5 changed files with 62 additions and 116 deletions.
15 changes: 3 additions & 12 deletions Cargo.lock

Generated file; diff not rendered.

2 changes: 1 addition & 1 deletion Cargo.toml
@@ -20,8 +20,8 @@ default = ["qemu-exit"]
 vga = []

 [dependencies]
-spin = "0.9"
 talc = "4.4.2"
+lock_api = "0.4.12"
 qemu-exit = { version = "3.0", optional = true }
 x86 = { version = "0.52", default-features = false }
 cfg-if = "1.0"
4 changes: 2 additions & 2 deletions src/arch/x86/kernel/serial.rs
@@ -1,5 +1,5 @@
+use crate::synch::spinlock::Spinlock;
 use core::fmt;
-use spin::Mutex;
 use x86::io::*;

 /// A COM serial port.
@@ -32,4 +32,4 @@ impl fmt::Write for ComPort {
 }

 /// Our primary serial port.
-pub(crate) static COM1: Mutex<ComPort> = Mutex::new(ComPort::new(0x3F8));
+pub(crate) static COM1: Spinlock<ComPort> = Spinlock::new(ComPort::new(0x3F8));
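
With this change, callers of the serial port go through a lock_api guard instead of spin's. A minimal usage sketch, assuming the fmt::Write impl for ComPort from the unchanged part of this file (the print_boot_message function is hypothetical, not part of the commit):

use core::fmt::Write;

// Hypothetical caller in the same module as COM1: lock() now yields a
// lock_api::MutexGuard, which derefs to the ComPort; dropping the
// guard releases the lock.
fn print_boot_message() {
    let mut port = COM1.lock();
    let _ = writeln!(port, "booting...");
}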
4 changes: 2 additions & 2 deletions src/lib.rs
@@ -7,13 +7,13 @@
 #![no_std]

 extern crate alloc;
-extern crate spin;
 #[cfg(target_arch = "x86_64")]
 extern crate x86;

 // These need to be visible to the linker, so we need to export them.
 use crate::arch::processor::shutdown;
 use crate::consts::HEAP_SIZE;
+use crate::synch::spinlock::RawSpinlock;
 use core::panic::PanicInfo;
 use core::ptr::addr_of;
 use talc::*;
@@ -33,7 +33,7 @@ pub mod synch;
 static mut ARENA: [u8; HEAP_SIZE] = [0; HEAP_SIZE];

 #[global_allocator]
-static ALLOCATOR: Talck<spin::Mutex<()>, ClaimOnOom> = Talc::new(unsafe {
+static ALLOCATOR: Talck<RawSpinlock, ClaimOnOom> = Talc::new(unsafe {
     ClaimOnOom::new(Span::from_array(addr_of!(ARENA) as *mut [u8; HEAP_SIZE]))
 })
 .lock();
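
The allocator swap type-checks because talc's Talck asks only that its lock parameter implement lock_api::RawMutex, which the new RawSpinlock provides. A compile-time sketch of that assumption (the assert_raw_mutex helper is hypothetical, not part of any crate):

use crate::synch::spinlock::RawSpinlock;

// Compiles only if R implements lock_api::RawMutex, i.e. the bound
// Talck places on its lock parameter; fails to build if RawSpinlock
// ever stops satisfying it.
fn assert_raw_mutex<R: lock_api::RawMutex>() {}

fn _check() {
    assert_raw_mutex::<RawSpinlock>();
}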
153 changes: 54 additions & 99 deletions src/synch/spinlock.rs
@@ -1,126 +1,81 @@
 use crate::arch;
-use core::cell::UnsafeCell;
-use core::fmt;
-use core::marker::Sync;
-use core::ops::{Deref, DerefMut, Drop};
 use core::sync::atomic::{AtomicUsize, Ordering};
+use lock_api::{GuardSend, RawMutex, RawMutexFair};

-/// This type provides a lock based on busy waiting to realize mutual exclusion of tasks.
+/// A [fair] [ticket lock].
 ///
-/// # Description
-///
-/// This structure behaves a lot like a normal Mutex. There are some differences:
-///
-/// - By using busy waiting, it can be used outside the runtime.
-/// - It is a so-called ticket lock (https://en.wikipedia.org/wiki/Ticket_lock)
-///   and completely fair.
-///
-/// The interface is derived from https://mvdnes.github.io/rust-docs/spin-rs/spin/index.html.
-///
-/// # Simple examples
-///
-/// ```
-/// let spinlock = synch::Spinlock::new(0);
-///
-/// // Modify the data
-/// {
-///     let mut data = spinlock.lock();
-///     *data = 2;
-/// }
-///
-/// // Read the data
-/// let answer =
-/// {
-///     let data = spinlock.lock();
-///     *data
-/// };
-///
-/// assert_eq!(answer, 2);
-/// ```
-pub struct Spinlock<T: ?Sized> {
+/// [fair]: https://en.wikipedia.org/wiki/Unbounded_nondeterminism
+/// [ticket lock]: https://en.wikipedia.org/wiki/Ticket_lock
+pub struct RawSpinlock {
     queue: AtomicUsize,
     dequeue: AtomicUsize,
-    data: UnsafeCell<T>,
 }

-/// A guard to which the protected data can be accessed
-///
-/// When the guard falls out of scope it will release the lock.
-pub struct SpinlockGuard<'a, T: ?Sized + 'a> {
-    //queue: &'a AtomicUsize,
-    dequeue: &'a AtomicUsize,
-    data: &'a mut T,
-}
+unsafe impl RawMutex for RawSpinlock {
+    #[allow(clippy::declare_interior_mutable_const)]
+    const INIT: Self = Self {
+        queue: AtomicUsize::new(0),
+        dequeue: AtomicUsize::new(0),
+    };

-// Same unsafe impls as `std::sync::Mutex`
-unsafe impl<T: ?Sized + Send> Sync for Spinlock<T> {}
-unsafe impl<T: ?Sized + Send> Send for Spinlock<T> {}
+    type GuardMarker = GuardSend;

-impl<T> Spinlock<T> {
-    pub const fn new(user_data: T) -> Spinlock<T> {
-        Spinlock {
-            queue: AtomicUsize::new(0),
-            dequeue: AtomicUsize::new(1),
-            data: UnsafeCell::new(user_data),
-        }
-    }
-
-    /// Consumes this mutex, returning the underlying data.
-    pub fn into_inner(self) -> T {
-        // We know statically that there are no outstanding references to
-        // `self` so there's no need to lock.
-        let Spinlock { data, .. } = self;
-        data.into_inner()
-    }
-}
-
-impl<T: ?Sized> Spinlock<T> {
-    fn obtain_lock(&self) {
-        let ticket = self.queue.fetch_add(1, Ordering::Relaxed) + 1;
+    #[inline]
+    fn lock(&self) {
+        let ticket = self.queue.fetch_add(1, Ordering::Relaxed);
         while self.dequeue.load(Ordering::Acquire) != ticket {
             arch::processor::pause();
         }
     }

-    pub fn lock(&self) -> SpinlockGuard<T> {
-        self.obtain_lock();
-        SpinlockGuard {
-            //queue: &self.queue,
-            dequeue: &self.dequeue,
-            data: unsafe { &mut *self.data.get() },
-        }
-    }
-}
+    #[inline]
+    fn try_lock(&self) -> bool {
+        let ticket = self
+            .queue
+            .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |ticket| {
+                if self.dequeue.load(Ordering::Acquire) == ticket {
+                    Some(ticket + 1)
+                } else {
+                    None
+                }
+            });

-impl<T: ?Sized + fmt::Debug> fmt::Debug for Spinlock<T> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "queue: {} ", self.queue.load(Ordering::SeqCst))?;
-        write!(f, "dequeue: {}", self.dequeue.load(Ordering::SeqCst))
+        ticket.is_ok()
     }
-}

-impl<T: Default> Default for Spinlock<T> {
-    fn default() -> Spinlock<T> {
-        Spinlock::new(Default::default())
+    #[inline]
+    unsafe fn unlock(&self) {
+        self.dequeue.fetch_add(1, Ordering::Release);
     }
-}

-impl<'a, T: ?Sized> Deref for SpinlockGuard<'a, T> {
-    type Target = T;
-    fn deref(&self) -> &T {
-        &*self.data
+    #[inline]
+    fn is_locked(&self) -> bool {
+        let ticket = self.queue.load(Ordering::Relaxed);
+        self.dequeue.load(Ordering::Relaxed) != ticket
     }
 }

-impl<'a, T: ?Sized> DerefMut for SpinlockGuard<'a, T> {
-    fn deref_mut(&mut self) -> &mut T {
-        &mut *self.data
+unsafe impl RawMutexFair for RawSpinlock {
+    #[inline]
+    unsafe fn unlock_fair(&self) {
+        unsafe { self.unlock() }
     }
-}

-impl<'a, T: ?Sized> Drop for SpinlockGuard<'a, T> {
-    /// The dropping of the SpinlockGuard will release the lock it was created from.
-    fn drop(&mut self) {
-        self.dequeue.fetch_add(1, Ordering::Release);
+    #[inline]
+    unsafe fn bump(&self) {
+        let ticket = self.queue.load(Ordering::Relaxed);
+        let serving = self.dequeue.load(Ordering::Relaxed);
+        if serving + 1 != ticket {
+            unsafe {
+                self.unlock_fair();
+                self.lock();
+            }
+        }
     }
 }
+
+/// A [`lock_api::Mutex`] based on [`RawSpinlock`].
+pub type Spinlock<T> = lock_api::Mutex<RawSpinlock, T>;
+
+/// A [`lock_api::MutexGuard`] based on [`RawSpinlock`].
+pub type SpinlockGuard<'a, T> = lock_api::MutexGuard<'a, RawSpinlock, T>;
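
Because Spinlock and SpinlockGuard are now plain lock_api aliases, existing callers keep the familiar lock() interface and gain try_lock() without further changes. A minimal sketch (the COUNTER static and both functions are illustrative, not part of the commit):

use crate::synch::spinlock::Spinlock;

static COUNTER: Spinlock<usize> = Spinlock::new(0);

fn bump_counter() -> usize {
    // Tickets guarantee FIFO ordering: waiters acquire the lock in the
    // order in which they called lock().
    let mut value = COUNTER.lock();
    *value += 1;
    *value
    // Guard dropped here; RawSpinlock::unlock advances `dequeue`.
}

fn bump_counter_if_free() -> Option<usize> {
    // try_lock() takes a ticket only when it would be served
    // immediately, so it never spins behind other waiters.
    COUNTER.try_lock().map(|mut value| {
        *value += 1;
        *value
    })
}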
