Allows using primitive heap on stage 2 #1270

Merged (2 commits) on Feb 1, 2025
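In short, this PR renames the boot-time stage-1 heap to the "primitive heap" and lets it keep serving allocations after stage 2 is active: `Stage::Two` now holds both the `VmHeap` and the primitive Talc heap, and a new per-thread `HeapGuard` counter (`Thread::disable_vm_heap`) temporarily routes the global allocator back to the primitive heap, for example while the VM heap itself is allocating.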
8 changes: 4 additions & 4 deletions kernel/src/main.rs
@@ -130,9 +130,9 @@ impl ProcAbi for Proc0Abi {
     }
 }
 
-// SAFETY: STAGE1_HEAP is a mutable static so it valid for reads and writes. This will be safe as
-// long as no one access STAGE1_HEAP.
+// SAFETY: PRIMITIVE_HEAP is a mutable static so it valid for reads and writes. This will be safe as
+// long as no one access PRIMITIVE_HEAP.
 #[allow(dead_code)]
 #[cfg_attr(target_os = "none", global_allocator)]
-static KERNEL_HEAP: KernelHeap = unsafe { KernelHeap::new(&raw mut STAGE1_HEAP) };
-static mut STAGE1_HEAP: [u8; 1024 * 1024] = unsafe { zeroed() };
+static KERNEL_HEAP: KernelHeap = unsafe { KernelHeap::new(&raw mut PRIMITIVE_HEAP) };
+static mut PRIMITIVE_HEAP: [u8; 1024 * 1024] = unsafe { zeroed() };
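For readers who have not seen the pattern before, the `PRIMITIVE_HEAP` array above is simply a fixed block of bytes that the global allocator hands out from before the VM-backed heap exists; after this PR it also keeps serving requests whenever a thread holds a `HeapGuard`. Below is a minimal self-contained sketch of the same idea; `EarlyHeap` and its bump-allocation logic are illustrative stand-ins, not the project's `KernelHeap`, which wraps the buffer in a Talc allocator instead.

use core::alloc::{GlobalAlloc, Layout};
use core::cell::UnsafeCell;
use core::ptr::null_mut;
use core::sync::atomic::{AtomicUsize, Ordering};

const SIZE: usize = 1024 * 1024;

/// Illustrative fixed-buffer bump allocator (hypothetical; not the real KernelHeap).
struct EarlyHeap {
    buf: UnsafeCell<[u8; SIZE]>,
    next: AtomicUsize,
}

// SAFETY: the cursor is only advanced through atomics and each byte range is handed out once.
unsafe impl Sync for EarlyHeap {}

unsafe impl GlobalAlloc for EarlyHeap {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let base = self.buf.get() as usize;
        let mut old = self.next.load(Ordering::Relaxed);

        loop {
            // Round the cursor up to the requested alignment.
            let start = (base + old + layout.align() - 1) & !(layout.align() - 1);
            let new = start - base + layout.size();

            if new > SIZE {
                return null_mut(); // Out of early-heap space.
            }

            match self.next.compare_exchange(old, new, Ordering::Relaxed, Ordering::Relaxed) {
                Ok(_) => return start as *mut u8,
                Err(v) => old = v,
            }
        }
    }

    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
        // A bump allocator never frees, which is acceptable for a short-lived boot heap.
    }
}

// Hypothetical wiring, mirroring the KERNEL_HEAP/PRIMITIVE_HEAP pair above.
#[cfg_attr(target_os = "none", global_allocator)]
static EARLY_HEAP: EarlyHeap = EarlyHeap {
    buf: UnsafeCell::new([0; SIZE]),
    next: AtomicUsize::new(0),
};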
52 changes: 31 additions & 21 deletions kernel/src/malloc/mod.rs
@@ -1,4 +1,5 @@
-use self::stage2::Stage2;
+use self::stage2::VmHeap;
+use crate::context::current_thread;
 use crate::lock::Mutex;
 use alloc::boxed::Box;
 use core::alloc::{GlobalAlloc, Layout};
@@ -18,30 +19,30 @@ mod stage2;
 /// The first stage is **not** thread safe so stage 2 must be activated before start a new CPU.
 pub struct KernelHeap {
     stage: UnsafeCell<Stage>,
-    stage1_ptr: *const u8,
-    stage1_end: *const u8,
+    primitive_ptr: *const u8,
+    primitive_end: *const u8,
 }
 
 impl KernelHeap {
     /// # Safety
     /// The specified memory must be valid for reads and writes and it must be exclusively available
     /// to [`KernelHeap`].
-    pub const unsafe fn new<const L: usize>(stage1: *mut [u8; L]) -> Self {
-        let stage1_ptr = stage1.cast();
-        let stage1 = Talc::new(ClaimOnOom::new(Span::from_array(stage1)));
+    pub const unsafe fn new<const L: usize>(primitive: *mut [u8; L]) -> Self {
+        let primitive_ptr = primitive.cast();
+        let primitive = Talc::new(ClaimOnOom::new(Span::from_array(primitive)));
 
         Self {
-            stage: UnsafeCell::new(Stage::One(RefCell::new(stage1))),
-            stage1_ptr,
-            stage1_end: stage1_ptr.add(L),
+            stage: UnsafeCell::new(Stage::One(RefCell::new(primitive))),
+            primitive_ptr,
+            primitive_end: primitive_ptr.add(L),
         }
     }
 
     /// # Safety
     /// This must be called by main CPU and can be called only once.
     pub unsafe fn activate_stage2(&self) {
-        // Setup stage 2 using stage 1 heap.
-        let stage2 = Box::new(Stage2::new());
+        // Setup VM heap using primitive heap.
+        let stage2 = Box::new(VmHeap::new());
 
         // What we are doing here is highly unsafe. Do not edit the code after this unless you know
         // what you are doing!
@@ -64,12 +65,19 @@ unsafe impl GlobalAlloc for KernelHeap {
         // context due to it can be called before the context has been activated.
         // SAFETY: GlobalAlloc::alloc required layout to be non-zero.
         match &*self.stage.get() {
-            Stage::One(s) => s
+            Stage::One(primitive) => primitive
                 .borrow_mut()
                 .malloc(layout)
                 .map(|v| v.as_ptr())
                 .unwrap_or(null_mut()),
-            Stage::Two(s, _) => s.alloc(layout),
+            Stage::Two(vm, primitive) => match current_thread().active_heap_guard() {
+                0 => vm.alloc(layout),
+                _ => primitive
+                    .lock()
+                    .malloc(layout)
+                    .map(|v| v.as_ptr())
+                    .unwrap_or(null_mut()),
+            },
         }
     }
 
@@ -78,16 +86,18 @@ unsafe impl GlobalAlloc for KernelHeap {
         // If stage 2 has not activated yet then this function is not allowed to access the CPU
         // context due to it can be called before the context has been activated.
         match &*self.stage.get() {
-            Stage::One(s) => s.borrow_mut().free(NonNull::new_unchecked(ptr), layout),
-            Stage::Two(s2, s1) => {
-                if ptr.cast_const() >= self.stage1_ptr && ptr.cast_const() < self.stage1_end {
+            Stage::One(primitive) => primitive
+                .borrow_mut()
+                .free(NonNull::new_unchecked(ptr), layout),
+            Stage::Two(vm, primitive) => {
+                if ptr.cast_const() >= self.primitive_ptr && ptr.cast_const() < self.primitive_end {
                     // SAFETY: GlobalAlloc::dealloc required ptr to be the same one that returned
                     // from our GlobalAlloc::alloc and layout to be the same one that passed to it.
-                    s1.lock().free(NonNull::new_unchecked(ptr), layout)
+                    primitive.lock().free(NonNull::new_unchecked(ptr), layout)
                 } else {
-                    // SAFETY: ptr is not owned by stage 1 so with the requirements of
-                    // GlobalAlloc::dealloc the pr will be owned by stage 2 for sure.
-                    s2.dealloc(ptr, layout);
+                    // SAFETY: ptr is not owned by primitive heap so with the requirements of
+                    // GlobalAlloc::dealloc the ptr will be owned by VM heap for sure.
+                    vm.dealloc(ptr, layout);
                 }
             }
         }
@@ -101,5 +111,5 @@ unsafe impl Sync for KernelHeap {}
 /// Stage of [KernelHeap].
 enum Stage {
     One(RefCell<Talc<ClaimOnOom>>),
-    Two(Box<Stage2>, Mutex<Talc<ClaimOnOom>>),
+    Two(Box<VmHeap>, Mutex<Talc<ClaimOnOom>>),
 }
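The heart of the change is the `Stage::Two(vm, primitive)` arm above: while a thread's `active_heap_guard` counter is non-zero, the global allocator serves the request from the primitive Talc heap instead of the VM heap. A stripped-down model of that routing is sketched below, using std's `thread_local!` in place of the kernel's per-thread `PrivateCell` and placeholder enums in place of the real heaps (all names here are hypothetical).

use std::cell::Cell;

thread_local! {
    // Stand-in for Thread::active_heap_guard.
    static HEAP_GUARD: Cell<usize> = Cell::new(0);
}

#[derive(Debug, PartialEq)]
enum Backend {
    Vm,        // UMA-zone backed VmHeap in the real kernel
    Primitive, // fixed-buffer Talc heap in the real kernel
}

// Mirrors the match in GlobalAlloc::alloc: guard held => primitive heap.
fn pick_backend() -> Backend {
    match HEAP_GUARD.with(|g| g.get()) {
        0 => Backend::Vm,
        _ => Backend::Primitive,
    }
}

/// RAII guard playing the role of Thread::disable_vm_heap.
struct Guard;

impl Guard {
    fn acquire() -> Self {
        HEAP_GUARD.with(|g| g.set(g.get().checked_add(1).unwrap()));
        Guard
    }
}

impl Drop for Guard {
    fn drop(&mut self) {
        HEAP_GUARD.with(|g| g.set(g.get() - 1));
    }
}

fn main() {
    assert_eq!(pick_backend(), Backend::Vm);
    {
        let _guard = Guard::acquire();
        // Anything allocated in this scope would be served by the primitive heap.
        assert_eq!(pick_backend(), Backend::Primitive);
    }
    assert_eq!(pick_backend(), Backend::Vm);
}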
27 changes: 17 additions & 10 deletions kernel/src/malloc/stage2.rs
@@ -8,16 +8,14 @@ use core::alloc::Layout;
 use core::cell::RefCell;
 use core::num::NonZero;
 
-/// Stage 2 kernel heap.
-///
-/// This stage allocate a memory from a virtual memory management system. This struct is a merge of
-/// `malloc_type` and `malloc_type_internal` structure.
-pub struct Stage2 {
+/// Kernel heap that allocate a memory from a virtual memory management system. This struct is a
+/// merge of `malloc_type` and `malloc_type_internal` structure.
+pub struct VmHeap {
     zones: [Vec<Arc<UmaZone>>; (usize::BITS - 1) as usize], // kmemsize + kmemzones
     stats: CpuLocal<RefCell<Stats>>, // mti_stats
 }
 
-impl Stage2 {
+impl VmHeap {
     const KMEM_ZSHIFT: usize = 4;
     const KMEM_ZBASE: usize = 16;
     const KMEM_ZMASK: usize = Self::KMEM_ZBASE - 1;
@@ -76,10 +74,15 @@ impl Stage2 {
 
     /// Returns null on failure.
     ///
-    /// See `malloc` on the PS4 for a reference.
+    /// See `malloc` on the Orbis for a reference.
     ///
     /// # Safety
     /// `layout` must be nonzero.
+    ///
+    /// # Reference offsets
+    /// | Version | Offset |
+    /// |---------|--------|
+    /// |PS4 11.00|0x1A4220|
     pub unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
         // Our implementation imply M_WAITOK.
         let td = current_thread();
@@ -89,9 +92,9 @@
         }
 
         // Determine how to allocate.
+        let lock = td.disable_vm_heap();
         let size = layout.size();
 
-        if size <= PAGE_SIZE.get() {
+        let mem = if size <= PAGE_SIZE.get() {
             // Get zone to allocate from.
             let align = layout.align().trailing_zeros() as usize;
             let size = if (size & Self::KMEM_ZMASK) != 0 {
@@ -122,7 +125,11 @@
             mem
         } else {
             todo!()
-        }
+        };
+
+        drop(lock);
+
+        mem
     }
 
     /// # Safety
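Note the ordering in `alloc` above: `td.disable_vm_heap()` is taken before any zone work, and the guard is only dropped once `mem` is ready, so any allocation the VM heap makes internally while servicing the request is routed back to the primitive heap by the `Stage::Two` match in `mod.rs` rather than re-entering `VmHeap::alloc`. The `let mem = if ...` rewrite exists so that the guard outlives the whole allocation path and `mem` can still be returned after it is dropped.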
39 changes: 37 additions & 2 deletions kernel/src/proc/thread/mod.rs
@@ -3,6 +3,7 @@ use super::Proc;
 use crate::lock::{Gutex, GutexGroup, GutexWrite};
 use alloc::sync::Arc;
 use core::cell::Cell;
+use core::marker::PhantomData;
 use core::sync::atomic::{AtomicU8, Ordering};
 
 mod cell;
@@ -15,15 +16,16 @@ mod cell;
 /// We subtitute `TDP_NOSLEEPING` with `td_intr_nesting_level` and `td_critnest` since it is the
 /// only cases the thread should not allow to sleep.
 ///
-/// Do not try to access any [`PrivateCell`] fields from interrupt handler because it might
-/// currently locked, which will can cause a panic.
+/// Do not try to access any [RefCell](core::cell::RefCell) fields from interrupt handler because it
+/// might currently locked.
 pub struct Thread {
     proc: Arc<Proc>, // td_proc
     active_pins: AtomicU8, // td_critnest
     active_interrupts: AtomicU8, // td_intr_nesting_level
     active_mutexes: PrivateCell<Cell<u16>>, // td_locks
     sleeping: Gutex<usize>, // td_wchan
     profiling_ticks: PrivateCell<Cell<u32>>, // td_pticks
+    active_heap_guard: PrivateCell<Cell<usize>>,
 }
 
 impl Thread {
@@ -45,6 +47,7 @@ impl Thread {
             active_mutexes: PrivateCell::default(),
             sleeping: gg.spawn(0),
             profiling_ticks: PrivateCell::default(),
+            active_heap_guard: PrivateCell::default(),
         }
     }
 
@@ -99,4 +102,36 @@ impl Thread {
     pub fn set_profiling_ticks(&self, v: u32) {
         set!(self, profiling_ticks, v)
     }
+
+    /// # Panics
+    /// If called from the other thread.
+    pub fn active_heap_guard(&self) -> usize {
+        get!(self, active_heap_guard)
+    }
+
+    pub fn disable_vm_heap(&self) -> HeapGuard {
+        let v = get!(self, active_heap_guard).checked_add(1).unwrap();
+
+        set!(self, active_heap_guard, v);
+
+        HeapGuard {
+            td: self,
+            phantom: PhantomData,
+        }
+    }
 }
+
+/// RAII struct to disable VM heap for the thread.
+pub struct HeapGuard<'a> {
+    td: &'a Thread,
+    phantom: PhantomData<*const ()>, // For !Send and !Sync.
+}
+
+impl Drop for HeapGuard<'_> {
+    fn drop(&mut self) {
+        let td = self.td;
+        let v = get!(td, active_heap_guard) - 1;
+
+        set!(td, active_heap_guard, v);
+    }
+}
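A small aside on the `phantom: PhantomData<*const ()>` field: raw pointers are neither `Send` nor `Sync`, so embedding one via `PhantomData` strips those auto traits from `HeapGuard` and pins the guard to the thread whose unsynchronized `active_heap_guard` counter it incremented. Below is a tiny standalone demonstration of the trick (`NotSendGuard` is a made-up name for the example).

use std::marker::PhantomData;

// Same trick as HeapGuard: PhantomData of a raw pointer removes the auto
// Send/Sync impls without storing any actual data.
struct NotSendGuard {
    _marker: PhantomData<*const ()>,
}

#[allow(dead_code)]
fn requires_send<T: Send>(_value: T) {}

fn main() {
    let guard = NotSendGuard { _marker: PhantomData };

    // Uncommenting the next line fails to compile:
    // error[E0277]: `*const ()` cannot be sent between threads safely
    // requires_send(guard);

    drop(guard);
}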