Bump version to v0.10. Update Rust to nightly-2022-02-11 (#547)
qinsoon authored Feb 15, 2022
1 parent 0c408d3 commit 42262c2
Showing 15 changed files with 75 additions and 35 deletions.
45 changes: 45 additions & 0 deletions CHANGELOG.md
@@ -1,3 +1,48 @@
0.10.0 (2022-02-14)
===

GC Plans
---
* Removed plan-specific copy contexts. Now each plan needs to provide a configuration for
`GCWorkerCopyContext` (similar to how plans configure `Mutator`; see the sketch after this list).
* Fixed a bug where `needs_log_bit` was always set to `true` for generational plans, regardless of
whether their barrier used the log bit.
* Fixed a potential arithmetic overflow when calculating `get_available_pages()`.
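
To make the copy-context change concrete, here is a rough, self-contained sketch of the idea with
stand-in types (none of the names below are the actual mmtk-core definitions): a plan no longer
defines its own copy-context struct, but instead describes which copy semantics should be served by
which copying space, and the generic `GCWorkerCopyContext` is assembled from that description.

```rust
use std::collections::HashMap;

// Schematic stand-ins for the real types.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum CopySemantics { DefaultCopy, Mature }

#[derive(Clone, Copy, Debug)]
enum CopySelector { CopySpace(usize), Unused }

// What a plan now provides: a mapping from copy semantics to the space a worker copies into.
struct CopyConfig {
    copy_mapping: HashMap<CopySemantics, CopySelector>,
}

// A semispace-like plan would copy everything into its single to-space.
fn semispace_copy_config() -> CopyConfig {
    let mut copy_mapping = HashMap::new();
    copy_mapping.insert(CopySemantics::DefaultCopy, CopySelector::CopySpace(0));
    copy_mapping.insert(CopySemantics::Mature, CopySelector::Unused);
    CopyConfig { copy_mapping }
}

fn main() {
    println!("{:?}", semispace_copy_config().copy_mapping);
}
```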

Policies
---
* Refactored copy context. Now a copying policy provides its copy context.
* Mark sweep and mark compact now use `ObjectIterator` for linear scanning.

Scheduler
---
* Introduced `GCController`, a counterpart of `GCWorker`, for the controller thread.
* Refactored `GCWorker`. `GCWorker` is now separated into two parts: a thread-local part, `GCWorker`,
which is owned by GC threads, and a shared part, `GCWorkerShared`, which is shared between GC threads
and the scheduler (see the sketch after this list).
* Refactored the creation of the scheduler and the workers to remove some unnecessary `Option<T>` and `RwLock<T>`.
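
As a rough illustration of the worker split (stand-in types only; the real structs carry much more
state, such as copy contexts and work buffers): the shared half is reference-counted so that both
the scheduler and the worker thread can reach it, while the thread-local half is moved into, and
owned by, the GC thread.

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;

// Shared half: visible to the scheduler and other GC threads (e.g. statistics).
struct GCWorkerShared {
    packets_executed: AtomicUsize,
}

// Thread-local half: owned by the GC thread that runs the worker loop.
struct GCWorker {
    ordinal: usize, // identifies this worker, e.g. for logging
    shared: Arc<GCWorkerShared>,
}

fn spawn_workers(num_workers: usize) -> Vec<Arc<GCWorkerShared>> {
    (0..num_workers)
        .map(|ordinal| {
            let shared = Arc::new(GCWorkerShared { packets_executed: AtomicUsize::new(0) });
            let for_scheduler = shared.clone(); // the scheduler keeps this handle
            thread::spawn(move || {
                let worker = GCWorker { ordinal, shared }; // moved into the GC thread
                // ... worker loop: poll a work packet, execute it, record stats ...
                worker.shared.packets_executed.fetch_add(1, Ordering::Relaxed);
            });
            for_scheduler
        })
        .collect()
}
```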

API
---
* Added `process_bulk()` that allows bindings to pass options as a string of key-value pairs.
* `ObjectModel::copy()` now takes `CopySemantics` as a parameter.
* Renamed `Collection::spawn_worker_thread()` to `spawn_gc_thread()`, which is now used to spawn both GC
workers and the GC controller.
* `Collection::out_of_memory()` now takes an `AllocationError` parameter, which hints the binding
on how to handle the OOM error.
* `Collection::out_of_memory()` now allows a binding to return from the method in the case of a non-critical OOM.
If a binding returns, `alloc()` will return a zero address (see the sketch after this list).
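
For the last two bullets, a binding's OOM hook might be shaped like the sketch below. The enum is a
local stand-in for MMTk's `AllocationError` hint (the variant names follow my reading of the API
and may differ), and the function only mirrors the shape of `Collection::out_of_memory()` rather
than implementing the full trait.

```rust
// Local stand-in for mmtk's AllocationError hint.
enum AllocationError {
    HeapOutOfMemory, // ordinary heap exhaustion that the guest language can surface
    MmapOutOfMemory, // the runtime could not map memory: usually unrecoverable
}

// Shaped like a binding-side `Collection::out_of_memory()`. Returning (rather than
// aborting) on a non-critical OOM means the `alloc()` call that triggered it will
// return a zero address, which the binding's allocation path must then check.
fn out_of_memory(err_kind: AllocationError) {
    match err_kind {
        AllocationError::HeapOutOfMemory => {
            // e.g. arrange for an OutOfMemoryError to be thrown, then return
        }
        AllocationError::MmapOutOfMemory => {
            panic!("failed to map memory for the heap"); // treat as critical
        }
    }
}
```

For `process_bulk()`, the changelog only pins down that the options arrive as one string of
key-value pairs (e.g. something like `"threads=16 stress_factor=1000000"`); the exact separator and
the set of recognised keys should be checked against `src/util/options.rs`.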

Misc
---
* Added `ObjectIterator`, which provides linear scanning through a region, iterating over objects
using the alloc bit (see the sketch after this list).
* Added a feature `work_packet_stats` to optionally collect work packet statistics. Note that
MMTk used to always collect work packet statistics.
* Optimized the access to the SFT map.
* Fixed a few issues with documentation.
* The example header file `mmtk.h` now uses the prefix `mmtk_` for all the functions.
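
The linear-scan loop that `ObjectIterator` encapsulates looks roughly like the self-contained
sketch below; plain `usize` addresses and closures stand in for mmtk-core's `Address` and
`ObjectReference` types, the alloc-bit query, and the VM-provided object-size callback.

```rust
/// Schematic alloc-bit based linear scan: walk [start, end), and wherever the
/// alloc bit says an object starts, visit it and jump over the whole object.
fn for_each_object(
    start: usize,
    end: usize,
    alloc_bit_set: impl Fn(usize) -> bool, // does an object start at this address?
    object_size: impl Fn(usize) -> usize,  // size of the object starting here
    mut visit: impl FnMut(usize),
) {
    // Assumed granularity at which the alloc bit is kept (one bit per N bytes).
    const GRANULARITY: usize = 8;
    let mut cursor = start;
    while cursor < end {
        if alloc_bit_set(cursor) {
            visit(cursor);
            // Skip the object, keeping the cursor aligned to the granularity.
            let size = object_size(cursor).max(GRANULARITY);
            cursor += (size + GRANULARITY - 1) & !(GRANULARITY - 1);
        } else {
            cursor += GRANULARITY; // nothing starts here; try the next slot
        }
    }
}
```

In mmtk-core this loop is packaged behind the `ObjectIterator` type, which is what mark sweep and
mark compact now use for their linear scans (see the Policies section above).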

0.9.0 (2021-12-16)
===

2 changes: 1 addition & 1 deletion Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "mmtk"
version = "0.9.0"
version = "0.10.0"
authors = ["The MMTk Developers <>"]
edition = "2018"
license = "MIT OR Apache-2.0"
2 changes: 1 addition & 1 deletion rust-toolchain
@@ -1 +1 @@
- nightly-2021-12-05
+ nightly-2022-02-11
3 changes: 0 additions & 3 deletions src/lib.rs
@@ -1,12 +1,9 @@
#![allow(incomplete_features)]
#![feature(asm)]
#![feature(integer_atomics)]
#![feature(is_sorted)]
#![feature(drain_filter)]
#![feature(nll)]
#![feature(box_syntax)]
#![feature(maybe_uninit_extra)]
#![feature(get_mut_unchecked)]
#![feature(arbitrary_self_types)]
#![feature(associated_type_defaults)]
#![feature(specialization)]
10 changes: 5 additions & 5 deletions src/policy/copyspace.rs
@@ -34,7 +34,7 @@ impl<VM: VMBinding> SFT for CopySpace<VM> {
}

fn is_live(&self, object: ObjectReference) -> bool {
- !self.from_space() || object_forwarding::is_forwarded::<VM>(object)
+ !self.is_from_space() || object_forwarding::is_forwarded::<VM>(object)
}

fn is_movable(&self) -> bool {
@@ -43,7 +43,7 @@ impl<VM: VMBinding> SFT for CopySpace<VM> {

#[cfg(feature = "sanity")]
fn is_sane(&self) -> bool {
- !self.from_space()
+ !self.is_from_space()
}

fn initialize_object_metadata(&self, _object: ObjectReference, _alloc: bool) {
@@ -53,7 +53,7 @@ impl<VM: VMBinding> SFT for CopySpace<VM> {

#[inline(always)]
fn get_forwarded_object(&self, object: ObjectReference) -> Option<ObjectReference> {
- if !self.from_space() {
+ if !self.is_from_space() {
return None;
}

@@ -179,7 +179,7 @@ impl<VM: VMBinding> CopySpace<VM> {
}
}

- fn from_space(&self) -> bool {
+ fn is_from_space(&self) -> bool {
self.from_space.load(Ordering::SeqCst)
}

@@ -193,7 +193,7 @@
) -> ObjectReference {
trace!("copyspace.trace_object(, {:?}, {:?})", object, semantics,);
debug_assert!(
- self.from_space(),
+ self.is_from_space(),
"Trace object called for object ({:?}) in to-space",
object
);
4 changes: 2 additions & 2 deletions src/policy/largeobjectspace.rs
@@ -153,7 +153,7 @@ impl<VM: VMBinding> LargeObjectSpace<VM> {

pub fn prepare(&mut self, full_heap: bool) {
if full_heap {
- debug_assert!(self.treadmill.from_space_empty());
+ debug_assert!(self.treadmill.is_from_space_empty());
self.mark_state = MARK_BIT - self.mark_state;
}
self.treadmill.flip(full_heap);
@@ -162,7 +162,7 @@ impl<VM: VMBinding> LargeObjectSpace<VM> {

pub fn release(&mut self, full_heap: bool) {
self.sweep_large_pages(true);
- debug_assert!(self.treadmill.nursery_empty());
+ debug_assert!(self.treadmill.is_nursery_empty());
if full_heap {
self.sweep_large_pages(false);
}
9 changes: 4 additions & 5 deletions src/policy/space.rs
@@ -578,14 +578,13 @@ impl<VM: VMBinding> CommonSpace<VM> {
extent
);

- let start: Address;
- if let VMRequest::Fixed { start: _start, .. } = vmrequest {
- start = _start;
+ let start = if let VMRequest::Fixed { start: _start, .. } = vmrequest {
+ _start
} else {
// FIXME
//if (HeapLayout.vmMap.isFinalized()) VM.assertions.fail("heap is narrowed after regionMap is finalized: " + name);
- start = heap.reserve(extent, top);
- }
+ heap.reserve(extent, top)
+ };
assert!(
start == chunk_align_up(start),
"{} starting on non-aligned boundary: {}",
2 changes: 1 addition & 1 deletion src/scheduler/stat.rs
@@ -34,7 +34,7 @@ impl SchedulerStat {
/// Extract the work-packet name from the full type name.
/// i.e. simplifies `crate::scheduler::gc_work::SomeWorkPacket<Semispace>` to `SomeWorkPacket`.
fn work_name(&self, name: &str) -> String {
- let end_index = name.find('<').unwrap_or_else(|| name.len());
+ let end_index = name.find('<').unwrap_or(name.len());
let name = name[..end_index].to_owned();
match name.rfind(':') {
Some(start_index) => name[(start_index + 1)..end_index].to_owned(),
2 changes: 1 addition & 1 deletion src/util/analysis/obj_size.rs
@@ -58,7 +58,7 @@ impl<VM: VMBinding> RtAnalysis<VM> for PerSizeClassObjectCounter {
match c {
None => {
// Create (and increment) the counter associated with the size class if it doesn't exist
- let ctr = new_ctr!(stats, size_classes, &size_class);
+ let ctr = new_ctr!(stats, size_classes, size_class);
ctr.lock().unwrap().inc();
}
Some(ctr) => {
3 changes: 1 addition & 2 deletions src/util/heap/freelistpageresource.rs
@@ -271,11 +271,10 @@ impl<VM: VMBinding> FreeListPageResource<VM> {
self.free_list.set_uncoalescable(region_start as _);
self.free_list.set_uncoalescable(region_end as i32 + 1);
for p in (region_start..region_end).step_by(PAGES_IN_CHUNK) {
- let liberated;
if p != region_start {
self.free_list.clear_uncoalescable(p as _);
}
- liberated = self.free_list.free(p as _, true); // add chunk to our free list
+ let liberated = self.free_list.free(p as _, true); // add chunk to our free list
debug_assert!(liberated as usize == PAGES_IN_CHUNK + (p - region_start));
if self.meta_data_pages_per_region > 1 {
let meta_data_pages_per_region = self.meta_data_pages_per_region;
16 changes: 8 additions & 8 deletions src/util/metadata/side_metadata/global.rs
@@ -580,13 +580,13 @@ pub fn fetch_add_atomic(

(old_val & mask) as usize
} else if bits_num_log == 3 {
- unsafe { (&*meta_addr.to_ptr::<AtomicU8>()).fetch_add(val as u8, order) as usize }
+ unsafe { (*meta_addr.to_ptr::<AtomicU8>()).fetch_add(val as u8, order) as usize }
} else if bits_num_log == 4 {
- unsafe { (&*meta_addr.to_ptr::<AtomicU16>()).fetch_add(val as u16, order) as usize }
+ unsafe { (*meta_addr.to_ptr::<AtomicU16>()).fetch_add(val as u16, order) as usize }
} else if bits_num_log == 5 {
- unsafe { (&*meta_addr.to_ptr::<AtomicU32>()).fetch_add(val as u32, order) as usize }
+ unsafe { (*meta_addr.to_ptr::<AtomicU32>()).fetch_add(val as u32, order) as usize }
} else if bits_num_log == 6 {
- unsafe { (&*meta_addr.to_ptr::<AtomicUsize>()).fetch_add(val, order) }
+ unsafe { (*meta_addr.to_ptr::<AtomicUsize>()).fetch_add(val, order) }
} else {
unreachable!(
"side metadata > {}-bits is not supported!",
@@ -639,13 +639,13 @@ pub fn fetch_sub_atomic(

(old_val & mask) as usize
} else if bits_num_log == 3 {
- unsafe { (&*meta_addr.to_ptr::<AtomicU8>()).fetch_sub(val as u8, order) as usize }
+ unsafe { (*meta_addr.to_ptr::<AtomicU8>()).fetch_sub(val as u8, order) as usize }
} else if bits_num_log == 4 {
- unsafe { (&*meta_addr.to_ptr::<AtomicU16>()).fetch_sub(val as u16, order) as usize }
+ unsafe { (*meta_addr.to_ptr::<AtomicU16>()).fetch_sub(val as u16, order) as usize }
} else if bits_num_log == 5 {
- unsafe { (&*meta_addr.to_ptr::<AtomicU32>()).fetch_sub(val as u32, order) as usize }
+ unsafe { (*meta_addr.to_ptr::<AtomicU32>()).fetch_sub(val as u32, order) as usize }
} else if bits_num_log == 6 {
- unsafe { (&*meta_addr.to_ptr::<AtomicUsize>()).fetch_sub(val, order) }
+ unsafe { (*meta_addr.to_ptr::<AtomicUsize>()).fetch_sub(val, order) }
} else {
unreachable!(
"side metadata > {}-bits is not supported!",
2 changes: 1 addition & 1 deletion src/util/options.rs
@@ -102,7 +102,7 @@ impl UnsafeOptionsWrapper {
/// This method is not thread safe, as internally it acquires a mutable reference to self.
/// It is supposed to be used by one thread during boot time.
pub unsafe fn process(&self, name: &str, value: &str) -> bool {
- (&mut *self.0.get()).set_from_command_line(name, value)
+ (*self.0.get()).set_from_command_line(name, value)
}

/// Bulk process options. Returns true if all the options are processed successfully.
2 changes: 1 addition & 1 deletion src/util/reference_processor.rs
@@ -172,7 +172,7 @@ impl ReferenceProcessor {
// TODO: We may need to rework on this to remove the unsafety.
#[allow(clippy::mut_from_ref)]
unsafe fn sync_mut(&self) -> &mut ReferenceProcessorSync {
- (&mut *self.sync.get()).get_mut().unwrap()
+ (*self.sync.get()).get_mut().unwrap()
}

pub fn clear(&self) {
2 changes: 1 addition & 1 deletion src/util/rust_util.rs
@@ -48,7 +48,7 @@ impl<T> InitializeOnce<T> {
pub fn get_ref(&self) -> &T {
// We only assert in debug builds.
debug_assert!(self.once.is_completed());
- unsafe { (&*self.v.get()).assume_init_ref() }
+ unsafe { (*self.v.get()).assume_init_ref() }
}
}

6 changes: 3 additions & 3 deletions src/util/treadmill.rs
@@ -81,15 +81,15 @@ impl TreadMill {
self.to_space.lock().unwrap().insert(cell);
}

- pub fn to_space_empty(&self) -> bool {
+ pub fn is_to_space_empty(&self) -> bool {
self.to_space.lock().unwrap().is_empty()
}

- pub fn from_space_empty(&self) -> bool {
+ pub fn is_from_space_empty(&self) -> bool {
self.from_space.lock().unwrap().is_empty()
}

- pub fn nursery_empty(&self) -> bool {
+ pub fn is_nursery_empty(&self) -> bool {
self.collect_nursery.lock().unwrap().is_empty()
}

