diff --git a/Cargo.toml b/Cargo.toml
index 1ce4a83bec..3bae0a9dc0 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -34,7 +34,6 @@ itertools = "0.12.0"
 jemalloc-sys = { version = "0.5.3", features = ["disable_initial_exec_tls"], optional = true }
 lazy_static = "1.1"
 libc = "0.2"
-log = { version = "0.4", features = ["max_level_trace", "release_max_level_off"] }
 memoffset = "0.9"
 mimalloc-sys = { version = "0.1.6", optional = true }
 # MMTk macros - we have to specify a version here in order to publish the crate, even though we use the dependency from a local path.
@@ -51,6 +50,9 @@ static_assertions = "1.1.0"
 strum = "0.26.2"
 strum_macros = "0.26.2"
 sysinfo = "0.30.9"
+# mmtk-core internally uses wrapper macros in the `mmtk::util::log` module.
+# We rename the crate so that programmers don't unintentionally use the `log` crate directly.
+the_log_crate = { package = "log", version = "0.4"}
 
 [dev-dependencies]
 paste = "1.0.8"
@@ -171,6 +173,11 @@ count_live_bytes_in_gc = []
 # capture the type names of work packets.
 bpftrace_workaround = []
 
+# Enable verbose logs in release build. Those are placed on hot paths, so merely checking whether
+# logging is enabled may degrade performance. Currently such logs include `debug!` and `trace!`
+# levels.
+hot_log = []
+
 # Do not modify the following line - ci-common.sh matches it
 # -- Mutally exclusive features --
 # Only one feature from each group can be provided. Otherwise build will fail.
diff --git a/src/global_state.rs b/src/global_state.rs
index f355e697f2..64b7a2dbc2 100644
--- a/src/global_state.rs
+++ b/src/global_state.rs
@@ -1,3 +1,4 @@
+use crate::util::log;
 use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
 use std::sync::Mutex;
 use std::time::Instant;
@@ -160,7 +161,7 @@ impl GlobalState {
     /// Increase the allocation bytes and return the current allocation bytes after increasing
     pub fn increase_allocation_bytes_by(&self, size: usize) -> usize {
         let old_allocation_bytes = self.allocation_bytes.fetch_add(size, Ordering::SeqCst);
-        trace!(
+        log::trace!(
             "Stress GC: old_allocation_bytes = {}, size = {}, allocation_bytes = {}",
             old_allocation_bytes,
             size,
diff --git a/src/lib.rs b/src/lib.rs
index def57e4756..81211120d3 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -30,8 +30,6 @@
 #[macro_use]
 extern crate lazy_static;
 #[macro_use]
-extern crate log;
-#[macro_use]
 extern crate downcast_rs;
 #[macro_use]
 extern crate static_assertions;
diff --git a/src/memory_manager.rs b/src/memory_manager.rs
index 9c6b30034a..c91cb878a9 100644
--- a/src/memory_manager.rs
+++ b/src/memory_manager.rs
@@ -20,6 +20,7 @@ use crate::scheduler::{GCWork, GCWorker};
 use crate::util::alloc::allocators::AllocatorSelector;
 use crate::util::constants::{LOG_BYTES_IN_PAGE, MIN_OBJECT_SIZE};
 use crate::util::heap::layout::vm_layout::vm_layout;
+use crate::util::log;
 use crate::util::opaque_pointer::*;
 use crate::util::{Address, ObjectReference};
 use crate::vm::slot::MemorySlice;
@@ -52,8 +53,8 @@ use crate::vm::VMBinding;
 /// * `builder`: The reference to a MMTk builder.
 pub fn mmtk_init<VM: VMBinding>(builder: &MMTKBuilder) -> Box<MMTK<VM>> {
     match crate::util::logger::try_init() {
-        Ok(_) => debug!("MMTk initialized the logger."),
-        Err(_) => debug!(
+        Ok(_) => log::debug!("MMTk initialized the logger."),
+        Err(_) => log::debug!(
             "MMTk failed to initialize the logger. Possibly a logger has been initialized by user."
), } @@ -69,19 +70,20 @@ pub fn mmtk_init(builder: &MMTKBuilder) -> Box> { if split[0] == "Threads:" { let threads = split[1].parse::().unwrap(); if threads != 1 { - warn!("Current process has {} threads, process-wide perf event measurement will only include child threads spawned from this thread", threads); + log::warn!("Current process has {} threads, process-wide perf event measurement will only include child threads spawned from this thread", threads); } } } } let mmtk = builder.build(); - info!( + log::info!( "Initialized MMTk with {:?} ({:?})", - *mmtk.options.plan, *mmtk.options.gc_trigger + *mmtk.options.plan, + *mmtk.options.gc_trigger ); #[cfg(feature = "extreme_assertions")] - warn!("The feature 'extreme_assertions' is enabled. MMTk will run expensive run-time checks. Slow performance should be expected."); + log::warn!("The feature 'extreme_assertions' is enabled. MMTk will run expensive run-time checks. Slow performance should be expected."); Box::new(mmtk) } @@ -115,7 +117,7 @@ pub fn bind_mutator( const LOG_ALLOCATOR_MAPPING: bool = false; if LOG_ALLOCATOR_MAPPING { - info!("{:?}", mutator.config); + log::info!("{:?}", mutator.config); } mutator } @@ -477,7 +479,7 @@ pub fn gc_poll(mmtk: &MMTK, tls: VMMutatorThread) { ); if VM::VMCollection::is_collection_enabled() && mmtk.gc_trigger.poll(false, None) { - debug!("Collection required"); + log::debug!("Collection required"); assert!(mmtk.state.is_initialized(), "GC is not allowed here: collection is not initialized (did you call initialize_collection()?)."); VM::VMCollection::block_for_gc(tls); } @@ -764,7 +766,7 @@ pub fn add_finalizer( object: >::FinalizableType, ) { if *mmtk.options.no_finalizer { - warn!("add_finalizer() is called when no_finalizer = true"); + log::warn!("add_finalizer() is called when no_finalizer = true"); } mmtk.finalizable_processor.lock().unwrap().add(object); @@ -823,7 +825,7 @@ pub fn get_finalized_object( mmtk: &'static MMTK, ) -> Option<>::FinalizableType> { if *mmtk.options.no_finalizer { - warn!("get_finalized_object() is called when no_finalizer = true"); + log::warn!("get_finalized_object() is called when no_finalizer = true"); } mmtk.finalizable_processor @@ -843,7 +845,7 @@ pub fn get_all_finalizers( mmtk: &'static MMTK, ) -> Vec<>::FinalizableType> { if *mmtk.options.no_finalizer { - warn!("get_all_finalizers() is called when no_finalizer = true"); + log::warn!("get_all_finalizers() is called when no_finalizer = true"); } mmtk.finalizable_processor @@ -863,7 +865,7 @@ pub fn get_finalizers_for( object: ObjectReference, ) -> Vec<>::FinalizableType> { if *mmtk.options.no_finalizer { - warn!("get_finalizers() is called when no_finalizer = true"); + log::warn!("get_finalizers() is called when no_finalizer = true"); } mmtk.finalizable_processor diff --git a/src/mmtk.rs b/src/mmtk.rs index d636e2472a..ada16939d4 100644 --- a/src/mmtk.rs +++ b/src/mmtk.rs @@ -15,6 +15,7 @@ use crate::util::heap::gc_trigger::GCTrigger; use crate::util::heap::layout::vm_layout::VMLayout; use crate::util::heap::layout::{self, Mmapper, VMMap}; use crate::util::heap::HeapMeta; +use crate::util::log; use crate::util::opaque_pointer::*; use crate::util::options::Options; use crate::util::reference_processor::ReferenceProcessors; @@ -25,6 +26,7 @@ use crate::util::slot_logger::SlotLogger; use crate::util::statistics::stats::Stats; use crate::vm::ReferenceGlue; use crate::vm::VMBinding; + use std::cell::UnsafeCell; use std::default::Default; use std::sync::atomic::{AtomicBool, Ordering}; @@ -419,12 +421,12 @@ impl MMTK { ) 
-> bool { use crate::vm::Collection; if !self.get_plan().constraints().collects_garbage { - warn!("User attempted a collection request, but the plan can not do GC. The request is ignored."); + log::warn!("User attempted a collection request, but the plan can not do GC. The request is ignored."); return false; } if force || !*self.options.ignore_system_gc && VM::VMCollection::is_collection_enabled() { - info!("User triggering collection"); + log::info!("User triggering collection"); if exhaustive { if let Some(gen) = self.get_plan().generational() { gen.force_full_heap_collection(); diff --git a/src/plan/generational/global.rs b/src/plan/generational/global.rs index 91f6f7e911..8f9424b39f 100644 --- a/src/plan/generational/global.rs +++ b/src/plan/generational/global.rs @@ -9,6 +9,7 @@ use crate::scheduler::*; use crate::util::copy::CopySemantics; use crate::util::heap::gc_trigger::SpaceStats; use crate::util::heap::VMRequest; +use crate::util::log; use crate::util::statistics::counter::EventCounter; use crate::util::Address; use crate::util::ObjectReference; @@ -101,7 +102,7 @@ impl CommonGenPlan { let cur_nursery = self.nursery.reserved_pages(); let max_nursery = self.common.base.gc_trigger.get_max_nursery_pages(); let nursery_full = cur_nursery >= max_nursery; - trace!( + log::trace!( "nursery_full = {:?} (nursery = {}, max_nursery = {})", nursery_full, cur_nursery, @@ -143,7 +144,7 @@ impl CommonGenPlan { // The conditions are complex, and it is easier to read if we put them to separate if blocks. #[allow(clippy::if_same_then_else, clippy::needless_bool)] let is_full_heap = if crate::plan::generational::FULL_NURSERY_GC { - trace!("full heap: forced full heap"); + log::trace!("full heap: forced full heap"); // For barrier overhead measurements, we always do full gc in nursery collections. true } else if self @@ -154,7 +155,7 @@ impl CommonGenPlan { .load(Ordering::SeqCst) && *self.common.base.options.full_heap_system_gc { - trace!("full heap: user triggered"); + log::trace!("full heap: user triggered"); // User triggered collection, and we force full heap for user triggered collection true } else if self.next_gc_full_heap.load(Ordering::SeqCst) @@ -166,7 +167,7 @@ impl CommonGenPlan { .load(Ordering::SeqCst) > 1 { - trace!( + log::trace!( "full heap: next_gc_full_heap = {}, cur_collection_attempts = {}", self.next_gc_full_heap.load(Ordering::SeqCst), self.common @@ -178,7 +179,7 @@ impl CommonGenPlan { // Forces full heap collection true } else if Self::virtual_memory_exhausted(plan.generational().unwrap()) { - trace!("full heap: virtual memory exhausted"); + log::trace!("full heap: virtual memory exhausted"); true } else { // We use an Appel-style nursery. The default GC (even for a "heap-full" collection) @@ -191,7 +192,7 @@ impl CommonGenPlan { self.gc_full_heap.store(is_full_heap, Ordering::SeqCst); - info!( + log::info!( "{}", if is_full_heap { "Full heap GC" @@ -265,7 +266,7 @@ impl CommonGenPlan { let available = plan.get_available_pages(); let min_nursery = plan.base().gc_trigger.get_min_nursery_pages(); let next_gc_full_heap = available < min_nursery; - trace!( + log::trace!( "next gc will be full heap? 
{}, available pages = {}, min nursery = {}", next_gc_full_heap, available, diff --git a/src/plan/generational/immix/global.rs b/src/plan/generational/immix/global.rs index ba2850b3a4..358ba550c3 100644 --- a/src/plan/generational/immix/global.rs +++ b/src/plan/generational/immix/global.rs @@ -20,6 +20,7 @@ use crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::*; use crate::util::heap::gc_trigger::SpaceStats; use crate::util::heap::VMRequest; +use crate::util::log; use crate::util::Address; use crate::util::ObjectReference; use crate::util::VMWorkerThread; @@ -109,10 +110,10 @@ impl Plan for GenImmix { probe!(mmtk, gen_full_heap, is_full_heap); if !is_full_heap { - info!("Nursery GC"); + log::info!("Nursery GC"); scheduler.schedule_common_work::>(self); } else { - info!("Full heap GC"); + log::info!("Full heap GC"); crate::plan::immix::Immix::schedule_immix_full_heap_collection::< GenImmix, GenImmixMatureGCWorkContext, diff --git a/src/plan/global.rs b/src/plan/global.rs index 14695c2d00..357e4adc47 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -19,6 +19,7 @@ use crate::util::heap::layout::Mmapper; use crate::util::heap::layout::VMMap; use crate::util::heap::HeapMeta; use crate::util::heap::VMRequest; +use crate::util::log; use crate::util::metadata::side_metadata::SideMetadataSanity; use crate::util::metadata::side_metadata::SideMetadataSpec; use crate::util::options::Options; @@ -233,7 +234,7 @@ pub trait Plan: 'static + HasSpaces + Sync + Downcast { let vm_live_pages = conversions::bytes_to_pages_up(vm_live_bytes); let total = used_pages + collection_reserve + vm_live_pages; - trace!( + log::trace!( "Reserved pages = {}, used pages: {}, collection reserve: {}, VM live pages: {}", total, used_pages, @@ -267,7 +268,7 @@ pub trait Plan: 'static + HasSpaces + Sync + Downcast { // may be larger than the reserved pages before a GC, as we may end up using more memory for thread local // buffers for copy allocators). 
let available_pages = total_pages.saturating_sub(reserved_pages); - trace!( + log::trace!( "Total pages = {}, reserved pages = {}, available pages = {}", total_pages, reserved_pages, @@ -491,25 +492,25 @@ impl BasePlan { ) -> ObjectReference { #[cfg(feature = "code_space")] if self.code_space.in_space(object) { - trace!("trace_object: object in code space"); + log::trace!("trace_object: object in code space"); return self.code_space.trace_object::(queue, object); } #[cfg(feature = "code_space")] if self.code_lo_space.in_space(object) { - trace!("trace_object: object in large code space"); + log::trace!("trace_object: object in large code space"); return self.code_lo_space.trace_object::(queue, object); } #[cfg(feature = "ro_space")] if self.ro_space.in_space(object) { - trace!("trace_object: object in ro_space space"); + log::trace!("trace_object: object in ro_space space"); return self.ro_space.trace_object(queue, object); } #[cfg(feature = "vm_space")] if self.vm_space.in_space(object) { - trace!("trace_object: object in boot space"); + log::trace!("trace_object: object in boot space"); return self.vm_space.trace_object(queue, object); } @@ -545,18 +546,18 @@ impl BasePlan { &self.options, ); if stress_force_gc { - debug!( + log::debug!( "Stress GC: allocation_bytes = {}, stress_factor = {}", self.global_state.allocation_bytes.load(Ordering::Relaxed), *self.options.stress_factor ); - debug!("Doing stress GC"); + log::debug!("Doing stress GC"); self.global_state .allocation_bytes .store(0, Ordering::SeqCst); } - debug!( + log::debug!( "self.get_reserved_pages()={}, self.get_total_pages()={}", plan.get_reserved_pages(), plan.get_total_pages() @@ -622,15 +623,15 @@ impl CommonPlan { worker: &mut GCWorker, ) -> ObjectReference { if self.immortal.in_space(object) { - trace!("trace_object: object in immortal space"); + log::trace!("trace_object: object in immortal space"); return self.immortal.trace_object(queue, object); } if self.los.in_space(object) { - trace!("trace_object: object in los"); + log::trace!("trace_object: object in los"); return self.los.trace_object(queue, object); } if self.nonmoving.in_space(object) { - trace!("trace_object: object in nonmoving space"); + log::trace!("trace_object: object in nonmoving space"); return self.nonmoving.trace_object(queue, object); } self.base.trace_object::(queue, object, worker) diff --git a/src/plan/pageprotect/global.rs b/src/plan/pageprotect/global.rs index 65a6b2eab8..7e6a00561b 100644 --- a/src/plan/pageprotect/global.rs +++ b/src/plan/pageprotect/global.rs @@ -10,6 +10,7 @@ use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; use crate::util::heap::gc_trigger::SpaceStats; use crate::util::heap::VMRequest; +use crate::util::log; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::{plan::global::BasePlan, vm::VMBinding}; use crate::{ @@ -86,7 +87,7 @@ impl Plan for PageProtect { impl PageProtect { pub fn new(args: CreateGeneralPlanArgs) -> Self { // Warn users that the plan may fail due to maximum mapping allowed. - warn!( + log::warn!( "PageProtect uses a high volume of memory mappings. 
\ If you encounter failures in memory protect/unprotect in this plan,\ consider increase the maximum mapping allowed by the OS{}.", diff --git a/src/plan/sticky/immix/global.rs b/src/plan/sticky/immix/global.rs index 7b41c7b854..fdeed73ad6 100644 --- a/src/plan/sticky/immix/global.rs +++ b/src/plan/sticky/immix/global.rs @@ -15,6 +15,7 @@ use crate::util::copy::CopyConfig; use crate::util::copy::CopySelector; use crate::util::copy::CopySemantics; use crate::util::heap::gc_trigger::SpaceStats; +use crate::util::log; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::util::statistics::counter::EventCounter; use crate::vm::ObjectModel; @@ -90,11 +91,11 @@ impl Plan for StickyImmix { probe!(mmtk, gen_full_heap, is_full_heap); if !is_full_heap { - info!("Nursery GC"); + log::info!("Nursery GC"); // nursery GC -- we schedule it scheduler.schedule_common_work::>(self); } else { - info!("Full heap GC"); + log::info!("Full heap GC"); use crate::plan::immix::Immix; use crate::policy::immix::TRACE_KIND_DEFRAG; Immix::schedule_immix_full_heap_collection::< @@ -182,14 +183,14 @@ impl Plan for StickyImmix { if self.is_current_gc_nursery() { // Every reachable object should be logged if !VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_unlogged::(object, Ordering::SeqCst) { - error!("Object {} is not unlogged (all objects that have been traced should be unlogged/mature)", object); + log::error!("Object {} is not unlogged (all objects that have been traced should be unlogged/mature)", object); return false; } // Every reachable object should be marked if self.immix.immix_space.in_space(object) && !self.immix.immix_space.is_marked(object) { - error!( + log::error!( "Object {} is not marked (all objects that have been traced should be marked)", object ); @@ -197,7 +198,7 @@ impl Plan for StickyImmix { } else if self.immix.common.los.in_space(object) && !self.immix.common.los.is_live(object) { - error!("LOS Object {} is not marked", object); + log::error!("LOS Object {} is not marked", object); return false; } } @@ -251,11 +252,11 @@ impl crate::plan::generational::global::GenerationalPlanExt f if self.immix.immix_space.in_space(object) { if !self.is_object_in_nursery(object) { // Mature object - trace!("Immix mature object {}, skip", object); + log::trace!("Immix mature object {}, skip", object); return object; } else { let object = if KIND == TRACE_KIND_TRANSITIVE_PIN || KIND == TRACE_KIND_FAST { - trace!( + log::trace!( "Immix nursery object {} is being traced without moving", object ); @@ -272,7 +273,7 @@ impl crate::plan::generational::global::GenerationalPlanExt f worker, true, ); - trace!( + log::trace!( "Immix nursery object {} is being traced with opportunistic copy {}", object, if ret == object { @@ -283,7 +284,7 @@ impl crate::plan::generational::global::GenerationalPlanExt f ); ret } else { - trace!( + log::trace!( "Immix nursery object {} is being traced without moving", object ); @@ -346,7 +347,7 @@ impl StickyImmix { // Separate each condition so the code is clear #[allow(clippy::if_same_then_else, clippy::needless_bool)] if crate::plan::generational::FULL_NURSERY_GC { - trace!("full heap: forced full heap"); + log::trace!("full heap: forced full heap"); // For barrier overhead measurements, we always do full gc in nursery collections. 
true } else if self diff --git a/src/plan/tracing.rs b/src/plan/tracing.rs index 2f9ac4c3c9..6a6a92ea79 100644 --- a/src/plan/tracing.rs +++ b/src/plan/tracing.rs @@ -3,6 +3,8 @@ use crate::scheduler::gc_work::{ProcessEdgesWork, SlotOf}; use crate::scheduler::{GCWorker, WorkBucketStage}; +#[cfg(debug_assertions)] +use crate::util::log; use crate::util::ObjectReference; use crate::vm::SlotVisitor; @@ -116,7 +118,7 @@ impl<'a, E: ProcessEdgesWork> SlotVisitor> for ObjectsClosure<'a, E> { #[cfg(debug_assertions)] { use crate::vm::slot::Slot; - trace!( + log::trace!( "(ObjectsClosure) Visit slot {:?} (pointing to {:?})", slot, slot.load() diff --git a/src/policy/copyspace.rs b/src/policy/copyspace.rs index 6b30f80117..bfeda53cce 100644 --- a/src/policy/copyspace.rs +++ b/src/policy/copyspace.rs @@ -7,12 +7,14 @@ use crate::policy::space::{CommonSpace, Space}; use crate::scheduler::GCWorker; use crate::util::alloc::allocator::AllocatorContext; use crate::util::heap::{MonotonePageResource, PageResource}; +use crate::util::log; use crate::util::metadata::{extract_side_metadata, MetadataSpec}; use crate::util::object_enum::ObjectEnumerator; use crate::util::object_forwarding; use crate::util::{copy::*, object_enum}; use crate::util::{Address, ObjectReference}; use crate::vm::*; + use libc::{mprotect, PROT_EXEC, PROT_NONE, PROT_READ, PROT_WRITE}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; @@ -218,7 +220,7 @@ impl CopySpace { semantics: Option, worker: &mut GCWorker, ) -> ObjectReference { - trace!("copyspace.trace_object(, {:?}, {:?})", object, semantics,); + log::trace!("copyspace.trace_object(, {:?}, {:?})", object, semantics,); // If this is not from space, we do not need to trace it (the object has been copied to the tosapce) if !self.is_from_space() { @@ -236,18 +238,18 @@ impl CopySpace { object ); - trace!("attempting to forward"); + log::trace!("attempting to forward"); let forwarding_status = object_forwarding::attempt_to_forward::(object); - trace!("checking if object is being forwarded"); + log::trace!("checking if object is being forwarded"); if object_forwarding::state_is_forwarded_or_being_forwarded(forwarding_status) { - trace!("... yes it is"); + log::trace!("... yes it is"); let new_object = object_forwarding::spin_and_get_forwarded_object::(object, forwarding_status); - trace!("Returning"); + log::trace!("Returning"); new_object } else { - trace!("... no it isn't. Copying"); + log::trace!("... no it isn't. 
Copying"); let new_object = object_forwarding::forward_object::( object, semantics.unwrap(), @@ -258,9 +260,9 @@ impl CopySpace { }, ); - trace!("Forwarding pointer"); + log::trace!("Forwarding pointer"); queue.enqueue(new_object); - trace!("Copied [{:?} -> {:?}]", object, new_object); + log::trace!("Copied [{:?} -> {:?}]", object, new_object); new_object } } @@ -277,7 +279,7 @@ impl CopySpace { unsafe { mprotect(start.to_mut_ptr(), extent, PROT_NONE); } - trace!("Protect {:x} {:x}", start, start + extent); + log::trace!("Protect {:x} {:x}", start, start + extent); } #[allow(dead_code)] // Only used with certain features (such as sanity) @@ -296,7 +298,7 @@ impl CopySpace { PROT_READ | PROT_WRITE | PROT_EXEC, ); } - trace!("Unprotect {:x} {:x}", start, start + extent); + log::trace!("Unprotect {:x} {:x}", start, start + extent); } } diff --git a/src/policy/immix/defrag.rs b/src/policy/immix/defrag.rs index 8861e30bdc..203218db84 100644 --- a/src/policy/immix/defrag.rs +++ b/src/policy/immix/defrag.rs @@ -4,8 +4,10 @@ use super::{ ImmixSpace, }; use crate::util::linear_scan::Region; +use crate::util::log; use crate::{policy::space::Space, Plan}; use crate::{util::constants::LOG_BYTES_IN_PAGE, vm::*}; + use spin::Mutex; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; @@ -78,7 +80,7 @@ impl Defrag { || !exhausted_reusable_space || super::STRESS_DEFRAG || (collect_whole_heap && user_triggered && full_heap_system_gc)); - info!("Defrag: {}", in_defrag); + log::info!("Defrag: {}", in_defrag); probe!(mmtk, immix_defrag, in_defrag); self.in_defrag_collection .store(in_defrag, Ordering::Release) diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index 6ebd9c255e..dbce455776 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -287,7 +287,7 @@ impl ImmixSpace { space_args: ImmixSpaceArgs, ) -> Self { #[cfg(feature = "immix_non_moving")] - info!( + crate::util::log::info!( "Creating non-moving ImmixSpace: {}. Block size: 2^{}", args.name, Block::LOG_BYTES diff --git a/src/policy/immortalspace.rs b/src/policy/immortalspace.rs index e4f49f7c28..e3bcf1f499 100644 --- a/src/policy/immortalspace.rs +++ b/src/policy/immortalspace.rs @@ -4,6 +4,7 @@ use crate::policy::sft::SFT; use crate::policy::space::{CommonSpace, Space}; use crate::util::address::Address; use crate::util::heap::{MonotonePageResource, PageResource}; +use crate::util::log; use crate::util::metadata::mark_bit::MarkState; use crate::util::object_enum::{self, ObjectEnumerator}; @@ -186,7 +187,7 @@ impl ImmortalSpace { } else { // Otherwise, we reset the mark bit for the allocated regions. 
for (addr, size) in self.pr.iterate_allocated_regions() { - debug!( + log::debug!( "{:?}: reset mark bit from {} to {}", self.name(), addr, diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs index 8906a50044..9c9a025fc5 100644 --- a/src/policy/largeobjectspace.rs +++ b/src/policy/largeobjectspace.rs @@ -7,6 +7,7 @@ use crate::policy::sft::SFT; use crate::policy::space::{CommonSpace, Space}; use crate::util::constants::BYTES_IN_PAGE; use crate::util::heap::{FreeListPageResource, PageResource}; +use crate::util::log; use crate::util::metadata; use crate::util::object_enum::ObjectEnumerator; use crate::util::opaque_pointer::*; @@ -257,7 +258,7 @@ impl LargeObjectSpace { object ); let nursery_object = self.is_in_nursery(object); - trace!( + log::trace!( "LOS object {} {} a nursery object", object, if nursery_object { "is" } else { "is not" } @@ -266,7 +267,7 @@ impl LargeObjectSpace { // Note that test_and_mark() has side effects of // clearing nursery bit/moving objects out of logical nursery if self.test_and_mark(object, self.mark_state) { - trace!("LOS object {} is being marked now", object); + log::trace!("LOS object {} is being marked now", object); self.treadmill.copy(object, nursery_object); // We just moved the object out of the logical nursery, mark it as unlogged. if nursery_object && self.common.needs_log_bit { @@ -275,7 +276,7 @@ impl LargeObjectSpace { } queue.enqueue(object); } else { - trace!( + log::trace!( "LOS object {} is not being marked now, it was marked before", object ); diff --git a/src/policy/lockfreeimmortalspace.rs b/src/policy/lockfreeimmortalspace.rs index 05cf448001..05fcf88cc6 100644 --- a/src/policy/lockfreeimmortalspace.rs +++ b/src/policy/lockfreeimmortalspace.rs @@ -13,6 +13,7 @@ use crate::util::heap::gc_trigger::GCTrigger; use crate::util::heap::layout::vm_layout::vm_layout; use crate::util::heap::PageResource; use crate::util::heap::VMRequest; +use crate::util::log; use crate::util::memory::MmapStrategy; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::util::metadata::side_metadata::SideMetadataSanity; @@ -136,7 +137,7 @@ impl Space for LockFreeImmortalSpace { } fn acquire(&self, _tls: VMThread, pages: usize) -> Address { - trace!("LockFreeImmortalSpace::acquire"); + log::trace!("LockFreeImmortalSpace::acquire"); let bytes = conversions::pages_to_bytes(pages); let start = self .cursor diff --git a/src/policy/markcompactspace.rs b/src/policy/markcompactspace.rs index 7843edef9a..e4c809c699 100644 --- a/src/policy/markcompactspace.rs +++ b/src/policy/markcompactspace.rs @@ -10,6 +10,7 @@ use crate::util::alloc::allocator::align_allocation_no_fill; use crate::util::constants::LOG_BYTES_IN_WORD; use crate::util::copy::CopySemantics; use crate::util::heap::{MonotonePageResource, PageResource}; +use crate::util::log; use crate::util::metadata::{extract_side_metadata, vo_bit}; use crate::util::object_enum::{self, ObjectEnumerator}; use crate::util::{Address, ObjectReference}; @@ -383,7 +384,7 @@ impl MarkCompactSpace { ); // update forwarding pointer Self::store_header_forwarding_pointer(obj, new_obj); - trace!( + log::trace!( "Calculate forward: {} (size when copied = {}) ~> {} (size = {})", obj, VM::VMObjectModel::get_size_when_copied(obj), @@ -407,12 +408,12 @@ impl MarkCompactSpace { let maybe_forwarding_pointer = Self::get_header_forwarding_pointer(obj); if let Some(forwarding_pointer) = maybe_forwarding_pointer { - trace!("Compact {} to {}", obj, forwarding_pointer); + log::trace!("Compact {} to {}", obj, 
forwarding_pointer); let new_object = forwarding_pointer; Self::clear_header_forwarding_pointer(new_object); // copy object - trace!(" copy from {} to {}", obj, new_object); + log::trace!(" copy from {} to {}", obj, new_object); let end_of_new_object = VM::VMObjectModel::copy_to(obj, new_object, Address::ZERO); // update VO bit, @@ -420,12 +421,12 @@ impl MarkCompactSpace { to = new_object.to_object_start::() + copied_size; debug_assert_eq!(end_of_new_object, to); } else { - trace!("Skipping dead object {}", obj); + log::trace!("Skipping dead object {}", obj); } } } - debug!("Compact end: to = {}", to); + log::debug!("Compact end: to = {}", to); // reset the bump pointer self.pr.reset_cursor(to); diff --git a/src/policy/marksweepspace/malloc_ms/global.rs b/src/policy/marksweepspace/malloc_ms/global.rs index d6a3124a1a..f91ed22c0b 100644 --- a/src/policy/marksweepspace/malloc_ms/global.rs +++ b/src/policy/marksweepspace/malloc_ms/global.rs @@ -9,6 +9,7 @@ use crate::policy::space::CommonSpace; use crate::scheduler::GCWorkScheduler; use crate::util::heap::gc_trigger::GCTrigger; use crate::util::heap::PageResource; +use crate::util::log; use crate::util::malloc::library::{BYTES_IN_MALLOC_PAGE, LOG_BYTES_IN_MALLOC_PAGE}; use crate::util::malloc::malloc_ms_util::*; use crate::util::metadata::side_metadata::{ @@ -23,6 +24,7 @@ use crate::util::{conversions, metadata}; use crate::vm::VMBinding; use crate::vm::{ActivePlan, Collection, ObjectModel}; use crate::{policy::space::Space, util::heap::layout::vm_layout::BYTES_IN_CHUNK}; + #[cfg(debug_assertions)] use std::collections::HashMap; use std::marker::PhantomData; @@ -123,7 +125,7 @@ impl SFT for MallocSpace { } fn initialize_object_metadata(&self, object: ObjectReference, _alloc: bool) { - trace!("initialize_object_metadata for object {}", object); + log::trace!("initialize_object_metadata for object {}", object); set_vo_bit(object); } @@ -397,12 +399,12 @@ impl MallocSpace { // indirect call instructions in the generated assembly fn free_internal(&self, addr: Address, bytes: usize, offset_malloc_bit: bool) { if offset_malloc_bit { - trace!("Free memory {:x}", addr); + log::trace!("Free memory {:x}", addr); offset_free(addr); unsafe { unset_offset_malloc_bit_unsafe(addr) }; } else { let ptr = addr.to_mut_ptr(); - trace!("Free memory {:?}", ptr); + log::trace!("Free memory {:?}", ptr); unsafe { free(ptr); } @@ -499,7 +501,7 @@ impl MallocSpace { chunk += BYTES_IN_CHUNK; } - debug!("Generated {} sweep work packets", work_packets.len()); + log::debug!("Generated {} sweep work packets", work_packets.len()); #[cfg(debug_assertions)] { self.total_work_packets @@ -551,11 +553,11 @@ impl MallocSpace { // We are the only thread that is dealing with the object. We can use non-atomic methods for the metadata. if !unsafe { is_marked_unsafe::(object) } { // Dead object - trace!("Object {} has been allocated but not marked", object); + log::trace!("Object {} has been allocated but not marked", object); // Free object self.free_internal(obj_start, bytes, offset_malloc); - trace!("free object {}", object); + log::trace!("free object {}", object); unsafe { unset_vo_bit_unsafe(object) }; true @@ -586,7 +588,7 @@ impl MallocSpace { /// Used when each chunk is done. Only called in debug build. 
#[cfg(debug_assertions)] fn debug_sweep_chunk_done(&self, live_bytes_in_the_chunk: usize) { - debug!( + log::debug!( "Used bytes after releasing: {}", self.active_bytes.load(Ordering::SeqCst) ); @@ -596,7 +598,7 @@ impl MallocSpace { .fetch_add(live_bytes_in_the_chunk, Ordering::SeqCst); if completed_packets == self.total_work_packets.load(Ordering::Relaxed) { - trace!( + log::trace!( "work_live_bytes = {}, live_bytes = {}, active_bytes = {}", self.work_live_bytes.load(Ordering::Relaxed), live_bytes_in_the_chunk, @@ -628,7 +630,7 @@ impl MallocSpace { #[cfg(debug_assertions)] let mut live_bytes = 0; - debug!("Check active chunk {:?}", chunk_start); + log::debug!("Check active chunk {:?}", chunk_start); let mut address = chunk_start; let chunk_end = chunk_start + BYTES_IN_CHUNK; @@ -748,7 +750,7 @@ impl MallocSpace { #[cfg(debug_assertions)] let mut live_bytes = 0; - debug!("Check active chunk {:?}", chunk_start); + log::debug!("Check active chunk {:?}", chunk_start); // The start of a possibly empty page. This will be updated during the sweeping, and always points to the next page of last live objects. let mut empty_page_start = Address::ZERO; diff --git a/src/policy/marksweepspace/malloc_ms/metadata.rs b/src/policy/marksweepspace/malloc_ms/metadata.rs index 7fb5b5738a..7226cc0f96 100644 --- a/src/policy/marksweepspace/malloc_ms/metadata.rs +++ b/src/policy/marksweepspace/malloc_ms/metadata.rs @@ -1,5 +1,6 @@ use crate::util::conversions; use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK; +use crate::util::log; use crate::util::metadata::side_metadata; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::util::metadata::side_metadata::SideMetadataSpec; @@ -7,6 +8,7 @@ use crate::util::metadata::vo_bit; use crate::util::Address; use crate::util::ObjectReference; use crate::vm::{ObjectModel, VMBinding}; + use std::sync::atomic::Ordering; use std::sync::Mutex; @@ -92,7 +94,7 @@ fn map_active_chunk_metadata(chunk_start: Address) { #[cfg(target_pointer_width = "32")] let size = 512 * BYTES_IN_CHUNK; - debug!( + log::debug!( "chunk_start = {} mapping space for {} -> {}", chunk_start, start, @@ -140,7 +142,7 @@ pub(super) fn map_meta_space(metadata: &SideMetadataContext, addr: Address, size // Set the chunk mark at the end. So if we have chunk mark set, we know we have mapped side metadata // for the chunk. 
- trace!("set chunk mark bit for {}", start); + log::trace!("set chunk mark bit for {}", start); set_chunk_mark(start); }; diff --git a/src/policy/marksweepspace/native_ms/block.rs b/src/policy/marksweepspace/native_ms/block.rs index 5bef9a3e52..243b73c717 100644 --- a/src/policy/marksweepspace/native_ms/block.rs +++ b/src/policy/marksweepspace/native_ms/block.rs @@ -7,6 +7,7 @@ use super::MarkSweepSpace; use crate::util::constants::LOG_BYTES_IN_PAGE; use crate::util::heap::chunk_map::*; use crate::util::linear_scan::Region; +use crate::util::log; use crate::util::object_enum::BlockMayHaveObjects; use crate::vm::ObjectModel; use crate::{ @@ -328,7 +329,7 @@ impl Block { // Current cursor let mut cursor = cell; - debug!("Sweep block {:?}, cell size {}", self, cell_size); + log::debug!("Sweep block {:?}, cell size {}", self, cell_size); while cell + cell_size <= self.end() { // possible object ref @@ -338,7 +339,7 @@ impl Block { cursor + VM::VMObjectModel::OBJECT_REF_OFFSET_LOWER_BOUND, ) }; - trace!( + log::trace!( "{:?}: cell = {}, last cell in free list = {}, cursor = {}, potential object = {}", self, cell, @@ -350,7 +351,7 @@ impl Block { if VM::VMObjectModel::LOCAL_MARK_BIT_SPEC .is_marked::(potential_object_ref, Ordering::SeqCst) { - debug!("{:?} Live cell: {}", self, cell); + log::debug!("{:?} Live cell: {}", self, cell); // If the mark bit is set, the cell is alive. // We directly jump to the end of the cell. cell += cell_size; @@ -361,9 +362,11 @@ impl Block { if cursor >= cell + cell_size { // We now stepped to the next cell. This means we did not find mark bit in the current cell, and we can add this cell to free list. - debug!( + log::debug!( "{:?} Free cell: {}, last cell in freelist is {}", - self, cell, last + self, + cell, + last ); // Clear VO bit: we don't know where the object reference actually is, so we bulk zero the cell. diff --git a/src/policy/sft.rs b/src/policy/sft.rs index cab4cc987a..80275e18f5 100644 --- a/src/policy/sft.rs +++ b/src/policy/sft.rs @@ -131,7 +131,7 @@ impl SFT for EmptySpaceSFT { } #[cfg(feature = "sanity")] fn is_sane(&self) -> bool { - warn!("Object in empty space!"); + crate::util::log::warn!("Object in empty space!"); false } #[cfg(feature = "object_pinning")] diff --git a/src/policy/sft_map.rs b/src/policy/sft_map.rs index 8fccf9cd5f..6216ea093f 100644 --- a/src/policy/sft_map.rs +++ b/src/policy/sft_map.rs @@ -1,4 +1,5 @@ use super::sft::*; +use crate::util::log; use crate::util::metadata::side_metadata::SideMetadataSpec; use crate::util::Address; @@ -120,7 +121,7 @@ impl SFTRefStorage { pub fn pre_use_check() { // If we do not have lock free operations, warn the users. if !AtomicDoubleWord::is_lock_free() { - warn!( + log::warn!( "SFT access word is not lock free on this platform. This will slow down SFT map." 
); } @@ -397,7 +398,7 @@ mod dense_chunk_map { let first_chunk = conversions::chunk_align_down(start); let last_chunk = conversions::chunk_align_up(start + bytes); let mut chunk = first_chunk; - debug!( + log::debug!( "update {} (chunk {}) to {} (chunk {})", start, first_chunk, @@ -405,11 +406,11 @@ mod dense_chunk_map { last_chunk ); while chunk < last_chunk { - trace!("Update {} to index {}", chunk, index); + log::trace!("Update {} to index {}", chunk, index); SFT_DENSE_CHUNK_MAP_INDEX.store_atomic::(chunk, index, Ordering::SeqCst); chunk += BYTES_IN_CHUNK; } - debug!("update done"); + log::debug!("update done"); } unsafe fn clear(&self, address: Address) { @@ -502,7 +503,7 @@ mod sparse_chunk_map { #[allow(dead_code)] unsafe fn clear(&self, chunk_start: Address) { if DEBUG_SFT { - debug!( + log::debug!( "Clear SFT for chunk {} (was {})", chunk_start, self.get_checked(chunk_start).name() @@ -524,17 +525,19 @@ mod sparse_chunk_map { } fn log_update(&self, space: &(dyn SFT + Sync + 'static), start: Address, bytes: usize) { - debug!("Update SFT for Chunk {} as {}", start, space.name(),); + log::debug!("Update SFT for Chunk {} as {}", start, space.name(),); let first = start.chunk_index(); let start_chunk = chunk_index_to_address(first); - debug!( + log::debug!( "Update SFT for {} bytes of Chunk {} #{}", - bytes, start_chunk, first + bytes, + start_chunk, + first ); } fn trace_sft_map(&self) { - trace!("{}", self.print_sft_map()); + log::trace!("{}", self.print_sft_map()); } // This can be used during debugging to print SFT map. diff --git a/src/policy/space.rs b/src/policy/space.rs index 8048009b20..f6e8d357dc 100644 --- a/src/policy/space.rs +++ b/src/policy/space.rs @@ -29,6 +29,7 @@ use crate::util::heap::layout::Mmapper; use crate::util::heap::layout::VMMap; use crate::util::heap::space_descriptor::SpaceDescriptor; use crate::util::heap::HeapMeta; +use crate::util::log; use crate::util::memory::{self, HugePageSupport, MmapProtection, MmapStrategy}; use crate::vm::VMBinding; @@ -81,7 +82,7 @@ pub trait Space: 'static + SFT + Sync + Downcast { } fn acquire(&self, tls: VMThread, pages: usize) -> Address { - trace!("Space.acquire, tls={:?}", tls); + log::trace!("Space.acquire, tls={:?}", tls); debug_assert!( !self.will_oom_on_acquire(tls, pages << LOG_BYTES_IN_PAGE), @@ -97,14 +98,14 @@ pub trait Space: 'static + SFT + Sync + Downcast { // initialize_collection() has to be called so we know GC is initialized. let allow_gc = should_poll && self.common().global_state.is_initialized(); - trace!("Reserving pages"); + log::trace!("Reserving pages"); let pr = self.get_page_resource(); let pages_reserved = pr.reserve_pages(pages); - trace!("Pages reserved"); - trace!("Polling .."); + log::trace!("Pages reserved"); + log::trace!("Polling .."); if should_poll && self.get_gc_trigger().poll(false, Some(self.as_space())) { - debug!("Collection required"); + log::debug!("Collection required"); assert!(allow_gc, "GC is not allowed here: collection is not initialized (did you call initialize_collection()?)."); // Clear the request, and inform GC trigger about the pending allocation. 
@@ -116,7 +117,7 @@ pub trait Space: 'static + SFT + Sync + Downcast { VM::VMCollection::block_for_gc(VMMutatorThread(tls)); // We have checked that this is mutator unsafe { Address::zero() } } else { - debug!("Collection not required"); + log::debug!("Collection not required"); // We need this lock: Othrewise, it is possible that one thread acquires pages in a new chunk, but not yet // set SFT for it (in grow_space()), and another thread acquires pages in the same chunk, which is not @@ -128,7 +129,7 @@ pub trait Space: 'static + SFT + Sync + Downcast { match pr.get_new_pages(self.common().descriptor, pages_reserved, pages, tls) { Ok(res) => { - debug!( + log::debug!( "Got new pages {} ({} pages) for {} in chunk {}, new_chunk? {}", res.start, res.pages, @@ -205,7 +206,7 @@ pub trait Space: 'static + SFT + Sync + Downcast { ); } - debug!("Space.acquire(), returned = {}", res.start); + log::debug!("Space.acquire(), returned = {}", res.start); res.start } Err(_) => { @@ -255,7 +256,7 @@ pub trait Space: 'static + SFT + Sync + Downcast { * @param new_chunk {@code true} if the new space encroached upon or started a new chunk or chunks. */ fn grow_space(&self, start: Address, bytes: usize, new_chunk: bool) { - trace!( + log::trace!( "Grow space from {} for {} bytes (new chunk = {})", start, bytes, @@ -618,7 +619,7 @@ impl CommonSpace { panic!("failed to mmap meta memory"); } - debug!( + log::debug!( "Created space {} [{}, {}) for {} bytes", rtn.name, start, @@ -666,15 +667,15 @@ impl CommonSpace { } fn get_frac_available(frac: f32) -> usize { - trace!("AVAILABLE_START={}", vm_layout().available_start()); - trace!("AVAILABLE_END={}", vm_layout().available_end()); + log::trace!("AVAILABLE_START={}", vm_layout().available_start()); + log::trace!("AVAILABLE_END={}", vm_layout().available_end()); let bytes = (frac * vm_layout().available_bytes() as f32) as usize; - trace!("bytes={}*{}={}", frac, vm_layout().available_bytes(), bytes); + log::trace!("bytes={}*{}={}", frac, vm_layout().available_bytes(), bytes); let mb = bytes >> LOG_BYTES_IN_MBYTE; let rtn = mb << LOG_BYTES_IN_MBYTE; - trace!("rtn={}", rtn); + log::trace!("rtn={}", rtn); let aligned_rtn = raw_align_up(rtn, BYTES_IN_CHUNK); - trace!("aligned_rtn={}", aligned_rtn); + log::trace!("aligned_rtn={}", aligned_rtn); aligned_rtn } diff --git a/src/policy/vmspace.rs b/src/policy/vmspace.rs index 60199c0fde..7606dcbd22 100644 --- a/src/policy/vmspace.rs +++ b/src/policy/vmspace.rs @@ -8,6 +8,7 @@ use crate::util::constants::BYTES_IN_PAGE; use crate::util::heap::externalpageresource::{ExternalPageResource, ExternalPages}; use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK; use crate::util::heap::PageResource; +use crate::util::log; use crate::util::metadata::mark_bit::MarkState; #[cfg(feature = "set_unlog_bits_vm_space")] use crate::util::metadata::MetadataSpec; @@ -219,9 +220,12 @@ impl VMSpace { ) .is_empty()); - debug!( + log::debug!( "Align VM space ({}, {}) to chunk ({}, {})", - start, end, chunk_start, chunk_end + start, + end, + chunk_start, + chunk_end ); // Mark as mapped in mmapper diff --git a/src/scheduler/affinity.rs b/src/scheduler/affinity.rs index d76a517d15..88d7630ffd 100644 --- a/src/scheduler/affinity.rs +++ b/src/scheduler/affinity.rs @@ -1,4 +1,5 @@ use super::worker::ThreadId; +use crate::util::log; use crate::util::options::AffinityKind; #[cfg(target_os = "linux")] use libc::{cpu_set_t, sched_getaffinity, sched_setaffinity, CPU_COUNT, CPU_SET, CPU_ZERO}; @@ -35,7 +36,7 @@ impl AffinityKind { AffinityKind::OsDefault 
=> {} AffinityKind::RoundRobin(cpuset) => { let cpu = cpuset[thread % cpuset.len()]; - debug!("Set affinity for thread {} to core {}", thread, cpu); + log::debug!("Set affinity for thread {} to core {}", thread, cpu); bind_current_thread_to_core(cpu); } } diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index 82be1f3561..d941805110 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -54,7 +54,7 @@ impl Prepare { impl GCWork for Prepare { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { - trace!("Prepare Global"); + log::trace!("Prepare Global"); // We assume this is the only running work packet that accesses plan at the point of execution let plan_mut: &mut C::PlanType = unsafe { &mut *(self.plan as *const _ as *mut _) }; plan_mut.prepare(worker.tls); @@ -93,7 +93,7 @@ impl PrepareMutator { impl GCWork for PrepareMutator { fn do_work(&mut self, worker: &mut GCWorker, _mmtk: &'static MMTK) { - trace!("Prepare Mutator"); + log::trace!("Prepare Mutator"); self.mutator.prepare(worker.tls); } } @@ -104,7 +104,7 @@ pub struct PrepareCollector; impl GCWork for PrepareCollector { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { - trace!("Prepare Collector"); + log::trace!("Prepare Collector"); worker.get_copy_context_mut().prepare(); mmtk.get_plan().prepare_worker(worker); } @@ -131,7 +131,7 @@ unsafe impl Send for Release {} impl GCWork for Release { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { - trace!("Release Global"); + log::trace!("Release Global"); mmtk.gc_trigger.policy.on_gc_release(mmtk); // We assume this is the only running work packet that accesses plan at the point of execution @@ -180,7 +180,7 @@ impl ReleaseMutator { impl GCWork for ReleaseMutator { fn do_work(&mut self, worker: &mut GCWorker, _mmtk: &'static MMTK) { - trace!("Release Mutator"); + log::trace!("Release Mutator"); self.mutator.release(worker.tls); } } @@ -191,7 +191,7 @@ pub struct ReleaseCollector; impl GCWork for ReleaseCollector { fn do_work(&mut self, worker: &mut GCWorker, _mmtk: &'static MMTK) { - trace!("Release Collector"); + log::trace!("Release Collector"); worker.get_copy_context_mut().release(); } } @@ -210,7 +210,7 @@ impl StopMutators { impl GCWork for StopMutators { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { - trace!("stop_all_mutators start"); + log::trace!("stop_all_mutators start"); mmtk.state.prepare_for_stack_scanning(); ::VMCollection::stop_all_mutators(worker.tls, |mutator| { // TODO: The stack scanning work won't start immediately, as the `Prepare` bucket is not opened yet (the bucket is opened in notify_mutators_paused). 
@@ -218,7 +218,7 @@ impl GCWork for StopMutators { mmtk.scheduler.work_buckets[WorkBucketStage::Prepare] .add(ScanMutatorRoots::(mutator)); }); - trace!("stop_all_mutators end"); + log::trace!("stop_all_mutators end"); mmtk.scheduler.notify_mutators_paused(mmtk); mmtk.scheduler.work_buckets[WorkBucketStage::Prepare].add(ScanVMSpecificRoots::::new()); } @@ -330,7 +330,7 @@ impl VMProcessWeakRefs { impl GCWork for VMProcessWeakRefs { fn do_work(&mut self, worker: &mut GCWorker, _mmtk: &'static MMTK) { - trace!("VMProcessWeakRefs"); + log::trace!("VMProcessWeakRefs"); let stage = WorkBucketStage::VMRefClosure; @@ -373,7 +373,7 @@ impl VMForwardWeakRefs { impl GCWork for VMForwardWeakRefs { fn do_work(&mut self, worker: &mut GCWorker, _mmtk: &'static MMTK) { - trace!("VMForwardWeakRefs"); + log::trace!("VMForwardWeakRefs"); let stage = WorkBucketStage::VMRefForwarding; @@ -398,9 +398,9 @@ pub struct VMPostForwarding { impl GCWork for VMPostForwarding { fn do_work(&mut self, worker: &mut GCWorker, _mmtk: &'static MMTK) { - trace!("VMPostForwarding start"); + log::trace!("VMPostForwarding start"); ::VMCollection::post_forwarding(worker.tls); - trace!("VMPostForwarding end"); + log::trace!("VMPostForwarding end"); } } @@ -408,7 +408,7 @@ pub struct ScanMutatorRoots(pub &'static mut Mutator); impl GCWork for ScanMutatorRoots { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { - trace!("ScanMutatorRoots for mutator {:?}", self.0.get_tls()); + log::trace!("ScanMutatorRoots for mutator {:?}", self.0.get_tls()); let mutators = ::VMActivePlan::number_of_mutators(); let factory = ProcessEdgesWorkRootsWorkFactory::< C::VM, @@ -442,7 +442,7 @@ impl ScanVMSpecificRoots { impl GCWork for ScanVMSpecificRoots { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { - trace!("ScanStaticRoots"); + log::trace!("ScanStaticRoots"); let factory = ProcessEdgesWorkRootsWorkFactory::< C::VM, C::DefaultProcessEdges, @@ -659,7 +659,7 @@ impl GCWork for E { if self.roots && !_mmtk.is_in_sanity() { self.cache_roots_for_sanity_gc(); } - trace!("ProcessEdgesWork End"); + log::trace!("ProcessEdgesWork End"); } } @@ -839,7 +839,7 @@ pub trait ScanObjectsWork: GCWork + Sized { .increase_live_bytes(VM::VMObjectModel::get_current_size(object)); if ::VMScanning::support_slot_enqueuing(tls, object) { - trace!("Scan object (slot) {}", object); + log::trace!("Scan object (slot) {}", object); // If an object supports slot-enqueuing, we enqueue its slots. ::VMScanning::scan_object(tls, object, &mut closure); self.post_scan_object(object); @@ -869,7 +869,7 @@ pub trait ScanObjectsWork: GCWork + Sized { object_tracer_context.with_tracer(worker, |object_tracer| { // Scan objects and trace their outgoing edges at the same time. 
for object in scan_later.iter().copied() { - trace!("Scan object (node) {}", object); + log::trace!("Scan object (node) {}", object); ::VMScanning::scan_object_and_trace_edges( tls, object, @@ -922,9 +922,9 @@ impl> ScanObjectsWork for ScanOb impl GCWork for ScanObjects { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { - trace!("ScanObjects"); + log::trace!("ScanObjects"); self.do_work_common(&self.buffer, worker, mmtk); - trace!("ScanObjects End"); + log::trace!("ScanObjects End"); } } @@ -1048,9 +1048,9 @@ impl + PlanTraceObject> GCWork { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { - trace!("PlanScanObjects"); + log::trace!("PlanScanObjects"); self.do_work_common(&self.buffer, worker, mmtk); - trace!("PlanScanObjects End"); + log::trace!("PlanScanObjects End"); } } @@ -1099,7 +1099,7 @@ impl, O2OPE: ProcessEdgesWork { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { - trace!("ProcessRootNode"); + log::trace!("ProcessRootNode"); #[cfg(feature = "sanity")] { @@ -1143,7 +1143,7 @@ impl, O2OPE: ProcessEdgesWork GCWorkScheduler { pub fn stop_gc_threads_for_forking(self: &Arc) { self.worker_group.prepare_surrender_buffer(); - debug!("A mutator is requesting GC threads to stop for forking..."); + log::debug!("A mutator is requesting GC threads to stop for forking..."); self.worker_monitor.make_request(WorkerGoal::StopForFork); } @@ -106,7 +108,7 @@ impl GCWorkScheduler { let all_surrendered = self.worker_group.surrender_gc_worker(worker); if all_surrendered { - debug!( + log::debug!( "All {} workers surrendered.", self.worker_group.worker_count() ); @@ -128,7 +130,7 @@ impl GCWorkScheduler { /// Request a GC to be scheduled. Called by mutator via `GCRequester`. pub(crate) fn request_schedule_collection(&self) { - debug!("A mutator is sending GC-scheduling request to workers..."); + log::debug!("A mutator is sending GC-scheduling request to workers..."); self.worker_monitor.make_request(WorkerGoal::Gc); } @@ -245,7 +247,7 @@ impl GCWorkScheduler { let mut new_packets = false; for (id, work_bucket) in self.work_buckets.iter() { if work_bucket.is_activated() && work_bucket.maybe_schedule_sentinel() { - trace!("Scheduled sentinel packet into {:?}", id); + log::trace!("Scheduled sentinel packet into {:?}", id); new_packets = true; } } @@ -274,13 +276,13 @@ impl GCWorkScheduler { new_packets = new_packets || !bucket.is_drained(); if new_packets { // Quit the loop. There are already new packets in the newly opened buckets. - trace!("Found new packets at stage {:?}. Break.", id); + log::trace!("Found new packets at stage {:?}. Break.", id); break; } new_packets = new_packets || bucket.maybe_schedule_sentinel(); if new_packets { // Quit the loop. A sentinel packet is added to the newly opened buckets. - trace!("Sentinel is scheduled at stage {:?}. Break.", id); + log::trace!("Sentinel is scheduled at stage {:?}. Break.", id); break; } } @@ -320,7 +322,7 @@ impl GCWorkScheduler { let mut error_example = None; for (id, bucket) in self.work_buckets.iter() { if bucket.is_activated() && !bucket.is_empty() { - error!("Work bucket {:?} is active but not empty!", id); + log::error!("Work bucket {:?} is active but not empty!", id); // This error can be hard to reproduce. // If an error happens in the release build where logs are turned off, // we should show at least one abnormal bucket in the panic message @@ -427,7 +429,7 @@ impl GCWorkScheduler { ); // We are in the middle of GC, and the last GC worker parked. 
- trace!("The last worker parked during GC. Try to find more work to do..."); + log::trace!("The last worker parked during GC. Try to find more work to do..."); // During GC, if all workers parked, all open buckets must have been drained. self.assert_all_activated_buckets_are_empty(); @@ -470,7 +472,7 @@ impl GCWorkScheduler { match goal { WorkerGoal::Gc => { - trace!("A mutator requested a GC to be scheduled."); + log::trace!("A mutator requested a GC to be scheduled."); // We set the eBPF trace point here so that bpftrace scripts can start recording // work packet events before the `ScheduleCollection` work packet starts. @@ -486,7 +488,7 @@ impl GCWorkScheduler { LastParkedResult::WakeSelf } WorkerGoal::StopForFork => { - trace!("A mutator wanted to fork."); + log::trace!("A mutator wanted to fork."); LastParkedResult::WakeAll } } @@ -495,19 +497,19 @@ impl GCWorkScheduler { /// Find more work for workers to do. Return true if more work is available. fn find_more_work_for_workers(&self) -> bool { if self.worker_group.has_designated_work() { - trace!("Some workers have designated work."); + log::trace!("Some workers have designated work."); return true; } // See if any bucket has a sentinel. if self.schedule_sentinels() { - trace!("Some sentinels are scheduled."); + log::trace!("Some sentinels are scheduled."); return true; } // Try to open new buckets. if self.update_buckets() { - trace!("Some buckets are opened."); + log::trace!("Some buckets are opened."); return true; } @@ -543,7 +545,7 @@ impl GCWorkScheduler { }; let elapsed = start_time.elapsed(); - info!( + log::info!( "End of GC ({}/{} pages, took {} ms)", mmtk.get_plan().get_reserved_pages(), mmtk.get_plan().get_total_pages(), @@ -563,7 +565,7 @@ impl GCWorkScheduler { "Live bytes of all live objects ({} bytes) is larger than used pages ({} bytes), something is wrong.", live_bytes, used_bytes ); - info!( + log::info!( "Live objects = {} bytes ({:04.1}% of {} used pages)", live_bytes, live_bytes as f64 * 100.0 / used_bytes as f64, diff --git a/src/scheduler/work.rs b/src/scheduler/work.rs index c74506cae6..cf5b50bdd6 100644 --- a/src/scheduler/work.rs +++ b/src/scheduler/work.rs @@ -1,6 +1,8 @@ use super::worker::*; use crate::mmtk::MMTK; +use crate::util::log; use crate::vm::VMBinding; + #[cfg(feature = "work_packet_stats")] use std::any::{type_name, TypeId}; @@ -31,7 +33,7 @@ pub trait GCWork: 'static + Send { /// If the feature "work_packet_stats" is not enabled, this call simply forwards the call /// to `do_work()`. fn do_work_with_stat(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { - debug!("{}", std::any::type_name::()); + log::debug!("{}", std::any::type_name::()); debug_assert!(!worker.tls.0.0.is_null(), "TLS must be set correctly for a GC worker before the worker does any work. GC Worker {} has no valid tls.", worker.ordinal); #[cfg(feature = "work_packet_stats")] diff --git a/src/scheduler/worker.rs b/src/scheduler/worker.rs index babfd32f29..5aa74a3ffc 100644 --- a/src/scheduler/worker.rs +++ b/src/scheduler/worker.rs @@ -3,8 +3,10 @@ use super::work_bucket::*; use super::*; use crate::mmtk::MMTK; use crate::util::copy::GCWorkerCopyContext; +use crate::util::log; use crate::util::opaque_pointer::*; use crate::vm::{Collection, GCThreadContext, VMBinding}; + use atomic::Atomic; use atomic_refcell::{AtomicRef, AtomicRefCell, AtomicRefMut}; use crossbeam::deque::{self, Stealer}; @@ -207,7 +209,7 @@ impl GCWorker { /// * `mmtk`: A reference to an MMTk instance. 
pub fn run(mut self: Box, tls: VMWorkerThread, mmtk: &'static MMTK) { probe!(mmtk, gcworker_run); - debug!( + log::debug!( "Worker started. ordinal: {}, {}", self.ordinal, crate::util::rust_util::debug_process_thread_id(), @@ -243,7 +245,7 @@ impl GCWorker { probe!(mmtk, work, typename.as_ptr(), typename.len()); work.do_work_with_stat(&mut self, mmtk); } - debug!( + log::debug!( "Worker exiting. ordinal: {}, {}", self.ordinal, crate::util::rust_util::debug_process_thread_id(), @@ -346,7 +348,7 @@ impl WorkerGroup { local_work_queues: Vec>>>, mmtk: &'static MMTK, ) -> Vec>> { - debug!("Creating GCWorker instances..."); + log::debug!("Creating GCWorker instances..."); assert_eq!(self.workers_shared.len(), local_work_queues.len()); @@ -365,14 +367,14 @@ impl WorkerGroup { }) .collect::>(); - debug!("Created {} GCWorker instances.", workers.len()); + log::debug!("Created {} GCWorker instances.", workers.len()); workers } /// Spawn all the worker threads #[allow(clippy::vec_box)] // See `WorkerCreationState::Surrendered`. fn spawn(&self, workers: Vec>>, tls: VMThread) { - debug!( + log::debug!( "Spawning GC workers. {}", crate::util::rust_util::debug_process_thread_id(), ); @@ -382,7 +384,7 @@ impl WorkerGroup { VM::VMCollection::spawn_gc_thread(tls, GCThreadContext::::Worker(worker)); } - debug!( + log::debug!( "Spawned {} worker threads. {}", self.worker_count(), crate::util::rust_util::debug_process_thread_id(), @@ -408,7 +410,7 @@ impl WorkerGroup { }; let ordinal = worker.ordinal; workers.push(worker); - trace!( + log::trace!( "Worker {} surrendered. ({}/{})", ordinal, workers.len(), diff --git a/src/scheduler/worker_monitor.rs b/src/scheduler/worker_monitor.rs index c84dedfcd3..16d03d1b5d 100644 --- a/src/scheduler/worker_monitor.rs +++ b/src/scheduler/worker_monitor.rs @@ -4,6 +4,8 @@ //! - letting the last parked worker take action, and //! - letting workers and mutators notify workers when workers are given things to do. +use crate::util::log; + use std::sync::{Condvar, Mutex}; use super::{ @@ -136,7 +138,7 @@ impl WorkerMonitor { // Park this worker let all_parked = sync.parker.inc_parked_workers(); - trace!( + log::trace!( "Worker {} parked. parked/total: {}/{}. All parked: {}", ordinal, sync.parker.parked_workers, @@ -147,7 +149,7 @@ impl WorkerMonitor { let mut should_wait = false; if all_parked { - trace!("Worker {} is the last worker parked.", ordinal); + log::trace!("Worker {} is the last worker parked.", ordinal); let result = on_last_parked(&mut sync.goals); match result { LastParkedResult::ParkSelf => { @@ -223,7 +225,7 @@ impl WorkerMonitor { // Unpark this worker. sync.parker.dec_parked_workers(); - trace!( + log::trace!( "Worker {} unparked. 
parked/total: {}/{}.", ordinal, sync.parker.parked_workers, diff --git a/src/util/alloc/allocator.rs b/src/util/alloc/allocator.rs index a60d26935c..3dedcf050e 100644 --- a/src/util/alloc/allocator.rs +++ b/src/util/alloc/allocator.rs @@ -1,21 +1,21 @@ use crate::global_state::GlobalState; +use crate::policy::space::Space; use crate::util::address::Address; #[cfg(feature = "analysis")] use crate::util::analysis::AnalysisManager; +use crate::util::constants::*; use crate::util::heap::gc_trigger::GCTrigger; +use crate::util::log; +use crate::util::opaque_pointer::*; use crate::util::options::Options; +use crate::vm::VMBinding; +use crate::vm::{ActivePlan, Collection}; use crate::MMTK; +use downcast_rs::Downcast; use std::sync::atomic::Ordering; use std::sync::Arc; -use crate::policy::space::Space; -use crate::util::constants::*; -use crate::util::opaque_pointer::*; -use crate::vm::VMBinding; -use crate::vm::{ActivePlan, Collection}; -use downcast_rs::Downcast; - #[repr(C)] #[derive(Debug)] /// A list of errors that MMTk can encounter during allocation. @@ -113,7 +113,7 @@ pub fn get_maximum_aligned_size_inner( alignment: usize, known_alignment: usize, ) -> usize { - trace!( + log::trace!( "size={}, alignment={}, known_alignment={}, MIN_ALIGNMENT={}", size, alignment, @@ -295,7 +295,7 @@ pub trait Allocator: Downcast { // an analysis counter specific allocation hook, then here is the place to do so #[cfg(feature = "analysis")] if _allocation_bytes > *self.get_context().options.analysis_factor { - trace!( + log::trace!( "Analysis: allocation_bytes = {} more than analysis_factor = {}", _allocation_bytes, *self.get_context().options.analysis_factor @@ -318,7 +318,7 @@ pub trait Allocator: Downcast { // We cannot just rely on the local var. Instead, we get the emergency collection value again, // and check both. if emergency_collection && self.get_context().state.is_emergency_collection() { - trace!("Emergency collection"); + log::trace!("Emergency collection"); // Report allocation success to assist OutOfMemory handling. // This seems odd, but we must allow each OOM to run its course (and maybe give us back memory) let fail_with_oom = !self @@ -326,10 +326,10 @@ pub trait Allocator: Downcast { .state .allocation_success .swap(true, Ordering::SeqCst); - trace!("fail with oom={}", fail_with_oom); + log::trace!("fail with oom={}", fail_with_oom); if fail_with_oom { // Note that we throw a `HeapOutOfMemory` error here and return a null ptr back to the VM - trace!("Throw HeapOutOfMemory!"); + log::trace!("Throw HeapOutOfMemory!"); VM::VMCollection::out_of_memory(tls, AllocationError::HeapOutOfMemory); self.get_context() .state @@ -352,7 +352,7 @@ pub trait Allocator: Downcast { // Record whether last collection was an Emergency collection. If so, we make one more // attempt to allocate before we signal an OOM. emergency_collection = self.get_context().state.is_emergency_collection(); - trace!("Got emergency collection as {}", emergency_collection); + log::trace!("Got emergency collection as {}", emergency_collection); previous_result_zero = true; } } @@ -422,7 +422,7 @@ pub trait Allocator: Downcast { // If an allocator does thread local allocation but does not override this method to // provide a correct implementation, we will log a warning. 
if self.does_thread_local_allocation() && need_poll { - warn!("{} does not support stress GC (An allocator that does thread local allocation needs to implement allow_slow_once_stress_test()).", std::any::type_name::()); + log::warn!("{} does not support stress GC (An allocator that does thread local allocation needs to implement allow_slow_once_stress_test()).", std::any::type_name::()); } self.alloc_slow_once_traced(size, align, offset) } diff --git a/src/util/alloc/bumpallocator.rs b/src/util/alloc/bumpallocator.rs index 76cc628c89..7b3fbbd923 100644 --- a/src/util/alloc/bumpallocator.rs +++ b/src/util/alloc/bumpallocator.rs @@ -6,6 +6,7 @@ use crate::util::alloc::Allocator; use crate::policy::space::Space; use crate::util::conversions::bytes_to_pages_up; +use crate::util::log; use crate::util::opaque_pointer::*; use crate::vm::VMBinding; @@ -99,17 +100,17 @@ impl Allocator for BumpAllocator { } fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address { - trace!("alloc"); + log::trace!("alloc"); let result = align_allocation_no_fill::(self.bump_pointer.cursor, align, offset); let new_cursor = result + size; if new_cursor > self.bump_pointer.limit { - trace!("Thread local buffer used up, go to alloc slow path"); + log::trace!("Thread local buffer used up, go to alloc slow path"); self.alloc_slow(size, align, offset) } else { fill_alignment_gap::(self.bump_pointer.cursor, result); self.bump_pointer.cursor = new_cursor; - trace!( + log::trace!( "Bump allocation size: {}, result: {}, new_cursor: {}, limit: {}", size, result, @@ -121,7 +122,7 @@ impl Allocator for BumpAllocator { } fn alloc_slow_once(&mut self, size: usize, align: usize, offset: usize) -> Address { - trace!("alloc_slow"); + log::trace!("alloc_slow"); self.acquire_block(size, align, offset, false) } @@ -144,7 +145,7 @@ impl Allocator for BumpAllocator { return self.acquire_block(size, align, offset, true); } - trace!("alloc_slow stress_test"); + log::trace!("alloc_slow stress_test"); let result = align_allocation_no_fill::(self.bump_pointer.cursor, align, offset); let new_cursor = result + size; @@ -157,7 +158,7 @@ impl Allocator for BumpAllocator { fill_alignment_gap::(self.bump_pointer.cursor, result); self.bump_pointer.limit -= new_cursor - self.bump_pointer.cursor; self.bump_pointer.cursor = new_cursor; - trace!( + log::trace!( "alloc_slow: Bump allocation size: {}, result: {}, new_cursor: {}, limit: {}", size, result, @@ -201,10 +202,10 @@ impl BumpAllocator { let block_size = (size + BLOCK_MASK) & (!BLOCK_MASK); let acquired_start = self.space.acquire(self.tls, bytes_to_pages_up(block_size)); if acquired_start.is_zero() { - trace!("Failed to acquire a new block"); + log::trace!("Failed to acquire a new block"); acquired_start } else { - trace!( + log::trace!( "Acquired a new block of size {} with start address {}", block_size, acquired_start diff --git a/src/util/alloc/free_list_allocator.rs b/src/util/alloc/free_list_allocator.rs index 28958a1759..2a37a5cdfd 100644 --- a/src/util/alloc/free_list_allocator.rs +++ b/src/util/alloc/free_list_allocator.rs @@ -6,6 +6,7 @@ use crate::policy::marksweepspace::native_ms::*; use crate::util::alloc::allocator; use crate::util::alloc::Allocator; use crate::util::linear_scan::Region; +use crate::util::log; use crate::util::Address; use crate::util::VMThread; use crate::vm::VMBinding; @@ -108,7 +109,7 @@ impl Allocator for FreeListAllocator { offset: usize, need_poll: bool, ) -> Address { - trace!("allow slow precise stress s={}", size); + log::trace!("allow slow 
precise stress s={}", size); if need_poll { self.acquire_global_block(0, 0, true); } @@ -298,13 +299,13 @@ impl FreeListAllocator { loop { match self.space.acquire_block(self.tls, size, align) { crate::policy::marksweepspace::native_ms::BlockAcquireResult::Exhausted => { - debug!("Acquire global block: None"); + log::debug!("Acquire global block: None"); // GC return None; } crate::policy::marksweepspace::native_ms::BlockAcquireResult::Fresh(block) => { - debug!("Acquire global block: Fresh {:?}", block); + log::debug!("Acquire global block: Fresh {:?}", block); self.add_to_available_blocks(bin, block, stress_test); self.init_block(block, self.available_blocks[bin].size); @@ -312,7 +313,7 @@ impl FreeListAllocator { } crate::policy::marksweepspace::native_ms::BlockAcquireResult::AbandonedAvailable(block) => { - debug!("Acquire global block: AbandonedAvailable {:?}", block); + log::debug!("Acquire global block: AbandonedAvailable {:?}", block); block.store_tls(self.tls); if block.has_free_cells() { self.add_to_available_blocks(bin, block, stress_test); @@ -323,7 +324,7 @@ impl FreeListAllocator { } crate::policy::marksweepspace::native_ms::BlockAcquireResult::AbandonedUnswept(block) => { - debug!("Acquire global block: AbandonedUnswep {:?}", block); + log::debug!("Acquire global block: AbandonedUnswep {:?}", block); block.store_tls(self.tls); block.sweep::(); if block.has_free_cells() { diff --git a/src/util/alloc/immix_allocator.rs b/src/util/alloc/immix_allocator.rs index abff840937..38f74fc443 100644 --- a/src/util/alloc/immix_allocator.rs +++ b/src/util/alloc/immix_allocator.rs @@ -8,6 +8,7 @@ use crate::policy::space::Space; use crate::util::alloc::allocator::get_maximum_aligned_size; use crate::util::alloc::Allocator; use crate::util::linear_scan::Region; +use crate::util::log; use crate::util::opaque_pointer::VMThread; use crate::util::rust_util::unlikely; use crate::util::Address; @@ -73,7 +74,7 @@ impl Allocator for ImmixAllocator { let new_cursor = result + size; if new_cursor > self.bump_pointer.limit { - trace!( + log::trace!( "{:?}: Thread local buffer used up, go to alloc slow path", self.tls ); @@ -88,7 +89,7 @@ impl Allocator for ImmixAllocator { // Simple bump allocation. fill_alignment_gap::(self.bump_pointer.cursor, result); self.bump_pointer.cursor = new_cursor; - trace!( + log::trace!( "{:?}: Bump allocation size: {}, result: {}, new_cursor: {}, limit: {}", self.tls, size, @@ -102,7 +103,7 @@ impl Allocator for ImmixAllocator { /// Acquire a clean block from ImmixSpace for allocation. fn alloc_slow_once(&mut self, size: usize, align: usize, offset: usize) -> Address { - trace!("{:?}: alloc_slow_once", self.tls); + log::trace!("{:?}: alloc_slow_once", self.tls); self.acquire_clean_block(size, align, offset) } @@ -117,11 +118,11 @@ impl Allocator for ImmixAllocator { offset: usize, need_poll: bool, ) -> Address { - trace!("{:?}: alloc_slow_once_precise_stress", self.tls); + log::trace!("{:?}: alloc_slow_once_precise_stress", self.tls); // If we are required to make a poll, we call acquire_clean_block() which will acquire memory // from the space which includes a GC poll. if need_poll { - trace!( + log::trace!( "{:?}: alloc_slow_once_precise_stress going to poll", self.tls ); @@ -129,7 +130,7 @@ impl Allocator for ImmixAllocator { // Set fake limits so later allocation will fail in the fastpath, and end up going to this // special slowpath. 
self.set_limit_for_stress(); - trace!( + log::trace!( "{:?}: alloc_slow_once_precise_stress done - forced stress poll", self.tls ); @@ -144,7 +145,7 @@ impl Allocator for ImmixAllocator { let ret = if self.require_new_block(size, align, offset) { // We don't have enough space in thread local block to service the allocation request, // hence allocate a new block - trace!( + log::trace!( "{:?}: alloc_slow_once_precise_stress - acquire new block", self.tls ); @@ -152,7 +153,7 @@ impl Allocator for ImmixAllocator { } else { // This `alloc()` call should always succeed given the if-branch checks if we are out // of thread local block space - trace!("{:?}: alloc_slow_once_precise_stress - alloc()", self.tls,); + log::trace!("{:?}: alloc_slow_once_precise_stress - alloc()", self.tls,); self.alloc(size, align, offset) }; // Set fake limits @@ -191,7 +192,7 @@ impl ImmixAllocator { /// Large-object (larger than a line) bump allocation. fn overflow_alloc(&mut self, size: usize, align: usize, offset: usize) -> Address { - trace!("{:?}: overflow_alloc", self.tls); + log::trace!("{:?}: overflow_alloc", self.tls); let start = align_allocation_no_fill::(self.large_bump_pointer.cursor, align, offset); let end = start + size; if end > self.large_bump_pointer.limit { @@ -208,7 +209,7 @@ impl ImmixAllocator { /// Bump allocate small objects into recyclable lines (i.e. holes). fn alloc_slow_hot(&mut self, size: usize, align: usize, offset: usize) -> Address { - trace!("{:?}: alloc_slow_hot", self.tls); + log::trace!("{:?}: alloc_slow_hot", self.tls); if self.acquire_recyclable_lines(size, align, offset) { // If stress test is active, then we need to go to the slow path instead of directly // calling `alloc()`. This is because the `acquire_recyclable_lines()` function @@ -241,7 +242,7 @@ impl ImmixAllocator { // Find recyclable lines. Update the bump allocation cursor and limit. self.bump_pointer.cursor = start_line.start(); self.bump_pointer.limit = end_line.start(); - trace!( + log::trace!( "{:?}: acquire_recyclable_lines -> {:?} [{:?}, {:?}) {:?}", self.tls, self.line, @@ -278,7 +279,7 @@ impl ImmixAllocator { fn acquire_recyclable_block(&mut self) -> bool { match self.immix_space().get_reusable_block(self.copy) { Some(block) => { - trace!("{:?}: acquire_recyclable_block -> {:?}", self.tls, block); + log::trace!("{:?}: acquire_recyclable_block -> {:?}", self.tls, block); // Set the hole-searching cursor to the start of this block. self.line = Some(block.start_line()); true @@ -292,7 +293,7 @@ impl ImmixAllocator { match self.immix_space().get_clean_block(self.tls, self.copy) { None => Address::ZERO, Some(block) => { - trace!( + log::trace!( "{:?}: Acquired a new block {:?} -> {:?}", self.tls, block.start(), @@ -342,7 +343,7 @@ impl ImmixAllocator { let new_limit = unsafe { Address::from_usize(self.bump_pointer.limit - self.bump_pointer.cursor) }; self.bump_pointer.limit = new_limit; - trace!( + log::trace!( "{:?}: set_limit_for_stress. normal c {} l {} -> {}", self.tls, self.bump_pointer.cursor, @@ -357,7 +358,7 @@ impl ImmixAllocator { Address::from_usize(self.large_bump_pointer.limit - self.large_bump_pointer.cursor) }; self.large_bump_pointer.limit = new_lg_limit; - trace!( + log::trace!( "{:?}: set_limit_for_stress. 
large c {} l {} -> {}", self.tls, self.large_bump_pointer.cursor, @@ -376,7 +377,7 @@ impl ImmixAllocator { let old_limit = self.bump_pointer.limit; let new_limit = self.bump_pointer.cursor + self.bump_pointer.limit.as_usize(); self.bump_pointer.limit = new_limit; - trace!( + log::trace!( "{:?}: restore_limit_for_stress. normal c {} l {} -> {}", self.tls, self.bump_pointer.cursor, @@ -390,7 +391,7 @@ impl ImmixAllocator { let new_lg_limit = self.large_bump_pointer.cursor + self.large_bump_pointer.limit.as_usize(); self.large_bump_pointer.limit = new_lg_limit; - trace!( + log::trace!( "{:?}: restore_limit_for_stress. large c {} l {} -> {}", self.tls, self.large_bump_pointer.cursor, diff --git a/src/util/alloc/markcompact_allocator.rs b/src/util/alloc/markcompact_allocator.rs index 01b2326ac9..b75ec7ebb7 100644 --- a/src/util/alloc/markcompact_allocator.rs +++ b/src/util/alloc/markcompact_allocator.rs @@ -4,6 +4,7 @@ use super::allocator::AllocatorContext; use super::BumpAllocator; use crate::policy::space::Space; use crate::util::alloc::Allocator; +use crate::util::log; use crate::util::opaque_pointer::*; use crate::util::Address; use crate::vm::VMBinding; @@ -64,7 +65,7 @@ impl Allocator for MarkCompactAllocator { } fn alloc_slow_once(&mut self, size: usize, align: usize, offset: usize) -> Address { - trace!("alloc_slow"); + log::trace!("alloc_slow"); self.bump_allocator.alloc_slow_once(size, align, offset) } diff --git a/src/util/copy/mod.rs b/src/util/copy/mod.rs index aaea3b425a..a1102ce87d 100644 --- a/src/util/copy/mod.rs +++ b/src/util/copy/mod.rs @@ -8,12 +8,15 @@ use crate::policy::copyspace::CopySpaceCopyContext; use crate::policy::immix::ImmixSpace; use crate::policy::immix::{ImmixCopyContext, ImmixHybridCopyContext}; use crate::policy::space::Space; +#[cfg(debug_assertions)] +use crate::util::log; use crate::util::object_forwarding; use crate::util::opaque_pointer::VMWorkerThread; use crate::util::{Address, ObjectReference}; use crate::vm::ObjectModel; use crate::vm::VMBinding; use crate::MMTK; + use std::sync::atomic::Ordering; use enum_map::Enum; @@ -83,7 +86,7 @@ impl GCWorkerCopyContext { ) -> Address { #[cfg(debug_assertions)] if bytes > self.config.constraints.max_non_los_default_alloc_bytes { - warn!( + log::warn!( "Attempted to copy an object of {} bytes (> {}) which should be allocated with LOS and not be copied.", bytes, self.config.constraints.max_non_los_default_alloc_bytes ); diff --git a/src/util/finalizable_processor.rs b/src/util/finalizable_processor.rs index 70f97bfd52..10dc1ae64a 100644 --- a/src/util/finalizable_processor.rs +++ b/src/util/finalizable_processor.rs @@ -1,12 +1,14 @@ use crate::plan::is_nursery_gc; use crate::scheduler::gc_work::ProcessEdgesWork; use crate::scheduler::{GCWork, GCWorker, WorkBucketStage}; +use crate::util::log; use crate::util::reference_processor::RescanReferences; use crate::util::ObjectReference; use crate::util::VMWorkerThread; use crate::vm::Finalizable; use crate::vm::{Collection, VMBinding}; use crate::MMTK; + use std::marker::PhantomData; /// A special processor for Finalizable objects. 
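// As an illustrative sketch only (the function and messages below are hypothetical, not
// part of this diff), a module that has switched to the wrapper uses it like this:
//
//     use crate::util::log;
//
//     fn scan_candidates(n: usize) {
//         // Gated by `HOT_LOG_ENABLED`: compiled out of release builds unless the
//         // `hot_log` Cargo feature is enabled.
//         log::trace!("Scanning {} finalization candidates", n);
//         // `info!`, `warn!` and `error!` are plain re-exports from the `log` crate
//         // and are always compiled in, subject to the runtime log level.
//         log::info!("Finished scanning candidates");
//     }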
@@ -53,10 +55,10 @@ impl FinalizableProcessor { for mut f in self.candidates.drain(start..).collect::>() { let reff = f.get_reference(); - trace!("Pop {:?} for finalization", reff); + log::trace!("Pop {:?} for finalization", reff); if reff.is_live() { FinalizableProcessor::::forward_finalizable_reference(e, &mut f); - trace!("{:?} is live, push {:?} back to candidates", reff, f); + log::trace!("{:?} is live, push {:?} back to candidates", reff, f); self.candidates.push(f); continue; } @@ -153,7 +155,7 @@ impl GCWork for Finalization { } let mut finalizable_processor = mmtk.finalizable_processor.lock().unwrap(); - debug!( + log::debug!( "Finalization, {} objects in candidates, {} objects ready to finalize", finalizable_processor.candidates.len(), finalizable_processor.ready_for_finalize.len() @@ -162,7 +164,7 @@ impl GCWork for Finalization { let mut w = E::new(vec![], false, mmtk, WorkBucketStage::FinalRefClosure); w.set_worker(worker); finalizable_processor.scan(worker.tls, &mut w, is_nursery_gc(mmtk.get_plan())); - debug!( + log::debug!( "Finished finalization, {} objects in candidates, {} objects ready to finalize", finalizable_processor.candidates.len(), finalizable_processor.ready_for_finalize.len() @@ -180,14 +182,14 @@ pub struct ForwardFinalization(PhantomData); impl GCWork for ForwardFinalization { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { - trace!("Forward finalization"); + log::trace!("Forward finalization"); let mut finalizable_processor = mmtk.finalizable_processor.lock().unwrap(); let mut w = E::new(vec![], false, mmtk, WorkBucketStage::FinalizableForwarding); w.set_worker(worker); finalizable_processor.forward_candidate(&mut w, is_nursery_gc(mmtk.get_plan())); finalizable_processor.forward_finalizable(&mut w, is_nursery_gc(mmtk.get_plan())); - trace!("Finished forwarding finlizable"); + log::trace!("Finished forwarding finlizable"); } } impl ForwardFinalization { diff --git a/src/util/heap/freelistpageresource.rs b/src/util/heap/freelistpageresource.rs index aa66fe232d..42e481fda1 100644 --- a/src/util/heap/freelistpageresource.rs +++ b/src/util/heap/freelistpageresource.rs @@ -14,10 +14,12 @@ use crate::util::heap::layout::vm_layout::*; use crate::util::heap::layout::CreateFreeListResult; use crate::util::heap::pageresource::CommonPageResource; use crate::util::heap::space_descriptor::SpaceDescriptor; +use crate::util::log; use crate::util::memory; use crate::util::opaque_pointer::*; use crate::util::raw_memory_freelist::RawMemoryFreeList; use crate::vm::*; + use std::marker::PhantomData; const UNINITIALIZED_WATER_MARK: i32 = -1; @@ -153,9 +155,10 @@ impl FreeListPageResource { // If it is RawMemoryFreeList, it will occupy `space_displacement` bytes at the start of // the space. We add it to the start address. let actual_start = start + space_displacement; - debug!( + log::debug!( " in new_contiguous: space_displacement = {:?}, actual_start = {}", - space_displacement, actual_start + space_displacement, + actual_start ); let growable = cfg!(target_pointer_width = "64"); @@ -195,7 +198,7 @@ impl FreeListPageResource { // Discontiguous free list page resources are only used by `Map32` which uses // `IntArrayFreeList` exclusively. It does not have space displacement. debug_assert_eq!(space_displacement, 0); - debug!("new_discontiguous. start: {start})"); + log::debug!("new_discontiguous. 
start: {start})"); FreeListPageResource { common: CommonPageResource::new(false, true, vm_map), diff --git a/src/util/heap/gc_trigger.rs b/src/util/heap/gc_trigger.rs index 94ba63fe6e..dd5ad286e6 100644 --- a/src/util/heap/gc_trigger.rs +++ b/src/util/heap/gc_trigger.rs @@ -6,9 +6,11 @@ use crate::plan::Plan; use crate::policy::space::Space; use crate::util::constants::BYTES_IN_PAGE; use crate::util::conversions; +use crate::util::log; use crate::util::options::{GCTriggerSelector, Options, DEFAULT_MAX_NURSERY, DEFAULT_MIN_NURSERY}; use crate::vm::VMBinding; use crate::MMTK; + use std::mem::MaybeUninit; use std::sync::atomic::AtomicUsize; use std::sync::Arc; @@ -76,7 +78,7 @@ impl GCTrigger { .policy .is_gc_required(space_full, space.map(|s| SpaceStats::new(s)), plan) { - info!( + log::info!( "[POLL] {}{} ({}/{} pages)", if let Some(space) = space { format!("{}: ", space.get_name()) @@ -121,7 +123,7 @@ impl GCTrigger { let max_bytes = heap_size_bytes as f64 * max; let max_bytes = conversions::raw_align_up(max_bytes as usize, BYTES_IN_PAGE); if max_bytes > DEFAULT_MAX_NURSERY { - warn!("Proportional nursery with max size {} ({}) is larger than DEFAULT_MAX_NURSERY ({}). Use DEFAULT_MAX_NURSERY instead.", max, max_bytes, DEFAULT_MAX_NURSERY); + log::warn!("Proportional nursery with max size {} ({}) is larger than DEFAULT_MAX_NURSERY ({}). Use DEFAULT_MAX_NURSERY instead.", max, max_bytes, DEFAULT_MAX_NURSERY); DEFAULT_MAX_NURSERY } else { max_bytes @@ -144,7 +146,7 @@ impl GCTrigger { * min; let min_bytes = conversions::raw_align_up(min_bytes as usize, BYTES_IN_PAGE); if min_bytes < DEFAULT_MIN_NURSERY { - warn!("Proportional nursery with min size {} ({}) is smaller than DEFAULT_MIN_NURSERY ({}). Use DEFAULT_MIN_NURSERY instead.", min, min_bytes, DEFAULT_MIN_NURSERY); + log::warn!("Proportional nursery with min size {} ({}) is smaller than DEFAULT_MIN_NURSERY ({}). 
Use DEFAULT_MIN_NURSERY instead.", min, min_bytes, DEFAULT_MIN_NURSERY); DEFAULT_MIN_NURSERY } else { min_bytes @@ -362,13 +364,13 @@ impl MemBalancerStats { .gc_release_live_pages .saturating_sub(self.gc_end_live_pages); self.allocation_pages = promoted as f64; - trace!( + log::trace!( "promoted = mature live before release {} - mature live at prev gc end {} = {}", self.gc_release_live_pages, self.gc_end_live_pages, promoted ); - trace!( + log::trace!( "allocated pages (accumulated to) = {}", self.allocation_pages ); @@ -383,7 +385,7 @@ impl MemBalancerStats { self.gc_end_live_pages = plan.get_mature_reserved_pages(); // Use live pages as an estimate for pages traversed during GC self.collection_pages = self.gc_end_live_pages as f64; - trace!( + log::trace!( "collected pages = mature live at gc end {} - mature live at gc release {} = {}", self.gc_release_live_pages, self.gc_end_live_pages, @@ -404,7 +406,7 @@ impl MemBalancerStats { .get_plan() .get_reserved_pages() .saturating_sub(self.gc_end_live_pages) as f64; - trace!( + log::trace!( "allocated pages = used {} - live in last gc {} = {}", mmtk.get_plan().get_reserved_pages(), self.gc_end_live_pages, @@ -413,14 +415,14 @@ impl MemBalancerStats { } fn non_generational_mem_stats_on_gc_release(&mut self, mmtk: &'static MMTK) { self.gc_release_live_pages = mmtk.get_plan().get_reserved_pages(); - trace!("live before release = {}", self.gc_release_live_pages); + log::trace!("live before release = {}", self.gc_release_live_pages); } fn non_generational_mem_stats_on_gc_end(&mut self, mmtk: &'static MMTK) { self.gc_end_live_pages = mmtk.get_plan().get_reserved_pages(); - trace!("live pages = {}", self.gc_end_live_pages); + log::trace!("live pages = {}", self.gc_end_live_pages); // Use live pages as an estimate for pages traversed during GC self.collection_pages = self.gc_end_live_pages as f64; - trace!( + log::trace!( "collected pages = live at gc end {} - live at gc release {} = {}", self.gc_release_live_pages, self.gc_end_live_pages, @@ -445,11 +447,11 @@ impl GCTriggerPolicy for MemBalancerTrigger { } fn on_gc_start(&self, mmtk: &'static MMTK) { - trace!("=== on_gc_start ==="); + log::trace!("=== on_gc_start ==="); self.access_stats(|stats| { stats.gc_start_time = Instant::now(); stats.allocation_time += (stats.gc_start_time - stats.gc_end_time).as_secs_f64(); - trace!( + log::trace!( "gc_start = {:?}, allocation_time = {}", stats.gc_start_time, stats.allocation_time @@ -464,7 +466,7 @@ impl GCTriggerPolicy for MemBalancerTrigger { } fn on_gc_release(&self, mmtk: &'static MMTK) { - trace!("=== on_gc_release ==="); + log::trace!("=== on_gc_release ==="); self.access_stats(|stats| { if let Some(plan) = mmtk.get_plan().generational() { stats.generational_mem_stats_on_gc_release(plan); @@ -475,11 +477,11 @@ impl GCTriggerPolicy for MemBalancerTrigger { } fn on_gc_end(&self, mmtk: &'static MMTK) { - trace!("=== on_gc_end ==="); + log::trace!("=== on_gc_end ==="); self.access_stats(|stats| { stats.gc_end_time = Instant::now(); stats.collection_time += (stats.gc_end_time - stats.gc_start_time).as_secs_f64(); - trace!( + log::trace!( "gc_end = {:?}, collection_time = {}", stats.gc_end_time, stats.collection_time @@ -552,7 +554,7 @@ impl MemBalancerTrigger { extra_reserve: usize, stats: &mut MemBalancerStats, ) { - trace!("compute new heap limit: {:?}", stats); + log::trace!("compute new heap limit: {:?}", stats); // Constants from the original paper const ALLOCATION_SMOOTH_FACTOR: f64 = 0.95; @@ -584,12 +586,12 @@ impl MemBalancerTrigger { 
stats.collection_time, COLLECTION_SMOOTH_FACTOR, ); - trace!( + log::trace!( "after smoothing, alloc mem = {}, alloc_time = {}", alloc_mem, alloc_time ); - trace!( + log::trace!( "after smoothing, gc mem = {}, gc_time = {}", gc_mem, gc_time @@ -623,7 +625,7 @@ impl MemBalancerTrigger { // This is the optimal heap limit due to mem balancer. We will need to clamp the value to the defined min/max range. let optimal_heap = live + e as usize + extra_reserve + pending_pages; - trace!( + log::trace!( "optimal = live {} + sqrt(live) {} + extra {}", live, e, @@ -632,9 +634,12 @@ impl MemBalancerTrigger { // The new heap size must be within min/max. let new_heap = optimal_heap.clamp(self.min_heap_pages, self.max_heap_pages); - debug!( + log::debug!( "MemBalander: new heap limit = {} pages (optimal = {}, clamped to [{}, {}])", - new_heap, optimal_heap, self.min_heap_pages, self.max_heap_pages + new_heap, + optimal_heap, + self.min_heap_pages, + self.max_heap_pages ); self.current_heap_pages.store(new_heap, Ordering::Relaxed); } diff --git a/src/util/heap/layout/byte_map_mmapper.rs b/src/util/heap/layout/byte_map_mmapper.rs index ff5dc8c8fe..49ea65348f 100644 --- a/src/util/heap/layout/byte_map_mmapper.rs +++ b/src/util/heap/layout/byte_map_mmapper.rs @@ -1,11 +1,12 @@ use super::mmapper::MapState; use super::Mmapper; -use crate::util::Address; - use crate::util::constants::*; use crate::util::conversions::pages_to_bytes; use crate::util::heap::layout::vm_layout::*; +use crate::util::log; use crate::util::memory::MmapStrategy; +use crate::util::Address; + use std::fmt; use std::sync::atomic::Ordering; use std::sync::Mutex; @@ -47,7 +48,7 @@ impl Mmapper for ByteMapMmapper { fn ensure_mapped(&self, start: Address, pages: usize, strategy: MmapStrategy) -> Result<()> { let start_chunk = Self::address_to_mmap_chunks_down(start); let end_chunk = Self::address_to_mmap_chunks_up(start + pages_to_bytes(pages)); - trace!( + log::trace!( "Calling ensure_mapped with start={:?} and {} pages, {}-{}", start, pages, @@ -76,7 +77,7 @@ impl Mmapper for ByteMapMmapper { ) -> Result<()> { let start_chunk = Self::address_to_mmap_chunks_down(start); let end_chunk = Self::address_to_mmap_chunks_up(start + pages_to_bytes(pages)); - trace!( + log::trace!( "Calling quarantine_address_range with start={:?} and {} pages, {}-{}", start, pages, diff --git a/src/util/heap/layout/map32.rs b/src/util/heap/layout/map32.rs index cf209f1186..5862cb7312 100644 --- a/src/util/heap/layout/map32.rs +++ b/src/util/heap/layout/map32.rs @@ -7,7 +7,9 @@ use crate::util::heap::layout::heap_parameters::*; use crate::util::heap::layout::vm_layout::*; use crate::util::heap::space_descriptor::SpaceDescriptor; use crate::util::int_array_freelist::IntArrayFreeList; +use crate::util::log; use crate::util::Address; + use std::cell::UnsafeCell; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Mutex, MutexGuard}; @@ -77,7 +79,7 @@ impl VMMap for Map32 { self.descriptor_map[index].is_empty(), "Conflicting virtual address request" ); - debug!( + log::debug!( "Set descriptor {:?} for Chunk {}", descriptor, conversions::chunk_index_to_address(index) @@ -168,7 +170,7 @@ impl VMMap for Map32 { } #[allow(clippy::while_immutable_condition)] fn free_all_chunks(&self, any_chunk: Address) { - debug!("free_all_chunks: {}", any_chunk); + log::debug!("free_all_chunks: {}", any_chunk); let (_sync, self_mut) = self.mut_self_with_sync(); debug_assert!(any_chunk == conversions::chunk_align_down(any_chunk)); if !any_chunk.is_zero() { @@ -186,7 +188,7 @@ impl 
VMMap for Map32 { } unsafe fn free_contiguous_chunks(&self, start: Address) -> usize { - debug!("free_contiguous_chunks: {}", start); + log::debug!("free_contiguous_chunks: {}", start); let (_sync, _) = self.mut_self_with_sync(); debug_assert!(start == conversions::chunk_align_down(start)); let chunk = start.chunk_index(); @@ -297,7 +299,7 @@ impl Map32 { for offset in 0..chunks { let index = (chunk + offset) as usize; let chunk_start = conversions::chunk_index_to_address(index); - debug!("Clear descriptor for Chunk {}", chunk_start); + log::debug!("Clear descriptor for Chunk {}", chunk_start); self.mut_self().descriptor_map[index] = SpaceDescriptor::UNINITIALIZED; SFT_MAP.clear(chunk_start); } diff --git a/src/util/heap/layout/mmapper.rs b/src/util/heap/layout/mmapper.rs index 9039bfe261..a6814e39f0 100644 --- a/src/util/heap/layout/mmapper.rs +++ b/src/util/heap/layout/mmapper.rs @@ -1,7 +1,9 @@ use crate::util::heap::layout::vm_layout::*; +use crate::util::log; use crate::util::memory::*; use crate::util::rust_util::rev_group::RevisitableGroupByForIterator; use crate::util::Address; + use atomic::{Atomic, Ordering}; use bytemuck::NoUninit; use std::io::Result; @@ -89,7 +91,7 @@ impl MapState { mmap_start: Address, strategy: MmapStrategy, ) -> Result<()> { - trace!( + log::trace!( "Trying to map {} - {}", mmap_start, mmap_start + MMAP_CHUNK_BYTES @@ -114,7 +116,7 @@ impl MapState { mmap_start: Address, strategy: MmapStrategy, ) -> Result<()> { - trace!( + log::trace!( "Trying to quarantine {} - {}", mmap_start, mmap_start + MMAP_CHUNK_BYTES @@ -158,7 +160,7 @@ impl MapState { mmap_start: Address, strategy: MmapStrategy, ) -> Result<()> { - trace!( + log::trace!( "Trying to bulk-quarantine {} - {}", mmap_start, mmap_start + MMAP_CHUNK_BYTES * state_slices.iter().map(|s| s.len()).sum::(), @@ -178,7 +180,7 @@ impl MapState { match group.key { MapState::Unmapped => { - trace!("Trying to quarantine {} - {}", start_addr, end_addr); + log::trace!("Trying to quarantine {} - {}", start_addr, end_addr); mmap_noreserve(start_addr, end_addr - start_addr, strategy)?; for state in group { @@ -186,10 +188,10 @@ impl MapState { } } MapState::Quarantined => { - trace!("Already quarantine {} - {}", start_addr, end_addr); + log::trace!("Already quarantine {} - {}", start_addr, end_addr); } MapState::Mapped => { - trace!("Already mapped {} - {}", start_addr, end_addr); + log::trace!("Already mapped {} - {}", start_addr, end_addr); } MapState::Protected => { panic!("Cannot quarantine protected memory") diff --git a/src/util/heap/monotonepageresource.rs b/src/util/heap/monotonepageresource.rs index 39bb076b24..770f296cc0 100644 --- a/src/util/heap/monotonepageresource.rs +++ b/src/util/heap/monotonepageresource.rs @@ -1,22 +1,22 @@ use super::layout::vm_layout::{BYTES_IN_CHUNK, PAGES_IN_CHUNK}; +use super::layout::VMMap; +use super::pageresource::{PRAllocFail, PRAllocResult}; +use super::PageResource; use crate::policy::space::required_chunks; use crate::util::address::Address; +use crate::util::alloc::embedded_meta_data::*; use crate::util::constants::BYTES_IN_PAGE; use crate::util::conversions::*; -use std::ops::Range; -use std::sync::{Mutex, MutexGuard}; - -use crate::util::alloc::embedded_meta_data::*; use crate::util::heap::layout::vm_layout::LOG_BYTES_IN_CHUNK; use crate::util::heap::pageresource::CommonPageResource; -use crate::util::opaque_pointer::*; - -use super::layout::VMMap; -use super::pageresource::{PRAllocFail, PRAllocResult}; -use super::PageResource; use 
crate::util::heap::space_descriptor::SpaceDescriptor; +use crate::util::log; +use crate::util::opaque_pointer::*; use crate::vm::VMBinding; + use std::marker::PhantomData; +use std::ops::Range; +use std::sync::{Mutex, MutexGuard}; pub struct MonotonePageResource { common: CommonPageResource, @@ -73,16 +73,19 @@ impl PageResource for MonotonePageResource { required_pages: usize, tls: VMThread, ) -> Result { - debug!( + log::debug!( "In MonotonePageResource, reserved_pages = {}, required_pages = {}", - reserved_pages, required_pages + reserved_pages, + required_pages ); let mut new_chunk = false; let mut sync = self.sync.lock().unwrap(); let mut rtn = sync.cursor; - debug!( + log::debug!( "cursor = {}, sentinel = {}, current_chunk = {}", - sync.cursor, sync.sentinel, sync.current_chunk + sync.cursor, + sync.sentinel, + sync.current_chunk ); if cfg!(debug_assertions) { @@ -106,9 +109,9 @@ impl PageResource for MonotonePageResource { } let bytes = pages_to_bytes(required_pages); - debug!("bytes={}", bytes); + log::debug!("bytes={}", bytes); let mut tmp = sync.cursor + bytes; - debug!("tmp={:?}", tmp); + log::debug!("tmp={:?}", tmp); if !self.common().contiguous && tmp > sync.sentinel { /* we're out of virtual memory within our discontiguous region, so ask for more */ @@ -131,12 +134,12 @@ impl PageResource for MonotonePageResource { debug_assert!(rtn >= sync.cursor && rtn < sync.cursor + bytes); if tmp > sync.sentinel { - //debug!("tmp={:?} > sync.sentinel={:?}", tmp, sync.sentinel); + //log::debug!("tmp={:?} > sync.sentinel={:?}", tmp, sync.sentinel); Result::Err(PRAllocFail) } else { - //debug!("tmp={:?} <= sync.sentinel={:?}", tmp, sync.sentinel); + //log::debug!("tmp={:?} <= sync.sentinel={:?}", tmp, sync.sentinel); sync.cursor = tmp; - debug!("update cursor = {}", tmp); + log::debug!("update cursor = {}", tmp); /* In a contiguous space we can bump along into the next chunk, so preserve the currentChunk invariant */ if self.common().contiguous && chunk_align_down(sync.cursor) != sync.current_chunk { @@ -199,7 +202,7 @@ impl MonotonePageResource { fn log_chunk_fields(&self, space_descriptor: SpaceDescriptor, site: &str) { let sync = self.sync.lock().unwrap(); - debug!( + log::debug!( "[{:?}]{}: cursor={}, current_chunk={}, delta={}", space_descriptor, site, diff --git a/src/util/log.rs b/src/util/log.rs new file mode 100644 index 0000000000..7bb95ac967 --- /dev/null +++ b/src/util/log.rs @@ -0,0 +1,55 @@ +//! This module provides utilities for logging +//! +//! It provide wrappers for the logging macros in the `log` crate. Those macros are used the same +//! way as those in the `log` crate, except that log level `DEBUG` and `TRACE` are disabled by +//! default in release build. They will not be compiled into the resulting binary. But they can be +//! enabled by the "hot_log" Cargo feature so that they will be displayed in release build, too. +//! This module is named `log` so that programmers can comfortably write `log::info!` as if the +//! macro were from the `log` crate. + +// This is just the `log` crate. We renamed it in `Cargo.toml` so that we don't accidentally import +// macros such as `log::info!` from the IDE. +use the_log_crate; + +pub(crate) use the_log_crate::{error, info, warn}; + +/// Whether logs of DEBUG and TRACE levels are enabled. +/// In debug build, they are always enabled. +/// In release build, they are not enabled unless the "hot_log" Cargo feature is enabled. 
+pub(crate) const HOT_LOG_ENABLED: bool = cfg!(any(debug_assertions, feature = "hot_log")); + +/// A wrapper of the `debug!` macro in the `log` crate. +/// Does nothing if [`HOT_LOG_ENABLED`] is false. +macro_rules! debug { + (target: $target:expr, $($arg:tt)+) => { + if $crate::util::log::HOT_LOG_ENABLED { + the_log_crate::debug!(target: $target, $($arg)+) + } + }; + ($($arg:tt)+) => { + if $crate::util::log::HOT_LOG_ENABLED { + the_log_crate::debug!($($arg)+) + } + } +} + +/// A wrapper of the `trace!` macro in the `log` crate. +/// Does nothing if [`HOT_LOG_ENABLED`] is false. +macro_rules! trace { + (target: $target:expr, $($arg:tt)+) => { + if $crate::util::log::HOT_LOG_ENABLED { + the_log_crate::trace!(target: $target, $($arg)+) + } + }; + ($($arg:tt)+) => { + if $crate::util::log::HOT_LOG_ENABLED { + the_log_crate::trace!($($arg)+) + } + } +} + +// By default, a macro has no path-based scope. +// The following allows other modules to access the macros with `crate::util::log::debug` +// and `crate::util::log::trace`. +pub(crate) use debug; +pub(crate) use trace; diff --git a/src/util/logger.rs b/src/util/logger.rs index cc56b93786..cb22db569c 100644 --- a/src/util/logger.rs +++ b/src/util/logger.rs @@ -1,4 +1,6 @@ -use log::{self, SetLoggerError}; +//! This module sets up `env_logger` as the default logger. + +use the_log_crate::{self, SetLoggerError}; /// Attempt to init a env_logger for MMTk. pub fn try_init() -> Result<(), SetLoggerError> { diff --git a/src/util/memory.rs b/src/util/memory.rs index 771068aaff..daa5f39775 100644 --- a/src/util/memory.rs +++ b/src/util/memory.rs @@ -1,7 +1,9 @@ use crate::util::alloc::AllocationError; +use crate::util::log; use crate::util::opaque_pointer::*; use crate::util::Address; use crate::vm::{Collection, VMBinding}; + use bytemuck::NoUninit; use libc::{PROT_EXEC, PROT_NONE, PROT_READ, PROT_WRITE}; use std::io::{Error, Result}; @@ -202,7 +204,7 @@ pub fn handle_mmap_error( // From Rust nightly 2021-05-12, we started to see Rust added this ErrorKind. ErrorKind::OutOfMemory => { // Signal `MmapOutOfMemory`. Expect the VM to abort immediately. - trace!("Signal MmapOutOfMemory!"); + log::trace!("Signal MmapOutOfMemory!"); VM::VMCollection::out_of_memory(tls, AllocationError::MmapOutOfMemory); unreachable!() } @@ -214,7 +216,7 @@ pub fn handle_mmap_error( // If it is OOM, we invoke out_of_memory() through the VM interface. if os_errno == libc::ENOMEM { // Signal `MmapOutOfMemory`. Expect the VM to abort immediately. 
- trace!("Signal MmapOutOfMemory!"); + log::trace!("Signal MmapOutOfMemory!"); VM::VMCollection::out_of_memory(tls, AllocationError::MmapOutOfMemory); unreachable!() } diff --git a/src/util/metadata/side_metadata/global.rs b/src/util/metadata/side_metadata/global.rs index 2a64b33848..97a1396418 100644 --- a/src/util/metadata/side_metadata/global.rs +++ b/src/util/metadata/side_metadata/global.rs @@ -2,11 +2,13 @@ use super::*; use crate::util::constants::{BYTES_IN_PAGE, BYTES_IN_WORD, LOG_BITS_IN_BYTE}; use crate::util::conversions::raw_align_up; use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK; +use crate::util::log; use crate::util::memory; use crate::util::metadata::metadata_val_traits::*; #[cfg(feature = "vo_bit")] use crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC; use crate::util::Address; + use num_traits::FromPrimitive; use ranges::BitByteRange; use std::fmt; @@ -116,7 +118,7 @@ impl SideMetadataSpec { pub(crate) fn assert_metadata_mapped(&self, data_addr: Address) { let meta_start = address_to_meta_address(self, data_addr).align_down(BYTES_IN_PAGE); - trace!( + log::trace!( "ensure_metadata_is_mapped({}).meta_start({})", data_addr, meta_start @@ -1014,7 +1016,7 @@ impl SideMetadataSpec { // TODO: We should be able to optimize further for this case. However, we need to be careful that the side metadata // is not contiguous, and we need to skip to the next chunk's side metadata when we search to a different chunk. // This won't be used for VO bit, as VO bit is global and is always contiguous. So for now, I am not bothered to do it. - warn!("We are trying to search non zero bits in an discontiguous side metadata. The performance is slow, as MMTk does not optimize for this case."); + log::warn!("We are trying to search non zero bits in an discontiguous side metadata. The performance is slow, as MMTk does not optimize for this case."); self.find_prev_non_zero_value_simple::(data_addr, search_limit_bytes) } } @@ -1156,7 +1158,7 @@ impl SideMetadataSpec { // side mark bits, we need to refactor `bulk_update_metadata` to support `FnMut`, too, // and use it to apply `scan_non_zero_values_fast` on each contiguous side metadata // range. - warn!( + log::warn!( "We are trying to search for non zero bits in a discontiguous side metadata \ or the metadata has more than one bit per region. \ The performance is slow, as MMTk does not optimize for this case." @@ -1372,7 +1374,7 @@ impl SideMetadataContext { /// Tries to map the required metadata space and returns `true` is successful. /// This can be called at page granularity. pub fn try_map_metadata_space(&self, start: Address, size: usize) -> Result<()> { - debug!( + log::debug!( "try_map_metadata_space({}, 0x{:x}, {}, {})", start, size, @@ -1390,7 +1392,7 @@ impl SideMetadataContext { /// /// NOTE: Accessing addresses in this range will produce a segmentation fault if swap-space is not mapped using the `try_map_metadata_space` function. pub fn try_map_metadata_address_range(&self, start: Address, size: usize) -> Result<()> { - debug!( + log::debug!( "try_map_metadata_address_range({}, 0x{:x}, {}, {})", start, size, @@ -1472,7 +1474,7 @@ impl SideMetadataContext { /// the actual unmapped space will be bigger than what you specify. 
#[cfg(test)] pub fn ensure_unmap_metadata_space(&self, start: Address, size: usize) { - trace!("ensure_unmap_metadata_space({}, 0x{:x})", start, size); + log::trace!("ensure_unmap_metadata_space({}, 0x{:x})", start, size); debug_assert!(start.is_aligned_to(BYTES_IN_PAGE)); debug_assert!(size % BYTES_IN_PAGE == 0); diff --git a/src/util/metadata/side_metadata/helpers.rs b/src/util/metadata/side_metadata/helpers.rs index 0f0a442d9a..5eea0075bd 100644 --- a/src/util/metadata/side_metadata/helpers.rs +++ b/src/util/metadata/side_metadata/helpers.rs @@ -3,11 +3,13 @@ use super::SideMetadataSpec; use crate::util::constants::LOG_BYTES_IN_PAGE; use crate::util::constants::{BITS_IN_WORD, BYTES_IN_PAGE, LOG_BITS_IN_BYTE}; use crate::util::heap::layout::vm_layout::VMLayout; +use crate::util::log; use crate::util::memory::MmapStrategy; #[cfg(target_pointer_width = "32")] use crate::util::metadata::side_metadata::address_to_chunked_meta_address; use crate::util::Address; use crate::MMAPPER; + use std::io::Result; /// Performs address translation in contiguous metadata spaces (e.g. global and policy-specific in 64-bits, and global in 32-bits) @@ -93,7 +95,7 @@ pub(super) fn align_metadata_address( #[cfg(test)] pub(crate) fn ensure_munmap_metadata(start: Address, size: usize) { use crate::util::memory; - trace!("ensure_munmap_metadata({}, 0x{:x})", start, size); + log::trace!("ensure_munmap_metadata({}, 0x{:x})", start, size); assert!(memory::munmap(start, size).is_ok()) } @@ -172,7 +174,7 @@ pub(crate) fn address_to_meta_address( #[cfg(target_pointer_width = "64")] let res = { address_to_contiguous_meta_address(metadata_spec, data_addr) }; - trace!( + log::trace!( "address_to_meta_address({:?}, addr: {}) -> 0x{:x}", metadata_spec, data_addr, diff --git a/src/util/metadata/side_metadata/helpers_32.rs b/src/util/metadata/side_metadata/helpers_32.rs index ef413c0cc6..a7a6f2f256 100644 --- a/src/util/metadata/side_metadata/helpers_32.rs +++ b/src/util/metadata/side_metadata/helpers_32.rs @@ -143,7 +143,7 @@ pub(super) fn try_map_per_chunk_metadata_space( panic!("We have failed mmap"); } } - trace!( + log::trace!( "try_map_per_chunk_metadata_space({}, 0x{:x}, 0x{:x}) -> {:#?}", start, size, @@ -160,7 +160,7 @@ pub(super) fn try_map_per_chunk_metadata_space( total_mapped += local_per_chunk; } - trace!( + log::trace!( "try_map_per_chunk_metadata_space({}, 0x{:x}, 0x{:x}) -> OK(())", start, size, diff --git a/src/util/metadata/side_metadata/sanity.rs b/src/util/metadata/side_metadata/sanity.rs index 48d3c55de9..e5aea0b318 100644 --- a/src/util/metadata/side_metadata/sanity.rs +++ b/src/util/metadata/side_metadata/sanity.rs @@ -1,4 +1,6 @@ +use crate::util::log; use crate::util::Address; + use std::collections::HashMap; use std::io::{Error, ErrorKind, Result}; use std::sync::{Mutex, RwLock}; @@ -380,9 +382,10 @@ fn verify_metadata_address_bound(spec: &SideMetadataSpec, data_addr: Address) { data_addr <= unsafe { Address::from_usize(1usize << vm_layout().log_address_space) }; if !data_addr_in_address_space { - warn!( + log::warn!( "We try get metadata {} for {}, which is not within the address space we should use", - data_addr, spec.name + data_addr, + spec.name ); } diff --git a/src/util/mod.rs b/src/util/mod.rs index d22c29a2e3..ca26ad1c5b 100644 --- a/src/util/mod.rs +++ b/src/util/mod.rs @@ -51,6 +51,8 @@ pub(crate) mod epilogue; pub(crate) mod erase_vm; /// Finalization implementation. 
pub(crate) mod finalizable_processor; +/// Logging wrappers +pub(crate) mod log; /// Logger initialization pub(crate) mod logger; pub(crate) mod object_enum; diff --git a/src/util/object_forwarding.rs b/src/util/object_forwarding.rs index 4b3f2411e9..24c8b1d1c6 100644 --- a/src/util/object_forwarding.rs +++ b/src/util/object_forwarding.rs @@ -1,8 +1,10 @@ use crate::util::copy::*; +use crate::util::log; use crate::util::metadata::MetadataSpec; use crate::util::{constants, ObjectReference}; use crate::vm::ObjectModel; use crate::vm::VMBinding; + use std::sync::atomic::Ordering; const FORWARDING_NOT_TRIGGERED_YET: u8 = 0b00; @@ -193,7 +195,7 @@ pub fn write_forwarding_pointer( get_forwarding_status::(object), ); - trace!("write_forwarding_pointer({}, {})", object, new_object); + log::trace!("write_forwarding_pointer({}, {})", object, new_object); VM::VMObjectModel::LOCAL_FORWARDING_POINTER_SPEC.store_atomic::( object, new_object.to_raw_address().as_usize(), diff --git a/src/util/reference_processor.rs b/src/util/reference_processor.rs index f997c16742..619270bfbf 100644 --- a/src/util/reference_processor.rs +++ b/src/util/reference_processor.rs @@ -7,6 +7,7 @@ use std::vec::Vec; use crate::plan::is_nursery_gc; use crate::scheduler::ProcessEdgesWork; use crate::scheduler::WorkBucketStage; +use crate::util::log; use crate::util::ObjectReference; use crate::util::VMWorkerThread; use crate::vm::ReferenceGlue; @@ -39,17 +40,17 @@ impl ReferenceProcessors { } pub fn add_soft_candidate(&self, reff: ObjectReference) { - trace!("Add soft candidate: {}", reff); + log::trace!("Add soft candidate: {}", reff); self.soft.add_candidate(reff); } pub fn add_weak_candidate(&self, reff: ObjectReference) { - trace!("Add weak candidate: {}", reff); + log::trace!("Add weak candidate: {}", reff); self.weak.add_candidate(reff); } pub fn add_phantom_candidate(&self, reff: ObjectReference) { - trace!("Add phantom candidate: {}", reff); + log::trace!("Add phantom candidate: {}", reff); self.phantom.add_candidate(reff); } @@ -278,7 +279,7 @@ impl ReferenceProcessor { } if !sync.enqueued_references.is_empty() { - trace!("enqueue: {:?}", sync.enqueued_references); + log::trace!("enqueue: {:?}", sync.enqueued_references); VM::VMReferenceGlue::enqueue_references(&sync.enqueued_references, tls); sync.enqueued_references.clear(); } @@ -291,7 +292,7 @@ impl ReferenceProcessor { /// nursery is not used for this. 
pub fn forward(&self, trace: &mut E, _nursery: bool) { let mut sync = self.sync.lock().unwrap(); - debug!("Starting ReferenceProcessor.forward({:?})", self.semantics); + log::debug!("Starting ReferenceProcessor.forward({:?})", self.semantics); // Forward a single reference fn forward_reference( @@ -300,7 +301,7 @@ impl ReferenceProcessor { ) -> ObjectReference { { use crate::vm::ObjectModel; - trace!( + log::trace!( "Forwarding reference: {} (size: {})", reference, ::VMObjectModel::get_current_size(reference) @@ -313,7 +314,7 @@ impl ReferenceProcessor { let new_referent = ReferenceProcessor::trace_forward_object(trace, old_referent); ::VMReferenceGlue::set_referent(reference, new_referent); - trace!( + log::trace!( " referent: {} (forwarded to {})", old_referent, new_referent @@ -321,7 +322,7 @@ impl ReferenceProcessor { } let new_reference = ReferenceProcessor::trace_forward_object(trace, reference); - trace!(" reference: forwarded to {}", new_reference); + log::trace!(" reference: forwarded to {}", new_reference); new_reference } @@ -338,7 +339,7 @@ impl ReferenceProcessor { .map(|reff| forward_reference::(trace, *reff)) .collect(); - debug!("Ending ReferenceProcessor.forward({:?})", self.semantics); + log::debug!("Ending ReferenceProcessor.forward({:?})", self.semantics); // We finish forwarding. No longer accept new candidates. self.disallow_new_candidate(); @@ -352,9 +353,9 @@ impl ReferenceProcessor { fn scan(&self, _nursery: bool) { let mut sync = self.sync.lock().unwrap(); - debug!("Starting ReferenceProcessor.scan({:?})", self.semantics); + log::debug!("Starting ReferenceProcessor.scan({:?})", self.semantics); - trace!( + log::trace!( "{:?} Reference table is {:?}", self.semantics, sync.references @@ -371,7 +372,7 @@ impl ReferenceProcessor { .filter_map(|reff| self.process_reference::(*reff, &mut enqueued_references)) .collect(); - debug!( + log::debug!( "{:?} reference table from {} to {} ({} enqueued)", self.semantics, sync.references.len(), @@ -381,7 +382,7 @@ impl ReferenceProcessor { sync.references = new_set; sync.enqueued_references.extend(enqueued_references); - debug!("Ending ReferenceProcessor.scan({:?})", self.semantics); + log::debug!("Ending ReferenceProcessor.scan({:?})", self.semantics); } /// Retain referent in the reference table. This method deals only with soft references. @@ -393,15 +394,15 @@ impl ReferenceProcessor { let sync = self.sync.lock().unwrap(); - debug!("Starting ReferenceProcessor.retain({:?})", self.semantics); - trace!( + log::debug!("Starting ReferenceProcessor.retain({:?})", self.semantics); + log::trace!( "{:?} Reference table is {:?}", self.semantics, sync.references ); for reference in sync.references.iter() { - trace!("Processing reference: {:?}", reference); + log::trace!("Processing reference: {:?}", reference); if !reference.is_live() { // Reference is currently unreachable but may get reachable by the @@ -412,11 +413,11 @@ impl ReferenceProcessor { if let Some(referent) = ::VMReferenceGlue::get_referent(*reference) { Self::keep_referent_alive(trace, referent); - trace!(" ~> {:?} (retained)", referent); + log::trace!(" ~> {:?} (retained)", referent); } } - debug!("Ending ReferenceProcessor.retain({:?})", self.semantics); + log::debug!("Ending ReferenceProcessor.retain({:?})", self.semantics); } /// Process a reference. 
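// For the hunk below, a sketch of what each rewritten call expands to (based on the
// `trace!` wrapper defined in `src/util/log.rs` above): a call such as
// `log::trace!("Process reference: {}", reference)` becomes roughly
//
//     if crate::util::log::HOT_LOG_ENABLED {
//         the_log_crate::trace!("Process reference: {}", reference)
//     }
//
// In a release build without the `hot_log` feature the condition is a constant `false`,
// so the branch, including evaluation of the format arguments, is optimized away; in
// debug builds, or with `hot_log` enabled, it behaves like the plain `log` crate macro.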
@@ -431,30 +432,30 @@ impl ReferenceProcessor { reference: ObjectReference, enqueued_references: &mut Vec, ) -> Option { - trace!("Process reference: {}", reference); + log::trace!("Process reference: {}", reference); // If the reference is dead, we're done with it. Let it (and // possibly its referent) be garbage-collected. if !reference.is_live() { VM::VMReferenceGlue::clear_referent(reference); - trace!(" UNREACHABLE reference: {}", reference); + log::trace!(" UNREACHABLE reference: {}", reference); return None; } // The reference object is live. let new_reference = Self::get_forwarded_reference(reference); - trace!(" forwarded to: {}", new_reference); + log::trace!(" forwarded to: {}", new_reference); // Get the old referent. let maybe_old_referent = VM::VMReferenceGlue::get_referent(reference); - trace!(" referent: {:?}", maybe_old_referent); + log::trace!(" referent: {:?}", maybe_old_referent); // If the application has cleared the referent the Java spec says // this does not cause the Reference object to be enqueued. We // simply allow the Reference object to fall out of our // waiting list. let Some(old_referent) = maybe_old_referent else { - trace!(" (cleared referent) "); + log::trace!(" (cleared referent) "); return None; }; @@ -463,7 +464,7 @@ impl ReferenceProcessor { // or stronger than the current reference level. let new_referent = Self::get_forwarded_referent(old_referent); debug_assert!(new_referent.is_live()); - trace!(" forwarded referent to: {}", new_referent); + log::trace!(" forwarded referent to: {}", new_referent); // The reference object stays on the waiting list, and the // referent is untouched. The only thing we must do is @@ -476,7 +477,7 @@ impl ReferenceProcessor { Some(new_reference) } else { // Referent is unreachable. Clear the referent and enqueue the reference object. - trace!(" UNREACHABLE referent: {}", old_referent); + log::trace!(" UNREACHABLE referent: {}", old_referent); VM::VMReferenceGlue::clear_referent(new_reference); enqueued_references.push(new_reference); diff --git a/src/util/sanity/sanity_checker.rs b/src/util/sanity/sanity_checker.rs index d3d3cdb007..91ce7bb62c 100644 --- a/src/util/sanity/sanity_checker.rs +++ b/src/util/sanity/sanity_checker.rs @@ -1,5 +1,6 @@ use crate::plan::Plan; use crate::scheduler::gc_work::*; +use crate::util::log; use crate::util::ObjectReference; use crate::vm::slot::Slot; use crate::vm::*; @@ -127,7 +128,7 @@ impl SanityPrepare

{ impl GCWork for SanityPrepare
{ fn do_work(&mut self, _worker: &mut GCWorker, mmtk: &'static MMTK) { - info!("Sanity GC prepare"); + log::info!("Sanity GC prepare"); { let mut sanity_checker = mmtk.sanity_checker.lock().unwrap(); sanity_checker.refs.clear(); @@ -147,7 +148,7 @@ impl SanityRelease

{ impl GCWork for SanityRelease
{ fn do_work(&mut self, _worker: &mut GCWorker, mmtk: &'static MMTK) { - info!("Sanity GC release"); + log::info!("Sanity GC release"); mmtk.sanity_checker.lock().unwrap().clear_roots_cache(); mmtk.sanity_end(); } @@ -210,7 +211,7 @@ impl ProcessEdgesWork for SanityGCProcessEdges { // Object is not "marked" sanity_checker.refs.insert(object); // "Mark" it - trace!("Sanity mark object {}", object); + log::trace!("Sanity mark object {}", object); self.nodes.enqueue(object); } diff --git a/src/util/slot_logger.rs b/src/util/slot_logger.rs index 7cb231d8dc..d97b27486a 100644 --- a/src/util/slot_logger.rs +++ b/src/util/slot_logger.rs @@ -5,6 +5,7 @@ //! use crate::plan::Plan; +use crate::util::log; use crate::vm::slot::Slot; use crate::vm::VMBinding; use std::collections::HashSet; @@ -32,7 +33,7 @@ impl SlotLogger { /// * `slot` - The slot to log. /// pub fn log_slot(&self, slot: SL) { - trace!("log_slot({:?})", slot); + log::trace!("log_slot({:?})", slot); let mut slot_log = self.slot_log.write().unwrap(); assert!( slot_log.insert(slot), diff --git a/src/util/treadmill.rs b/src/util/treadmill.rs index 96b5eb9e7d..f505aafc8a 100644 --- a/src/util/treadmill.rs +++ b/src/util/treadmill.rs @@ -2,6 +2,7 @@ use std::collections::HashSet; use std::mem::swap; use std::sync::Mutex; +use crate::util::log; use crate::util::ObjectReference; use super::object_enum::ObjectEnumerator; @@ -36,10 +37,10 @@ impl TreadMill { pub fn add_to_treadmill(&self, object: ObjectReference, nursery: bool) { if nursery { - trace!("Adding {} to nursery", object); + log::trace!("Adding {} to nursery", object); self.alloc_nursery.lock().unwrap().insert(object); } else { - trace!("Adding {} to to_space", object); + log::trace!("Adding {} to to_space", object); self.to_space.lock().unwrap().insert(object); } } @@ -95,10 +96,10 @@ impl TreadMill { pub fn flip(&mut self, full_heap: bool) { swap(&mut self.alloc_nursery, &mut self.collect_nursery); - trace!("Flipped alloc_nursery and collect_nursery"); + log::trace!("Flipped alloc_nursery and collect_nursery"); if full_heap { swap(&mut self.from_space, &mut self.to_space); - trace!("Flipped from_space and to_space"); + log::trace!("Flipped from_space and to_space"); } } diff --git a/src/vm/scanning.rs b/src/vm/scanning.rs index 05b6260f32..e98b7eeeb6 100644 --- a/src/vm/scanning.rs +++ b/src/vm/scanning.rs @@ -1,5 +1,6 @@ use crate::plan::Mutator; use crate::scheduler::GCWorker; +use crate::util::log; use crate::util::ObjectReference; use crate::util::VMWorkerThread; use crate::vm::slot::Slot; @@ -14,8 +15,7 @@ pub trait SlotVisitor { /// This lets us use closures as SlotVisitor. 
impl SlotVisitor for F { fn visit_slot(&mut self, slot: SL) { - #[cfg(debug_assertions)] - trace!( + log::trace!( "(FunctionClosure) Visit slot {:?} (pointing to {:?})", slot, slot.load() diff --git a/src/vm/tests/mock_tests/mock_test_allocate_align_offset.rs b/src/vm/tests/mock_tests/mock_test_allocate_align_offset.rs index bef39dcab0..7b25b05540 100644 --- a/src/vm/tests/mock_tests/mock_test_allocate_align_offset.rs +++ b/src/vm/tests/mock_tests/mock_test_allocate_align_offset.rs @@ -17,10 +17,10 @@ pub fn allocate_alignment() { MUTATOR.with_fixture_mut(|fixture| { let min = MockVM::MIN_ALIGNMENT; let max = MockVM::MAX_ALIGNMENT; - info!("Allowed alignment between {} and {}", min, max); + log::info!("Allowed alignment between {} and {}", min, max); let mut align = min; while align <= max { - info!("Test allocation with alignment {}", align); + log::info!("Test allocation with alignment {}", align); let addr = memory_manager::alloc( &mut fixture.mutator, 8, @@ -51,12 +51,13 @@ pub fn allocate_offset() { const OFFSET: usize = 4; let min = MockVM::MIN_ALIGNMENT; let max = MockVM::MAX_ALIGNMENT; - info!("Allowed alignment between {} and {}", min, max); + log::info!("Allowed alignment between {} and {}", min, max); let mut align = min; while align <= max { - info!( + log::info!( "Test allocation with alignment {} and offset {}", - align, OFFSET + align, + OFFSET ); let addr = memory_manager::alloc( &mut fixture.mutator, diff --git a/src/vm/tests/mock_tests/mock_test_nogc_lock_free.rs b/src/vm/tests/mock_tests/mock_test_nogc_lock_free.rs index e78d5e2169..02a511586b 100644 --- a/src/vm/tests/mock_tests/mock_test_nogc_lock_free.rs +++ b/src/vm/tests/mock_tests/mock_test_nogc_lock_free.rs @@ -14,10 +14,10 @@ pub fn nogc_lock_free_allocate() { let mut fixture = MutatorFixture::create(); let min = MockVM::MIN_ALIGNMENT; let max = MockVM::MAX_ALIGNMENT; - info!("Allowed alignment between {} and {}", min, max); + log::info!("Allowed alignment between {} and {}", min, max); let mut align = min; while align <= max { - info!("Test allocation with alignment {}", align); + log::info!("Test allocation with alignment {}", align); let addr = memory_manager::alloc( &mut fixture.mutator, 8, @@ -25,7 +25,7 @@ pub fn nogc_lock_free_allocate() { 0, AllocationSemantics::Default, ); - info!("addr = {}", addr); + log::info!("addr = {}", addr); assert!( addr.is_aligned_to(align), "Expected allocation alignment {}, returned address is {:?}",