diff --git a/src/domain.rs b/src/domain.rs index c83c168..222935b 100644 --- a/src/domain.rs +++ b/src/domain.rs @@ -1041,8 +1041,8 @@ mod tests { #[test] fn create_multiple_unique_domains() { use crate::Singleton; - let domain_1 = unique_domain!(); - let domain_2 = unique_domain!(); + let _domain_1 = unique_domain!(); + let _domain_2 = unique_domain!(); } #[test] diff --git a/src/hazard.rs b/src/hazard.rs index 098b447..63bb06e 100644 --- a/src/hazard.rs +++ b/src/hazard.rs @@ -239,6 +239,94 @@ impl<'domain, F> HazardPointer<'domain, F> { /// /// Note that protecting a given pointer only has an effect if any thread that may drop the /// pointer does so through the same [`Domain`] as this hazard pointer is associated with. + /// + /// It's important to note that this function solely writes the pointer value to the hazard + /// pointer slot. However, this protection alone does not guarantee safety during dereferencing + /// due to two key reasons: + /// + /// 1. The announcement made by the hazard pointer might not be immediately visible to + /// reclaiming threads, especially in a weak memory model. + /// 2. Concurrent threads could already have retired the pointer before the protection. + /// + /// To ensure safety, users need to appropriately synchronize the write operation on a hazard + /// slot and validate that the pointer hasn't already been retired. For synchronization, the + /// library offers an [`asymmetric_light_barrier`] function. It enables reclaiming threads + /// to acknowledge the preceding protection. + /// + /// Manual pointer protection and validation involve the following steps: + /// + /// 1. Acquire a pointer `p`, and manually protect it with a [`HazardPointer`] by calling + /// [`HazardPointer::protect_raw`]. + /// 2. Issue a memory barrier with [`asymmetric_light_barrier`] to enable reclaiming threads + /// to recognize the preceding protection. + /// 3. Validate whether `p` is retired. 
If `p` remains guaranteed as not retired, it is safe + /// for dereferencing. Otherwise, revisit step 1 and retry. + /// + /// The strategy to validate whether `p` is retired would depend on the semantics of + /// the data structures or algorithms. For example, in Harris-Michael linked lists, validation + /// can be done by reloading the pointer from the [`AtomicPtr`] and ensuring that its + /// value has not changed. This strategy works because unlinking the node from its predecessor + /// strictly *happens before* the retirement of that node under the data structure's semantics. + /// + /// # Example + /// + /// ``` + /// use haphazard::{AtomicPtr, HazardPointer, asymmetric_light_barrier}; + /// + /// struct Node { + /// value: usize, + /// next: AtomicPtr, + /// } + /// + /// // Let's imagine a data structure that has the following properties. + /// // + /// // 1. It always has exactly two nodes. + /// // 2. A thread may change its contents by exchanging the `head` pointer with another chain + /// // consisting of two nodes. + /// // 3. After a successful `compare_exchange`, the thread retires popped nodes without + /// // unlinking the first and the second node. + /// // + /// // Note that the link between the first and the second node won't be changed + /// // before the retirement! For this reason, to validate the protection of the second node, + /// // one must reload the head pointer and confirm that it has not changed. + /// let head = + /// AtomicPtr::from(Box::new(Node { + /// value: 0, + /// next: AtomicPtr::from(Box::new(Node { + /// value: 1, + /// next: unsafe { AtomicPtr::new(std::ptr::null_mut()) }, + /// })), + /// })); + /// + /// let mut hp1 = HazardPointer::default(); + /// let mut hp2 = HazardPointer::default(); + /// + /// let (n1, n2) = loop { + /// // The first node can be loaded in a conventional way. 
+ /// let n1 = head.safe_load(&mut hp1).expect("The first node must exist"); + /// + /// // However, the second one cannot, because of the aforementioned reasons. + /// let ptr = n1.next.load_ptr(); + /// // 1. Announce a hazard pointer manually. + /// hp2.protect_raw(ptr); + /// + /// // 2. Synchronize with reclaimers. + /// asymmetric_light_barrier(); + /// + /// // 3. Validate the second protection by reloading the head pointer. + /// if n1 as *const _ == head.load_ptr().cast_const() { + /// // If the link to the head node has not changed, + /// // it is guaranteed that the second node is not retired yet. + /// + /// // Safety: `ptr` is properly protected by `hp2`. + /// let n2 = unsafe { &*ptr }; + /// break (n1, n2); + /// } + /// + /// }; + /// + /// // Here, `n1` and `n2` is safe for dereferencing. + /// ``` pub fn protect_raw(&mut self, ptr: *mut T) where F: 'static, diff --git a/src/lib.rs b/src/lib.rs index b623d86..5160bc3 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -143,7 +143,18 @@ mod pointer; mod record; mod sync; -fn asymmetric_light_barrier() { +/// Issue a memory barrier to announce a protection to reclaiming threads. +/// +/// In most cases, you do not need to use this function, because [`AtomicPtr::safe_load`] and all +/// protection methods of [`HazardPointer`] except [`HazardPointer::protect_raw`] properly protect +/// a pointer and internally call this function to validate protection. +/// +/// However, in specific data structures or algorithms requiring manual pointer protection using +/// [`HazardPointer::protect_raw`], this function can be used to manually synchronize the memory +/// writes with reclaiming threads. +/// +/// See also [`HazardPointer::protect_raw`]. 
+pub fn asymmetric_light_barrier() { // TODO: if cfg!(linux) { // https://github.com/facebook/folly/blob/bd600cd4e88f664f285489c76b6ad835d8367cd2/folly/portability/Asm.h#L28 crate::sync::atomic::fence(core::sync::atomic::Ordering::SeqCst); diff --git a/tests/lib.rs b/tests/lib.rs index c3a2ca1..b229518 100644 --- a/tests/lib.rs +++ b/tests/lib.rs @@ -17,14 +17,8 @@ fn acquires_multiple() { let domain = Domain::new(&()); - let x = AtomicPtr::new(Box::into_raw(Box::new(( - 42, - CountDrops(Arc::clone(&drops_42)), - )))); - let y = AtomicPtr::new(Box::into_raw(Box::new(( - 42, - CountDrops(Arc::clone(&drops_42)), - )))); + let x = AtomicPtr::new(Box::into_raw(Box::new((42, CountDrops(Arc::clone(&drops_42)))))); + let y = AtomicPtr::new(Box::into_raw(Box::new((42, CountDrops(Arc::clone(&drops_42)))))); // As a reader: let mut hazptr_array = HazardPointer::many_in_domain(&domain); @@ -77,10 +71,7 @@ fn acquires_multiple() { fn feels_good() { let drops_42 = Arc::new(AtomicUsize::new(0)); - let x = AtomicPtr::new(Box::into_raw(Box::new(( - 42, - CountDrops(Arc::clone(&drops_42)), - )))); + let x = AtomicPtr::new(Box::into_raw(Box::new((42, CountDrops(Arc::clone(&drops_42)))))); // As a reader: let mut h = HazardPointer::new(); @@ -166,10 +157,7 @@ fn drop_domain() { let drops_42 = Arc::new(AtomicUsize::new(0)); - let x = AtomicPtr::new(Box::into_raw(Box::new(( - 42, - CountDrops(Arc::clone(&drops_42)), - )))); + let x = AtomicPtr::new(Box::into_raw(Box::new((42, CountDrops(Arc::clone(&drops_42)))))); // As a reader: let mut h = HazardPointer::new_in_domain(&domain); @@ -233,3 +221,93 @@ fn hazardptr_compare_exchange_fail() { let _ = unsafe { Box::from_raw(not_current) }; } + +#[test] +fn manual_validation() { + struct Node { + value: usize, + next: haphazard::AtomicPtr, + } + + // Let's imagine a data structure which has the following properties. + // + // 1. It always has exactly two nodes. + // 2. 
A thread may change its contents by exchanging the `head` pointer with another + // chain consisting of two nodes.
+ unsafe { + Domain::global() + .retire_ptr::<_, Box<_>>(n1 as *const _ as *mut Node); + Domain::global() + .retire_ptr::<_, Box<_>>(n2 as *const _ as *mut Node); + } + break; + } + } + } + }); + } + }); + + let n1 = unsafe { Box::from_raw(head.into_inner()) }; + let n2 = unsafe { Box::from_raw(n1.next.into_inner()) }; + assert_eq!(n1.value, THREADS * ITERS); + assert_eq!(n2.value, THREADS * ITERS + 1); +}