From 119a2f2a71e57e937c3ecede8735c64f7fe2a08b Mon Sep 17 00:00:00 2001
From: Cassy343
Date: Mon, 25 Jul 2022 00:57:58 -0400
Subject: [PATCH] Rename + Send/Sync tests

---
 src/algorithm.rs  | 10 +++----
 src/core/mod.rs   | 15 +++++++---
 src/core/store.rs |  2 ++
 src/lib.rs        | 73 +++++++++++++++++++++++++++++++++++++++++++++++
 src/write.rs      |  5 +++-
 5 files changed, 95 insertions(+), 10 deletions(-)

diff --git a/src/algorithm.rs b/src/algorithm.rs
index ad8f49d..8c715c9 100644
--- a/src/algorithm.rs
+++ b/src/algorithm.rs
@@ -145,9 +145,9 @@ count to 0 and then `unpark` the writing thread.
 
 ## The Write Algorithm
 
-The write algorithm is split into two parts: `synchronize` + start write, and `finish_write`. When
+The write algorithm is split into two parts: `synchronize` + start write, and `publish`. When
 a new write guard is created, `start_write` is called, and when that guard is dropped
-`finish_write` is called.
+`publish` is called.
 
 `synchronize` + start write:
 1. If there are no residual readers, we are done.
@@ -155,10 +155,10 @@ a new write guard is created, `start_write` is called, and when that guard is dr
 3. Once unblocked, obtain a reference to the writable map.
 4. Apply changes from previous write.
 
-After these steps but before `finish_write`, the changes to the writable map are made and stored in
+After these steps but before `publish`, the changes to the writable map are made and stored in
 the operation log.
 
-`finish_write`:
+`publish`:
 1. Acquire the lock on the refcount array.
 2. Swap out the value of the field storing the writable map with the old map.
 3. Call `swap_maps` on each refcount in the array, and (non-atomically) accumulate a sum of the
@@ -171,7 +171,7 @@ the operation log.
 
 Note that read handles are not swapped to the new map at the same time, this is done one-by-one.
 
-An important invariant of this algorithm is that `residual == 0` whenever `finish_write` is called.
+An important invariant of this algorithm is that `residual == 0` whenever `publish` is called.
 That way, either the writing thread will see `residual == 0` after swapping all the maps, or one
 of the residual readers will see `residual & isize::MAX == 1` as the old value when it performs its
 atomic decrement. In either case, this provides a definite signal that there are no more readers
diff --git a/src/core/mod.rs b/src/core/mod.rs
index 3fb0229..0f65078 100644
--- a/src/core/mod.rs
+++ b/src/core/mod.rs
@@ -32,8 +32,15 @@ pub struct Core<K, V, S> {
     writer_thread: UnsafeCell<Option<Thread>>,
     writer_map: Cell<MapIndex>,
     maps: OwnedMapAccess<K, V, S>,
-    // TODO: figure out if core can implement send or sync
-    _not_send_sync: PhantomData<*const u8>,
+    _not_sync: PhantomData<*const u8>,
+}
+
+unsafe impl<K, V, S> Send for Core<K, V, S>
+where
+    Alias<K>: Send,
+    Alias<V>: Send,
+    S: Send,
+{
 }
 
 impl<K, V, S> Core<K, V, S>
 where
@@ -59,7 +66,7 @@
             writer_thread: UnsafeCell::new(None),
             writer_map: Cell::new(MapIndex::Second),
             maps: OwnedMapAccess::new(maps),
-            _not_send_sync: PhantomData,
+            _not_sync: PhantomData,
         });
 
         let write_handle = unsafe { WriteHandle::new(Arc::clone(&me)) };
@@ -154,7 +161,7 @@ impl<K, V, S> Core<K, V, S> {
     }
 
     #[inline]
-    pub unsafe fn finish_write(&self) {
+    pub unsafe fn publish(&self) {
         debug_assert_eq!(self.residual.load(Ordering::Relaxed), 0);
 
         fence(Ordering::Release);
diff --git a/src/core/store.rs b/src/core/store.rs
index 1fe6b90..d22dd22 100644
--- a/src/core/store.rs
+++ b/src/core/store.rs
@@ -71,7 +71,9 @@ impl<K, V, S> SharedMapAccess<K, V, S> {
         let maps = unsafe { self.maps.as_ref() };
         &maps[map_index as usize]
     }
+}
 
+impl<K, V, S> Clone for SharedMapAccess<K, V, S> {
     fn clone(&self) -> Self {
         Self { maps: self.maps }
     }
 }
diff --git a/src/lib.rs b/src/lib.rs
index 4757f20..1b810ec 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -213,3 +213,76 @@ pub(crate) struct BuilderArgs<S> {
     pub h1: S,
     pub h2: S,
 }
+
+/// ```compile_fail
+/// fn assert_send<T: Send>() {}
+/// use flashmap::*;
+/// assert_send::>();
+/// ```
+///
+/// ```compile_fail
+/// fn assert_send<T: Send>() {}
+/// use flashmap::*;
+/// assert_send::>>();
+/// ```
+#[allow(dead_code)]
+struct NotSendTypes;
+
+/// ```compile_fail
+/// fn assert_sync<T: Sync>() {}
+/// use flashmap::*;
+/// assert_sync::>();
+/// ```
+///
+/// ```compile_fail
+/// fn assert_sync<T: Sync>() {}
+/// use flashmap::*;
+/// assert_sync::>>();
+/// ```
+#[allow(dead_code)]
+struct NotSyncTypes;
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use std::{collections::hash_map::DefaultHasher, marker::PhantomData};
+
+    fn assert_send<T: Send>() {}
+    fn assert_sync<T: Sync>() {}
+
+    #[derive(PartialEq, Eq, Hash)]
+    struct SendOnly(PhantomData<*const u8>);
+
+    unsafe impl Send for SendOnly {}
+
+    #[derive(PartialEq, Eq, Hash)]
+    struct SyncOnly(PhantomData<*const u8>);
+
+    unsafe impl Sync for SyncOnly {}
+
+    #[derive(PartialEq, Eq, Hash)]
+    struct SendSync;
+
+    impl BuildHasher for SendSync {
+        type Hasher = DefaultHasher;
+
+        fn build_hasher(&self) -> Self::Hasher {
+            unimplemented!()
+        }
+    }
+
+    #[test]
+    fn send_types() {
+        assert_send::>();
+        assert_send::>();
+        assert_send::>>();
+        assert_send::>();
+    }
+
+    #[test]
+    fn sync_types() {
+        assert_sync::>();
+        assert_sync::>>();
+        assert_sync::>();
+    }
+}
diff --git a/src/write.rs b/src/write.rs
index 04f6911..fcd50a2 100644
--- a/src/write.rs
+++ b/src/write.rs
@@ -409,7 +409,7 @@ where
     S: BuildHasher,
 {
     fn drop(&mut self) {
-        unsafe { self.handle.core.finish_write() };
+        unsafe { self.handle.core.publish() };
     }
 }
 
@@ -567,6 +567,9 @@ pub struct Leaked<V> {
     handle_uid: WriterUid,
 }
 
+unsafe impl<V> Send for Leaked<V> where V: Send {}
+unsafe impl<V> Sync for Leaked<V> where V: Sync {}
+
 impl<V> Leaked<V> {
     /// Consumes this leaked value, providing the inner aliased value. Note that the aliased value
     /// must be manually dropped via `Alias::`[`drop`](crate::Alias::drop), or converted into an
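
The tests added to src/lib.rs rely on a common compile-time assertion idiom: a generic function whose only purpose is to carry a marker-trait bound, so that a call to it type-checks exactly when the checked type implements the trait. The sketch below is a minimal, self-contained illustration of that idiom under the same pattern used in the patch; it is not flashmap's actual test code, and the types `NotThreadSafe`, `SendOnly`, and `NegativeCaseExample` are stand-ins defined only for this example.

```rust
use std::marker::PhantomData;

// Compile-time assertions: a call to one of these type-checks only if the
// argument type implements the corresponding marker trait.
fn assert_send<T: Send>() {}
fn assert_sync<T: Sync>() {}

// Raw pointers implement neither Send nor Sync, so a `PhantomData<*const u8>`
// field opts the containing type out of both auto traits without storing any
// data. This mirrors the `_not_sync` field on `Core` and the test helpers
// added in the patch.
#[allow(dead_code)]
struct NotThreadSafe(PhantomData<*const u8>);

// Opt back in to `Send` only, like the `SendOnly` helper in the patch.
// The unsafe impl is sound here because the type contains no data at all.
struct SendOnly(PhantomData<*const u8>);
unsafe impl Send for SendOnly {}

/// Negative cases are written as `compile_fail` doctests, since they must
/// fail to compile:
///
/// ```compile_fail
/// fn assert_sync<T: Sync>() {}
/// struct SendOnly(std::marker::PhantomData<*const u8>);
/// unsafe impl Send for SendOnly {}
/// assert_sync::<SendOnly>(); // error: `*const u8` cannot be shared between threads
/// ```
#[allow(dead_code)]
struct NegativeCaseExample;

fn main() {
    // Positive cases only need to compile; running them does nothing.
    assert_send::<SendOnly>();
    assert_send::<Vec<u8>>();
    assert_sync::<Vec<u8>>();
    // assert_send::<NotThreadSafe>(); // would fail to compile
}
```

The positive assertions can live in ordinary `#[test]` functions (as `send_types` and `sync_types` do in the patch) because they only need to type-check, while the negative assertions must be `compile_fail` doctests: a regular test that fails to compile would break the entire build rather than fail as a single test.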