Internal code improvements

Cassy343 committed Jul 20, 2022
1 parent 25fca2a commit a68a285
Showing 5 changed files with 80 additions and 32 deletions.
64 changes: 45 additions & 19 deletions src/core/handle.rs
@@ -6,17 +6,18 @@ use crate::{
loom::{
cell::{Cell, UnsafeCell},
sync::{
- atomic::{fence, AtomicU8, AtomicUsize, Ordering},
+ atomic::{fence, AtomicIsize, AtomicU8, Ordering},
Arc, Mutex,
},
thread::{self, Thread},
},
- util::Alias,
+ util::{cold, lock, Alias},
};
use crate::{util::CachePadded, Map, ReadHandle, WriteHandle};
use crate::{Builder, BuilderArgs};
use std::hash::{BuildHasher, Hash};
use std::marker::PhantomData;
+ use std::process::abort;
use std::ptr::{self, NonNull};

use super::{OwnedMapAccess, RefCount};
@@ -26,7 +27,7 @@ const NOT_WRITABLE: u8 = 1;
const WAITING_ON_READERS: u8 = 2;

pub struct Handle<K, V, S = DefaultHashBuilder> {
- residual: AtomicUsize,
+ residual: AtomicIsize,
// All readers need to be dropped before we're dropped, so we don't need to worry about
// freeing any refcounts.
refcounts: Mutex<Slab<NonNull<RefCount>>>,
@@ -59,7 +60,7 @@ where
let init_refcount_capacity = 1;

let me = Arc::new(Self {
- residual: AtomicUsize::new(0),
+ residual: AtomicIsize::new(0),
refcounts: Mutex::new(Slab::with_capacity(init_refcount_capacity)),
writer_thread: UnsafeCell::new(None),
writer_state: AtomicU8::new(WRITABLE),
@@ -78,7 +79,7 @@
impl<K, V, S> Handle<K, V, S> {
#[inline]
pub fn new_reader(me: Arc<Self>) -> ReadHandle<K, V, S> {
- let mut guard = me.refcounts.lock().unwrap();
+ let mut guard = lock(&me.refcounts);
let refcount = RefCount::new(me.writer_map.get().other());
let refcount = NonNull::new(Box::into_raw(Box::new(refcount))).unwrap();
let key = guard.insert(refcount);
@@ -90,7 +91,7 @@ impl<K, V, S> Handle<K, V, S> {

#[inline]
pub unsafe fn release_refcount(&self, key: usize) {
- let refcount = self.refcounts.lock().unwrap().remove(key);
+ let refcount = lock(&self.refcounts).remove(key);

drop(unsafe { Box::from_raw(refcount.as_ptr()) });
}
@@ -102,11 +103,22 @@ impl<K, V, S> Handle<K, V, S> {

if self.residual.fetch_sub(1, Ordering::AcqRel) == 1 {
if self.writer_state.swap(WRITABLE, Ordering::AcqRel) == WAITING_ON_READERS {
- self.writer_thread.with(|ptr| unsafe {
-     let parker = (*ptr).as_ref();
-     debug_assert!(parker.is_some());
-     parker.unwrap_unchecked().unpark();
- });
+ let thread = self
+     .writer_thread
+     .with_mut(|ptr| unsafe { &mut *ptr }.take());
+
+ match thread {
+     Some(thread) => thread.unpark(),
+     None => {
+         if cfg!(debug_assertions) {
+             unreachable!(
+                 "WAITING_ON_READERS state observed when writer_thread is None"
+             );
+         } else {
+             cold();
+         }
+     }
+ }
}
}
}
@@ -116,8 +128,11 @@ impl<K, V, S> Handle<K, V, S> {
let writer_state = self.writer_state.load(Ordering::Acquire);

if writer_state == NOT_WRITABLE {
- self.writer_thread
-     .with_mut(|ptr| drop(unsafe { ptr::replace(ptr, Some(thread::current())) }));
+ let current = Some(thread::current());
+ let old = self
+     .writer_thread
+     .with_mut(|ptr| unsafe { ptr::replace(ptr, current) });
+ drop(old);

let exchange_result = self.writer_state.compare_exchange(
NOT_WRITABLE,
@@ -158,17 +173,28 @@

self.writer_state.store(NOT_WRITABLE, Ordering::Relaxed);

- let guard = self.refcounts.lock().unwrap();
+ let guard = lock(&self.refcounts);

// This needs to be within the mutex
self.writer_map.set(self.writer_map.get().other());

fence(Ordering::Release);

- let initial_residual = guard
-     .iter()
-     .map(|(_, refcount)| unsafe { refcount.as_ref() }.swap_maps())
-     .sum::<usize>();
+ let mut initial_residual = 0isize;
+
+ // Clippy doesn't like that we're iterating over something in a mutex apparently
+ #[allow(clippy::significant_drop_in_scrutinee)]
+ for (_, refcount) in guard.iter() {
+     let refcount = unsafe { refcount.as_ref() };
+
+     // Because the highest bit is used in the refcount, this cast will not be lossy
+     initial_residual += refcount.swap_maps() as isize;
+
+     // If we overflowed, then abort.
+     if initial_residual < 0 {
+         abort();
+     }
+ }

fence(Ordering::Acquire);

@@ -177,7 +203,7 @@ impl<K, V, S> Handle<K, V, S> {
let latest_residual = self.residual.fetch_add(initial_residual, Ordering::AcqRel);
let residual = initial_residual.wrapping_add(latest_residual);
if residual == 0 {
- self.writer_state.store(WRITABLE, Ordering::Release);
+ self.writer_state.store(WRITABLE, Ordering::Relaxed);
} else {
debug_assert!(residual > 0);
}
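
Note on the residual counter change above: readers decrement `residual` as they finish with the old map, while the writer only later adds the number of readers it observed at the swap, so the running total can briefly dip below zero before settling at the true count. A signed `AtomicIsize` models that window directly instead of relying on wrapping `usize` arithmetic. A minimal standalone sketch of the idea (hypothetical code, not taken from this crate):

```rust
use std::sync::atomic::{AtomicIsize, Ordering};
use std::sync::Arc;
use std::thread;

fn main() {
    // Signed residual counter: readers may decrement before the writer
    // adds the reader count it observed, so the value can go negative.
    let residual = Arc::new(AtomicIsize::new(0));
    let observed_readers = 3isize;

    // Simulated readers finishing with the old map "early".
    let handles: Vec<_> = (0..observed_readers)
        .map(|_| {
            let residual = Arc::clone(&residual);
            thread::spawn(move || {
                residual.fetch_sub(1, Ordering::AcqRel);
            })
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }

    // All readers ran first, so the counter now reads -3.
    assert_eq!(residual.load(Ordering::Acquire), -observed_readers);

    // The writer adds the count it observed; a net value of zero means
    // no reader still holds the old map.
    let before = residual.fetch_add(observed_readers, Ordering::AcqRel);
    assert_eq!(before + observed_readers, 0);
}
```
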
1 change: 1 addition & 0 deletions src/core/refcount.rs
@@ -62,6 +62,7 @@ impl RefCount {
Self::to_map_index(old_value)
}

+ /// The returned value, which we'll call `c`, will always satisfy `0 <= c <= isize::MAX`.
#[inline]
pub(super) fn swap_maps(&self) -> usize {
let old_value = self
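
The doc comment added above is what makes the `refcount.swap_maps() as isize` cast in handle.rs lossless: a `usize` known to be at most `isize::MAX` converts to `isize` without changing value. A tiny illustrative check (not from the crate):

```rust
fn main() {
    // Upper bound promised by the doc comment on swap_maps().
    let c: usize = isize::MAX as usize;
    let signed = c as isize; // lossless because 0 <= c <= isize::MAX
    assert_eq!(signed, isize::MAX);
    assert!(signed >= 0);
}
```
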
13 changes: 1 addition & 12 deletions src/read.rs
@@ -4,6 +4,7 @@ use crate::{
core::{Handle, MapAccess, MapIndex, RefCount},
loom::cell::UnsafeCell,
loom::sync::Arc,
+ util::unlikely,
view::sealed::ReadAccess,
Map, View,
};
@@ -162,15 +163,3 @@ impl<'guard, K, V, S> Drop for ReadGuard<'guard, K, V, S> {
}
}
}

- #[inline]
- #[cold]
- fn cold() {}
-
- #[inline]
- fn unlikely(b: bool) -> bool {
-     if b {
-         cold();
-     }
-     b
- }
8 changes: 7 additions & 1 deletion src/util/loom.rs
@@ -1,9 +1,15 @@
#[cfg(loom)]
- pub use loom::{hint, sync, thread};
+ pub use loom::{hint, thread};

#[cfg(not(loom))]
pub use std::{hint, sync, thread};

+ #[cfg(loom)]
+ pub mod sync {
+     pub use loom::sync::*;
+     pub use std::sync::PoisonError;
+ }
+
#[cfg(loom)]
pub mod cell {
pub use loom::cell::Cell;
26 changes: 26 additions & 0 deletions src/util/mod.rs
@@ -4,3 +4,29 @@ pub mod loom;

pub use aliasing::*;
pub use cache_padded::*;

+ use self::loom::sync::{Mutex, MutexGuard, PoisonError};
+
+ #[inline(always)]
+ pub(crate) fn lock<T>(mutex: &Mutex<T>) -> MutexGuard<'_, T> {
+     if cfg!(debug_assertions) {
+         mutex
+             .lock()
+             .expect("Internal mutex(es) should never be poisoned")
+     } else {
+         // At the moment this has the same asm as calling unwrap_unchecked
+         mutex.lock().unwrap_or_else(PoisonError::into_inner)
+     }
+ }
+
+ #[cold]
+ #[inline]
+ pub(crate) fn cold() {}
+
+ #[inline]
+ pub(crate) fn unlikely(b: bool) -> bool {
+     if b {
+         cold();
+     }
+     b
+ }
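
A rough usage sketch for the helpers added above (standalone, with the helpers copied in because they are `pub(crate)`): `lock` replaces `.lock().unwrap()` on internal mutexes, panicking on poisoning only in debug builds, while `unlikely` marks a condition the caller expects to be false almost always so the compiler can keep that branch out of the hot path.

```rust
use std::sync::{Mutex, MutexGuard, PoisonError};

// Copies of the helpers above so this sketch compiles on its own.
fn lock<T>(mutex: &Mutex<T>) -> MutexGuard<'_, T> {
    if cfg!(debug_assertions) {
        mutex
            .lock()
            .expect("Internal mutex(es) should never be poisoned")
    } else {
        mutex.lock().unwrap_or_else(PoisonError::into_inner)
    }
}

#[cold]
#[inline]
fn cold() {}

#[inline]
fn unlikely(b: bool) -> bool {
    if b {
        cold();
    }
    b
}

fn main() {
    let counter = Mutex::new(0u32);

    // `lock` never exposes poisoning to the caller in release builds.
    *lock(&counter) += 1;

    // `unlikely` does not change the branch's meaning, only its layout hint.
    let value = *lock(&counter);
    if unlikely(value == 0) {
        eprintln!("unexpected: counter was never incremented");
    } else {
        println!("counter = {value}");
    }
}
```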
