From 4597bc79a6354619e7617ee1e07c85c169969353 Mon Sep 17 00:00:00 2001
From: joboet
Date: Tue, 28 Mar 2023 12:10:53 +0200
Subject: [PATCH] std: make `ReentrantLock` public

---
 library/std/src/io/stdio.rs                   |  45 ++-
 library/std/src/sync/mod.rs                   |   5 +-
 library/std/src/sync/reentrant_lock.rs        | 320 ++++++++++++++++++
 .../sync/{remutex => reentrant_lock}/tests.rs |  34 +-
 library/std/src/sync/remutex.rs               | 178 ----------
 5 files changed, 375 insertions(+), 207 deletions(-)
 create mode 100644 library/std/src/sync/reentrant_lock.rs
 rename library/std/src/sync/{remutex => reentrant_lock}/tests.rs (53%)
 delete mode 100644 library/std/src/sync/remutex.rs

diff --git a/library/std/src/io/stdio.rs b/library/std/src/io/stdio.rs
index b2c57b8ddc78e..7b5545bf550fe 100644
--- a/library/std/src/io/stdio.rs
+++ b/library/std/src/io/stdio.rs
@@ -9,8 +9,9 @@ use crate::cell::{Cell, RefCell};
 use crate::fmt;
 use crate::fs::File;
 use crate::io::{self, BorrowedCursor, BufReader, IoSlice, IoSliceMut, LineWriter, Lines};
+use crate::panic::{RefUnwindSafe, UnwindSafe};
 use crate::sync::atomic::{AtomicBool, Ordering};
-use crate::sync::{Arc, Mutex, MutexGuard, OnceLock, ReentrantMutex, ReentrantMutexGuard};
+use crate::sync::{Arc, Mutex, MutexGuard, OnceLock, ReentrantLock, ReentrantLockGuard};
 use crate::sys::stdio;

 type LocalStream = Arc<Mutex<Vec<u8>>>;
@@ -536,7 +537,7 @@ pub struct Stdout {
     // FIXME: this should be LineWriter or BufWriter depending on the state of
     //        stdout (tty or not). Note that if this is not line buffered it
     //        should also flush-on-panic or some form of flush-on-abort.
-    inner: &'static ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>,
+    inner: &'static ReentrantLock<RefCell<LineWriter<StdoutRaw>>>,
 }

 /// A locked reference to the [`Stdout`] handle.
@@ -558,10 +559,10 @@ pub struct Stdout {
 #[must_use = "if unused stdout will immediately unlock"]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct StdoutLock<'a> {
-    inner: ReentrantMutexGuard<'a, RefCell<LineWriter<StdoutRaw>>>,
+    inner: ReentrantLockGuard<'a, RefCell<LineWriter<StdoutRaw>>>,
 }

-static STDOUT: OnceLock<ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>> = OnceLock::new();
+static STDOUT: OnceLock<ReentrantLock<RefCell<LineWriter<StdoutRaw>>>> = OnceLock::new();

 /// Constructs a new handle to the standard output of the current process.
 ///
@@ -614,7 +615,7 @@ static STDOUT: OnceLock<ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>> = OnceLo
 pub fn stdout() -> Stdout {
     Stdout {
         inner: STDOUT
-            .get_or_init(|| ReentrantMutex::new(RefCell::new(LineWriter::new(stdout_raw())))),
+            .get_or_init(|| ReentrantLock::new(RefCell::new(LineWriter::new(stdout_raw())))),
     }
 }

@@ -625,7 +626,7 @@ pub fn cleanup() {
     let mut initialized = false;
     let stdout = STDOUT.get_or_init(|| {
         initialized = true;
-        ReentrantMutex::new(RefCell::new(LineWriter::with_capacity(0, stdout_raw())))
+        ReentrantLock::new(RefCell::new(LineWriter::with_capacity(0, stdout_raw())))
     });

     if !initialized {
@@ -668,6 +669,12 @@ impl Stdout {
     }
 }

+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl UnwindSafe for Stdout {}
+
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl RefUnwindSafe for Stdout {}
+
 #[stable(feature = "std_debug", since = "1.16.0")]
 impl fmt::Debug for Stdout {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@@ -727,6 +734,12 @@ impl Write for &Stdout {
     }
 }

+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl UnwindSafe for StdoutLock<'_> {}
+
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl RefUnwindSafe for StdoutLock<'_> {}
+
 #[stable(feature = "rust1", since = "1.0.0")]
 impl Write for StdoutLock<'_> {
     fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
@@ -776,7 +789,7 @@ impl fmt::Debug for StdoutLock<'_> {
 /// standard library or via raw Windows API calls, will fail.
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Stderr {
-    inner: &'static ReentrantMutex<RefCell<StderrRaw>>,
+    inner: &'static ReentrantLock<RefCell<StderrRaw>>,
 }

 /// A locked reference to the [`Stderr`] handle.
@@ -798,7 +811,7 @@ pub struct Stderr {
 #[must_use = "if unused stderr will immediately unlock"]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct StderrLock<'a> {
-    inner: ReentrantMutexGuard<'a, RefCell<StderrRaw>>,
+    inner: ReentrantLockGuard<'a, RefCell<StderrRaw>>,
 }

 /// Constructs a new handle to the standard error of the current process.
@@ -851,8 +864,8 @@ pub fn stderr() -> Stderr {
     // Note that unlike `stdout()` we don't use `at_exit` here to register a
     // destructor. Stderr is not buffered, so there's no need to run a
     // destructor for flushing the buffer
-    static INSTANCE: ReentrantMutex<RefCell<StderrRaw>> =
-        ReentrantMutex::new(RefCell::new(stderr_raw()));
+    static INSTANCE: ReentrantLock<RefCell<StderrRaw>> =
+        ReentrantLock::new(RefCell::new(stderr_raw()));

     Stderr { inner: &INSTANCE }
 }
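The explicit `UnwindSafe`/`RefUnwindSafe` impls added above for `Stdout` and `StdoutLock` (and below for `Stderr` and `StderrLock`) keep the standard stream handles unwind-safe even though the underlying lock contains an `UnsafeCell`. A minimal sketch of the property being preserved, using only stable APIs; it is illustrative and not part of the patch:

```
use std::io::Write;
use std::panic;

fn main() {
    let out = std::io::stdout();
    // `catch_unwind` requires its closure, and therefore the captured `out`,
    // to be `UnwindSafe`; the impls added in this patch provide exactly that.
    let result = panic::catch_unwind(move || {
        let mut lock = out.lock();
        writeln!(lock, "about to panic").unwrap();
        panic!("boom");
    });
    assert!(result.is_err());
}
```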
@@ -887,6 +900,12 @@ impl Stderr {
     }
 }

+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl UnwindSafe for Stderr {}
+
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl RefUnwindSafe for Stderr {}
+
 #[stable(feature = "std_debug", since = "1.16.0")]
 impl fmt::Debug for Stderr {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@@ -946,6 +965,12 @@ impl Write for &Stderr {
     }
 }

+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl UnwindSafe for StderrLock<'_> {}
+
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl RefUnwindSafe for StderrLock<'_> {}
+
 #[stable(feature = "rust1", since = "1.0.0")]
 impl Write for StderrLock<'_> {
     fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
diff --git a/library/std/src/sync/mod.rs b/library/std/src/sync/mod.rs
index 19641753ffe1d..99ff1a1695275 100644
--- a/library/std/src/sync/mod.rs
+++ b/library/std/src/sync/mod.rs
@@ -180,7 +180,8 @@ pub use self::lazy_lock::LazyLock;
 #[stable(feature = "once_cell", since = "CURRENT_RUSTC_VERSION")]
 pub use self::once_lock::OnceLock;

-pub(crate) use self::remutex::{ReentrantMutex, ReentrantMutexGuard};
+#[unstable(feature = "reentrant_lock", issue = "none")]
+pub use self::reentrant_lock::{ReentrantLock, ReentrantLockGuard};

 pub mod mpsc;
@@ -192,5 +193,5 @@ mod mutex;
 pub(crate) mod once;
 mod once_lock;
 mod poison;
-mod remutex;
+mod reentrant_lock;
 mod rwlock;
diff --git a/library/std/src/sync/reentrant_lock.rs b/library/std/src/sync/reentrant_lock.rs
new file mode 100644
index 0000000000000..4dff898f677cb
--- /dev/null
+++ b/library/std/src/sync/reentrant_lock.rs
@@ -0,0 +1,320 @@
+#[cfg(all(test, not(target_os = "emscripten")))]
+mod tests;
+
+use crate::cell::UnsafeCell;
+use crate::fmt;
+use crate::ops::Deref;
+use crate::panic::{RefUnwindSafe, UnwindSafe};
+use crate::sync::atomic::{AtomicUsize, Ordering::Relaxed};
+use crate::sys::locks as sys;
+
+/// A re-entrant mutual exclusion lock
+///
+/// This lock will block *other* threads waiting for the lock to become
+/// available. The thread which has already locked the mutex can lock it
+/// multiple times without blocking, preventing a common source of deadlocks.
+///
+/// # Examples
+///
+/// Allow recursively calling a function needing synchronization from within
+/// a callback (this is how [`StdoutLock`](crate::io::StdoutLock) is currently
+/// implemented):
+///
+/// ```
+/// #![feature(reentrant_lock)]
+///
+/// use std::cell::RefCell;
+/// use std::sync::ReentrantLock;
+///
+/// pub struct Log {
+///     data: RefCell<String>,
+/// }
+///
+/// impl Log {
+///     pub fn append(&self, msg: &str) {
+///         self.data.borrow_mut().push_str(msg);
+///     }
+/// }
+///
+/// static LOG: ReentrantLock<Log> = ReentrantLock::new(Log { data: RefCell::new(String::new()) });
+///
+/// pub fn with_log<R>(f: impl FnOnce(&Log) -> R) -> R {
+///     let log = LOG.lock();
+///     f(&*log)
+/// }
+///
+/// with_log(|log| {
+///     log.append("Hello");
+///     with_log(|log| log.append(" there!"));
+/// });
+/// ```
+///
+// # Implementation details
+//
+// The 'owner' field tracks which thread has locked the mutex.
+//
+// We use current_thread_unique_ptr() as the thread identifier,
+// which is just the address of a thread local variable.
+//
+// If `owner` is set to the identifier of the current thread,
+// we assume the mutex is already locked and instead of locking it again,
+// we increment `lock_count`.
+//
+// When unlocking, we decrement `lock_count`, and only unlock the mutex when
+// it reaches zero.
+//
+// `lock_count` is protected by the mutex and only accessed by the thread that has
+// locked the mutex, so needs no synchronization.
+//
+// `owner` can be checked by other threads that want to see if they already
+// hold the lock, so needs to be atomic. If it compares equal, we're on the
+// same thread that holds the mutex and memory access can use relaxed ordering
+// since we're not dealing with multiple threads. If it's not equal,
+// synchronization is left to the mutex, making relaxed memory ordering for
+// the `owner` field fine in all cases.
+#[unstable(feature = "reentrant_lock", issue = "none")]
+pub struct ReentrantLock<T: ?Sized> {
+    mutex: sys::Mutex,
+    owner: AtomicUsize,
+    lock_count: UnsafeCell<u32>,
+    data: T,
+}
+
+#[unstable(feature = "reentrant_lock", issue = "none")]
+unsafe impl<T: Send + ?Sized> Send for ReentrantLock<T> {}
+#[unstable(feature = "reentrant_lock", issue = "none")]
+unsafe impl<T: Send + ?Sized> Sync for ReentrantLock<T> {}
+
+// Because of the `UnsafeCell`, these traits are not implemented automatically
+#[unstable(feature = "reentrant_lock", issue = "none")]
+impl<T: RefUnwindSafe + ?Sized> UnwindSafe for ReentrantLock<T> {}
+#[unstable(feature = "reentrant_lock", issue = "none")]
+impl<T: RefUnwindSafe + ?Sized> RefUnwindSafe for ReentrantLock<T> {}
+
+/// An RAII implementation of a "scoped lock" of a re-entrant lock. When this
+/// structure is dropped (falls out of scope), the lock will be unlocked.
+///
+/// The data protected by the mutex can be accessed through this guard via its
+/// [`Deref`] implementation.
+///
+/// This structure is created by the [`lock`](ReentrantLock::lock) method on
+/// [`ReentrantLock`].
+///
+/// # Mutability
+///
+/// Unlike [`MutexGuard`](super::MutexGuard), `ReentrantLockGuard` does not
+/// implement [`DerefMut`](crate::ops::DerefMut), because implementation of
+/// the trait would violate Rust’s reference aliasing rules. Use interior
+/// mutability (usually [`RefCell`](crate::cell::RefCell)) in order to mutate
+/// the guarded data.
+#[must_use = "if unused the ReentrantLock will immediately unlock"]
+#[unstable(feature = "reentrant_lock", issue = "none")]
+pub struct ReentrantLockGuard<'a, T: ?Sized + 'a> {
+    lock: &'a ReentrantLock<T>,
+}
+
+#[unstable(feature = "reentrant_lock", issue = "none")]
+impl<T: ?Sized> !Send for ReentrantLockGuard<'_, T> {}
+
+#[unstable(feature = "reentrant_lock", issue = "none")]
+impl<T> ReentrantLock<T> {
+    /// Creates a new re-entrant lock in an unlocked state ready for use.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(reentrant_lock)]
+    /// use std::sync::ReentrantLock;
+    ///
+    /// let lock = ReentrantLock::new(0);
+    /// ```
+    pub const fn new(t: T) -> ReentrantLock<T> {
+        ReentrantLock {
+            mutex: sys::Mutex::new(),
+            owner: AtomicUsize::new(0),
+            lock_count: UnsafeCell::new(0),
+            data: t,
+        }
+    }
+
+    /// Consumes this lock, returning the underlying data.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(reentrant_lock)]
+    ///
+    /// use std::sync::ReentrantLock;
+    ///
+    /// let lock = ReentrantLock::new(0);
+    /// assert_eq!(lock.into_inner(), 0);
+    /// ```
+    pub fn into_inner(self) -> T {
+        self.data
+    }
+}
+
+#[unstable(feature = "reentrant_lock", issue = "none")]
+impl<T: ?Sized> ReentrantLock<T> {
+    /// Acquires the lock, blocking the current thread until it is able to do
+    /// so.
+    ///
+    /// This function will block the caller until it is available to acquire
+    /// the lock. Upon returning, the thread is the only thread with the lock
+    /// held. When the thread calling this method already holds the lock, the
+    /// call succeeds without blocking.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(reentrant_lock)]
+    /// use std::cell::Cell;
+    /// use std::sync::{Arc, ReentrantLock};
+    /// use std::thread;
+    ///
+    /// let lock = Arc::new(ReentrantLock::new(Cell::new(0)));
+    /// let c_lock = Arc::clone(&lock);
+    ///
+    /// thread::spawn(move || {
+    ///     c_lock.lock().set(10);
+    /// }).join().expect("thread::spawn failed");
+    /// assert_eq!(lock.lock().get(), 10);
+    /// ```
+    pub fn lock(&self) -> ReentrantLockGuard<'_, T> {
+        let this_thread = current_thread_unique_ptr();
+        // Safety: We only touch lock_count when we own the lock.
+        unsafe {
+            if self.owner.load(Relaxed) == this_thread {
+                self.increment_lock_count().expect("lock count overflow in reentrant mutex");
+            } else {
+                self.mutex.lock();
+                self.owner.store(this_thread, Relaxed);
+                debug_assert_eq!(*self.lock_count.get(), 0);
+                *self.lock_count.get() = 1;
+            }
+        }
+        ReentrantLockGuard { lock: self }
+    }
+
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// Since this call borrows the `ReentrantLock` mutably, no actual locking
+    /// needs to take place -- the mutable borrow statically guarantees no locks
+    /// exist.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(reentrant_lock)]
+    /// use std::sync::ReentrantLock;
+    ///
+    /// let mut lock = ReentrantLock::new(0);
+    /// *lock.get_mut() = 10;
+    /// assert_eq!(*lock.lock(), 10);
+    /// ```
+    pub fn get_mut(&mut self) -> &mut T {
+        &mut self.data
+    }
+
+    /// Attempts to acquire this lock.
+    ///
+    /// If the lock could not be acquired at this time, then `None` is returned.
+    /// Otherwise, an RAII guard is returned.
+    ///
+    /// This function does not block.
+    pub(crate) fn try_lock(&self) -> Option<ReentrantLockGuard<'_, T>> {
+        let this_thread = current_thread_unique_ptr();
+        // Safety: We only touch lock_count when we own the lock.
+        unsafe {
+            if self.owner.load(Relaxed) == this_thread {
+                self.increment_lock_count()?;
+                Some(ReentrantLockGuard { lock: self })
+            } else if self.mutex.try_lock() {
+                self.owner.store(this_thread, Relaxed);
+                debug_assert_eq!(*self.lock_count.get(), 0);
+                *self.lock_count.get() = 1;
+                Some(ReentrantLockGuard { lock: self })
+            } else {
+                None
+            }
+        }
+    }
+
+    unsafe fn increment_lock_count(&self) -> Option<()> {
+        *self.lock_count.get() = (*self.lock_count.get()).checked_add(1)?;
+        Some(())
+    }
+}
+
+#[unstable(feature = "reentrant_lock", issue = "none")]
+impl<T: fmt::Debug + ?Sized> fmt::Debug for ReentrantLock<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let mut d = f.debug_struct("ReentrantLock");
+        match self.try_lock() {
+            Some(v) => d.field("data", &&*v),
+            None => d.field("data", &format_args!("<locked>")),
+        };
+        d.finish_non_exhaustive()
+    }
+}
+
+#[unstable(feature = "reentrant_lock", issue = "none")]
+impl<T: Default> Default for ReentrantLock<T> {
+    fn default() -> Self {
+        Self::new(T::default())
+    }
+}
+
+#[unstable(feature = "reentrant_lock", issue = "none")]
+impl<T> From<T> for ReentrantLock<T> {
+    fn from(t: T) -> Self {
+        Self::new(t)
+    }
+}
+
+#[unstable(feature = "reentrant_lock", issue = "none")]
+impl<T: ?Sized> Deref for ReentrantLockGuard<'_, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        &self.lock.data
+    }
+}
+
+#[unstable(feature = "reentrant_lock", issue = "none")]
+impl<T: fmt::Debug + ?Sized> fmt::Debug for ReentrantLockGuard<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
+#[unstable(feature = "reentrant_lock", issue = "none")]
+impl<T: fmt::Display + ?Sized> fmt::Display for ReentrantLockGuard<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
+#[unstable(feature = "reentrant_lock", issue = "none")]
+impl<T: ?Sized> Drop for ReentrantLockGuard<'_, T> {
+    #[inline]
+    fn drop(&mut self) {
+        // Safety: We own the lock.
+        unsafe {
+            *self.lock.lock_count.get() -= 1;
+            if *self.lock.lock_count.get() == 0 {
+                self.lock.owner.store(0, Relaxed);
+                self.lock.mutex.unlock();
+            }
+        }
+    }
+}
+
+/// Get an address that is unique per running thread.
+///
+/// This can be used as a non-null usize-sized ID.
+pub(crate) fn current_thread_unique_ptr() -> usize {
+    // Use a non-drop type to make sure it's still available during thread destruction.
+    thread_local! { static X: u8 = const { 0 } }
+    X.with(|x| <*const _>::addr(x))
+}
diff --git a/library/std/src/sync/remutex/tests.rs b/library/std/src/sync/reentrant_lock/tests.rs
similarity index 53%
rename from library/std/src/sync/remutex/tests.rs
rename to library/std/src/sync/reentrant_lock/tests.rs
index fc553081d4227..d4c1d440c6109 100644
--- a/library/std/src/sync/remutex/tests.rs
+++ b/library/std/src/sync/reentrant_lock/tests.rs
@@ -1,17 +1,17 @@
-use super::{ReentrantMutex, ReentrantMutexGuard};
+use super::{ReentrantLock, ReentrantLockGuard};
 use crate::cell::RefCell;
 use crate::sync::Arc;
 use crate::thread;

 #[test]
 fn smoke() {
-    let m = ReentrantMutex::new(());
+    let l = ReentrantLock::new(());
     {
-        let a = m.lock();
+        let a = l.lock();
         {
-            let b = m.lock();
+            let b = l.lock();
             {
-                let c = m.lock();
+                let c = l.lock();
                 assert_eq!(*c, ());
             }
             assert_eq!(*b, ());
@@ -22,15 +22,15 @@ fn smoke() {

 #[test]
 fn is_mutex() {
-    let m = Arc::new(ReentrantMutex::new(RefCell::new(0)));
-    let m2 = m.clone();
-    let lock = m.lock();
+    let l = Arc::new(ReentrantLock::new(RefCell::new(0)));
+    let l2 = l.clone();
+    let lock = l.lock();
     let child = thread::spawn(move || {
-        let lock = m2.lock();
+        let lock = l2.lock();
         assert_eq!(*lock.borrow(), 4950);
     });
     for i in 0..100 {
-        let lock = m.lock();
+        let lock = l.lock();
         *lock.borrow_mut() += i;
     }
     drop(lock);
@@ -39,20 +39,20 @@
 #[test]
 fn trylock_works() {
-    let m = Arc::new(ReentrantMutex::new(()));
-    let m2 = m.clone();
-    let _lock = m.try_lock();
-    let _lock2 = m.try_lock();
+    let l = Arc::new(ReentrantLock::new(()));
+    let l2 = l.clone();
+    let _lock = l.try_lock();
+    let _lock2 = l.try_lock();
     thread::spawn(move || {
-        let lock = m2.try_lock();
+        let lock = l2.try_lock();
         assert!(lock.is_none());
     })
     .join()
     .unwrap();
-    let _lock3 = m.try_lock();
+    let _lock3 = l.try_lock();
 }

-pub struct Answer<'a>(pub ReentrantMutexGuard<'a, RefCell<u32>>);
+pub struct Answer<'a>(pub ReentrantLockGuard<'a, RefCell<u32>>);
 impl Drop for Answer<'_> {
     fn drop(&mut self) {
         *self.0.borrow_mut() = 42;
     }
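Beyond the unit tests above, here is a rough sketch of how the newly public API could be used from a nightly crate with `#![feature(reentrant_lock)]` enabled; the event-log helpers (`EVENTS`, `record`, `record_pair`) are illustrative names, not part of this patch:

```
#![feature(reentrant_lock)]

use std::cell::RefCell;
use std::sync::ReentrantLock;
use std::thread;

// `ReentrantLock::new` is `const`, so a static lock needs no lazy initialization.
static EVENTS: ReentrantLock<RefCell<Vec<String>>> =
    ReentrantLock::new(RefCell::new(Vec::new()));

fn record(event: &str) {
    // May be called while the current thread already holds the lock
    // (see `record_pair`) without deadlocking.
    EVENTS.lock().borrow_mut().push(event.to_string());
}

fn record_pair(a: &str, b: &str) {
    let _guard = EVENTS.lock(); // outer acquisition
    record(a); // re-entrant inner acquisitions on the same thread
    record(b);
}

fn main() {
    let handles: Vec<_> = (0..4)
        .map(|i| thread::spawn(move || record_pair(&format!("start {i}"), &format!("end {i}"))))
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
    // The guard only hands out `&T`, so mutation goes through `RefCell`,
    // as the `ReentrantLockGuard` docs recommend.
    assert_eq!(EVENTS.lock().borrow().len(), 8);
}
```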
diff --git a/library/std/src/sync/remutex.rs b/library/std/src/sync/remutex.rs
deleted file mode 100644
index 0ced48d10b7c6..0000000000000
--- a/library/std/src/sync/remutex.rs
+++ /dev/null
@@ -1,178 +0,0 @@
-#[cfg(all(test, not(target_os = "emscripten")))]
-mod tests;
-
-use crate::cell::UnsafeCell;
-use crate::ops::Deref;
-use crate::panic::{RefUnwindSafe, UnwindSafe};
-use crate::sync::atomic::{AtomicUsize, Ordering::Relaxed};
-use crate::sys::locks as sys;
-
-/// A reentrant mutual exclusion
-///
-/// This mutex will block *other* threads waiting for the lock to become
-/// available. The thread which has already locked the mutex can lock it
-/// multiple times without blocking, preventing a common source of deadlocks.
-///
-/// This is used by stdout().lock() and friends.
-///
-/// ## Implementation details
-///
-/// The 'owner' field tracks which thread has locked the mutex.
-///
-/// We use current_thread_unique_ptr() as the thread identifier,
-/// which is just the address of a thread local variable.
-///
-/// If `owner` is set to the identifier of the current thread,
-/// we assume the mutex is already locked and instead of locking it again,
-/// we increment `lock_count`.
-///
-/// When unlocking, we decrement `lock_count`, and only unlock the mutex when
-/// it reaches zero.
-///
-/// `lock_count` is protected by the mutex and only accessed by the thread that has
-/// locked the mutex, so needs no synchronization.
-///
-/// `owner` can be checked by other threads that want to see if they already
-/// hold the lock, so needs to be atomic. If it compares equal, we're on the
-/// same thread that holds the mutex and memory access can use relaxed ordering
-/// since we're not dealing with multiple threads. If it's not equal,
-/// synchronization is left to the mutex, making relaxed memory ordering for
-/// the `owner` field fine in all cases.
-pub struct ReentrantMutex<T> {
-    mutex: sys::Mutex,
-    owner: AtomicUsize,
-    lock_count: UnsafeCell<u32>,
-    data: T,
-}
-
-unsafe impl<T: Send> Send for ReentrantMutex<T> {}
-unsafe impl<T: Send> Sync for ReentrantMutex<T> {}
-
-impl<T> UnwindSafe for ReentrantMutex<T> {}
-impl<T> RefUnwindSafe for ReentrantMutex<T> {}
-
-/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
-/// dropped (falls out of scope), the lock will be unlocked.
-///
-/// The data protected by the mutex can be accessed through this guard via its
-/// Deref implementation.
-///
-/// # Mutability
-///
-/// Unlike `MutexGuard`, `ReentrantMutexGuard` does not implement `DerefMut`,
-/// because implementation of the trait would violate Rust’s reference aliasing
-/// rules. Use interior mutability (usually `RefCell`) in order to mutate the
-/// guarded data.
-#[must_use = "if unused the ReentrantMutex will immediately unlock"]
-pub struct ReentrantMutexGuard<'a, T: 'a> {
-    lock: &'a ReentrantMutex<T>,
-}
-
-impl<T> !Send for ReentrantMutexGuard<'_, T> {}
-
-impl<T> ReentrantMutex<T> {
-    /// Creates a new reentrant mutex in an unlocked state.
-    pub const fn new(t: T) -> ReentrantMutex<T> {
-        ReentrantMutex {
-            mutex: sys::Mutex::new(),
-            owner: AtomicUsize::new(0),
-            lock_count: UnsafeCell::new(0),
-            data: t,
-        }
-    }
-
-    /// Acquires a mutex, blocking the current thread until it is able to do so.
-    ///
-    /// This function will block the caller until it is available to acquire the mutex.
-    /// Upon returning, the thread is the only thread with the mutex held. When the thread
-    /// calling this method already holds the lock, the call shall succeed without
-    /// blocking.
-    ///
-    /// # Errors
-    ///
-    /// If another user of this mutex panicked while holding the mutex, then
-    /// this call will return failure if the mutex would otherwise be
-    /// acquired.
-    pub fn lock(&self) -> ReentrantMutexGuard<'_, T> {
-        let this_thread = current_thread_unique_ptr();
-        // Safety: We only touch lock_count when we own the lock.
-        unsafe {
-            if self.owner.load(Relaxed) == this_thread {
-                self.increment_lock_count();
-            } else {
-                self.mutex.lock();
-                self.owner.store(this_thread, Relaxed);
-                debug_assert_eq!(*self.lock_count.get(), 0);
-                *self.lock_count.get() = 1;
-            }
-        }
-        ReentrantMutexGuard { lock: self }
-    }
-
-    /// Attempts to acquire this lock.
-    ///
-    /// If the lock could not be acquired at this time, then `Err` is returned.
-    /// Otherwise, an RAII guard is returned.
-    ///
-    /// This function does not block.
-    ///
-    /// # Errors
-    ///
-    /// If another user of this mutex panicked while holding the mutex, then
-    /// this call will return failure if the mutex would otherwise be
-    /// acquired.
-    pub fn try_lock(&self) -> Option<ReentrantMutexGuard<'_, T>> {
-        let this_thread = current_thread_unique_ptr();
-        // Safety: We only touch lock_count when we own the lock.
-        unsafe {
-            if self.owner.load(Relaxed) == this_thread {
-                self.increment_lock_count();
-                Some(ReentrantMutexGuard { lock: self })
-            } else if self.mutex.try_lock() {
-                self.owner.store(this_thread, Relaxed);
-                debug_assert_eq!(*self.lock_count.get(), 0);
-                *self.lock_count.get() = 1;
-                Some(ReentrantMutexGuard { lock: self })
-            } else {
-                None
-            }
-        }
-    }
-
-    unsafe fn increment_lock_count(&self) {
-        *self.lock_count.get() = (*self.lock_count.get())
-            .checked_add(1)
-            .expect("lock count overflow in reentrant mutex");
-    }
-}
-
-impl<T> Deref for ReentrantMutexGuard<'_, T> {
-    type Target = T;
-
-    fn deref(&self) -> &T {
-        &self.lock.data
-    }
-}
-
-impl<T> Drop for ReentrantMutexGuard<'_, T> {
-    #[inline]
-    fn drop(&mut self) {
-        // Safety: We own the lock.
-        unsafe {
-            *self.lock.lock_count.get() -= 1;
-            if *self.lock.lock_count.get() == 0 {
-                self.lock.owner.store(0, Relaxed);
-                self.lock.mutex.unlock();
-            }
-        }
-    }
-}
-
-/// Get an address that is unique per running thread.
-///
-/// This can be used as a non-null usize-sized ID.
-pub fn current_thread_unique_ptr() -> usize {
-    // Use a non-drop type to make sure it's still available during thread destruction.
-    thread_local! { static X: u8 = const { 0 } }
-    X.with(|x| <*const _>::addr(x))
-}
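The patch ends with `current_thread_unique_ptr`, the thread-identity trick the implementation notes rely on: the address of a thread-local is unique among live threads, so it doubles as a cheap `usize`-sized ID. A simplified stand-alone sketch of the same idea, using a plain initializer and an `as usize` cast instead of std's const-initialized local and `<*const _>::addr` (`my_id` is an illustrative name):

```
use std::thread;

// The address of a per-thread local serves as a usize-sized thread ID.
fn my_id() -> usize {
    thread_local! { static X: u8 = 0; }
    X.with(|x| x as *const u8 as usize)
}

fn main() {
    let main_id = my_id();
    // Stable within a single thread...
    assert_eq!(main_id, my_id());
    // ...and distinct from the ID observed on another live thread.
    let other_id = thread::spawn(my_id).join().unwrap();
    assert_ne!(main_id, other_id);
}
```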