From c031d20d6ff50b82d2aa7cd39adbdd3257533d9d Mon Sep 17 00:00:00 2001 From: Steven Stewart-Gallus Date: Fri, 28 Jun 2013 22:46:17 -0700 Subject: [PATCH] Refactor extra::sync, and extra::arc Convert functionality into RAII style. Split functionality up into the modules. Cleanup dependencies, and redundancies. --- doc/tutorial-tasks.md | 46 +- src/libextra/arc.rs | 878 ---------- src/libextra/extra.rs | 3 +- src/libextra/sync.rs | 1444 ----------------- src/libextra/sync/arc.rs | 74 + src/libextra/sync/condition.rs | 55 + src/libextra/sync/mod.rs | 36 + src/libextra/sync/mutex.rs | 357 ++++ src/libextra/sync/rwlock.rs | 445 +++++ src/libextra/sync/semaphore.rs | 117 ++ src/libextra/sync/shared_mut/mutex_arc.rs | 188 +++ src/libextra/sync/shared_mut/rwarc.rs | 458 ++++++ src/libextra/sync/unlock.rs | 51 + src/libextra/sync/wait_queue.rs | 204 +++ src/test/bench/graph500-bfs.rs | 8 +- src/test/bench/msgsend-ring-mutex-arcs.rs | 58 +- src/test/bench/msgsend-ring-rw-arcs.rs | 55 +- .../arc-rw-cond-shouldnt-escape.rs | 21 - .../arc-rw-read-mode-shouldnt-escape.rs | 23 - .../arc-rw-state-shouldnt-escape.rs | 16 +- .../arc-rw-write-mode-cond-shouldnt-escape.rs | 23 - .../arc-rw-write-mode-shouldnt-escape.rs | 23 - src/test/compile-fail/no-capture-arc.rs | 4 +- src/test/compile-fail/no-reuse-move-arc.rs | 4 +- .../once-cant-call-twice-on-heap.rs | 4 +- .../once-cant-call-twice-on-stack.rs | 4 +- .../once-cant-move-out-of-non-once-on-heap.rs | 4 +- ...once-cant-move-out-of-non-once-on-stack.rs | 4 +- .../compile-fail/sync-cond-shouldnt-escape.rs | 22 - .../sync-rwlock-cond-shouldnt-escape.rs | 21 - .../sync-rwlock-read-mode-shouldnt-escape.rs | 22 - ...-rwlock-write-mode-cond-shouldnt-escape.rs | 23 - .../sync-rwlock-write-mode-shouldnt-escape.rs | 22 - ...vents-values-from-continuing-to-be-used.rs | 33 + src/test/run-fail/issue-2444.rs | 4 +- src/test/run-pass/bind-by-move.rs | 8 +- src/test/run-pass/once-move-out-on-heap.rs | 4 +- 
src/test/run-pass/once-move-out-on-stack.rs | 4 +- src/test/run-pass/trait-bounds-in-arc.rs | 12 +- 39 files changed, 2152 insertions(+), 2630 deletions(-) delete mode 100644 src/libextra/arc.rs delete mode 100644 src/libextra/sync.rs create mode 100644 src/libextra/sync/arc.rs create mode 100644 src/libextra/sync/condition.rs create mode 100644 src/libextra/sync/mod.rs create mode 100644 src/libextra/sync/mutex.rs create mode 100644 src/libextra/sync/rwlock.rs create mode 100644 src/libextra/sync/semaphore.rs create mode 100644 src/libextra/sync/shared_mut/mutex_arc.rs create mode 100644 src/libextra/sync/shared_mut/rwarc.rs create mode 100644 src/libextra/sync/unlock.rs create mode 100644 src/libextra/sync/wait_queue.rs delete mode 100644 src/test/compile-fail/arc-rw-cond-shouldnt-escape.rs delete mode 100644 src/test/compile-fail/arc-rw-read-mode-shouldnt-escape.rs delete mode 100644 src/test/compile-fail/arc-rw-write-mode-cond-shouldnt-escape.rs delete mode 100644 src/test/compile-fail/arc-rw-write-mode-shouldnt-escape.rs delete mode 100644 src/test/compile-fail/sync-cond-shouldnt-escape.rs delete mode 100644 src/test/compile-fail/sync-rwlock-cond-shouldnt-escape.rs delete mode 100644 src/test/compile-fail/sync-rwlock-read-mode-shouldnt-escape.rs delete mode 100644 src/test/compile-fail/sync-rwlock-write-mode-cond-shouldnt-escape.rs delete mode 100644 src/test/compile-fail/sync-rwlock-write-mode-shouldnt-escape.rs create mode 100644 src/test/compile-fail/unlocking-prevents-values-from-continuing-to-be-used.rs diff --git a/doc/tutorial-tasks.md b/doc/tutorial-tasks.md index d302916025c09..04a1928618423 100644 --- a/doc/tutorial-tasks.md +++ b/doc/tutorial-tasks.md @@ -50,7 +50,7 @@ concurrency at this writing: * [`std::pipes`] - The underlying messaging infrastructure, * [`extra::comm`] - Additional messaging types based on `std::pipes`, * [`extra::sync`] - More exotic synchronization tools, including locks, -* [`extra::arc`] - The ARC (atomically reference 
counted) type, +* [`extra::sync::arc`] - The Arc (atomically reference counted) type, for safely sharing immutable data, * [`extra::future`] - A type representing values that may be computed concurrently and retrieved at a later time. @@ -59,7 +59,7 @@ concurrency at this writing: [`std::pipes`]: std/pipes.html [`extra::comm`]: extra/comm.html [`extra::sync`]: extra/sync.html -[`extra::arc`]: extra/arc.html +[`extra::sync::arc`]: extra/sync/arc.html [`extra::future`]: extra/future.html # Basics @@ -334,24 +334,24 @@ fn main() { } ~~~ -## Sharing immutable data without copy: ARC +## Sharing immutable data without copy: Arc To share immutable data between tasks, a first approach would be to only use pipes as we have seen previously. A copy of the data to share would then be made for each task. In some cases, this would add up to a significant amount of wasted memory and would require copying the same data more than necessary. -To tackle this issue, one can use an Atomically Reference Counted wrapper (`ARC`) as implemented in -the `extra` library of Rust. With an ARC, the data will no longer be copied for each task. The ARC -acts as a reference to the shared data and only this reference is shared and cloned. +To tackle this issue, one can use an Atomically Reference Counted wrapper (`Arc`) as implemented in +the `extra::sync::arc` library of Rust. With an ARC, the data will no longer be copied for each task. +The Arc acts as a reference to the shared data and only this reference is shared and cloned. -Here is a small example showing how to use ARCs. We wish to run concurrently several computations on +Here is a small example showing how to use Arcs. We wish to run concurrently several computations on a single large vector of floats. Each task needs the full vector to perform its duty. 
~~~ # use std::vec; # use std::uint; # use std::rand; -use extra::arc::ARC; +use extra::sync::arc::Arc; fn pnorm(nums: &~[float], p: uint) -> float { nums.iter().fold(0.0, |a,b| a+(*b).pow(&(p as float)) ).pow(&(1f / (p as float))) @@ -361,14 +361,14 @@ fn main() { let numbers = vec::from_fn(1000000, |_| rand::random::()); println(fmt!("Inf-norm = %?", *numbers.iter().max().unwrap())); - let numbers_arc = ARC(numbers); + let numbers_arc = Arc::new(numbers); for uint::range(1,10) |num| { let (port, chan) = stream(); chan.send(numbers_arc.clone()); do spawn { - let local_arc : ARC<~[float]> = port.recv(); + let local_arc : Arc<~[float]> = port.recv(); let task_numbers = local_arc.get(); println(fmt!("%u-norm = %?", num, pnorm(task_numbers, num))); } @@ -377,42 +377,42 @@ fn main() { ~~~ The function `pnorm` performs a simple computation on the vector (it computes the sum of its items -at the power given as argument and takes the inverse power of this value). The ARC on the vector is +at the power given as argument and takes the inverse power of this value). The Arc on the vector is created by the line ~~~ -# use extra::arc::ARC; +# use extra::sync::arc::Arc; # use std::vec; # use std::rand; # let numbers = vec::from_fn(1000000, |_| rand::random::()); -let numbers_arc=ARC(numbers); +let numbers_arc = Arc::new(numbers); ~~~ and a clone of it is sent to each task ~~~ -# use extra::arc::ARC; +# use extra::sync::arc::Arc; # use std::vec; # use std::rand; -# let numbers=vec::from_fn(1000000, |_| rand::random::()); -# let numbers_arc = ARC(numbers); -# let (port, chan) = stream(); +# let numbers = vec::from_fn(1000000, |_| rand::random::()); +# let numbers_arc = Arc::new(numbers); +# let (port, chan) = stream(); chan.send(numbers_arc.clone()); ~~~ copying only the wrapper and not its contents. 
Each task recovers the underlying data by ~~~ -# use extra::arc::ARC; +# use extra::sync::arc::Arc; # use std::vec; # use std::rand; -# let numbers=vec::from_fn(1000000, |_| rand::random::()); -# let numbers_arc=ARC(numbers); -# let (port, chan) = stream(); +# let numbers = vec::from_fn(1000000, |_| rand::random::()); +# let numbers_arc = Arc::new(numbers); +# let (port, chan) = stream(); # chan.send(numbers_arc.clone()); -# let local_arc : ARC<~[float]> = port.recv(); +# let local_arc : Arc<~[float]> = port.recv(); let task_numbers = local_arc.get(); ~~~ and can use it as if it were local. -The `arc` module also implements ARCs around mutable data that are not covered here. +The `sync` module also implements shared mutable data that is not covered here. # Handling task failure diff --git a/src/libextra/arc.rs b/src/libextra/arc.rs deleted file mode 100644 index 054b4ce5177d5..0000000000000 --- a/src/libextra/arc.rs +++ /dev/null @@ -1,878 +0,0 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -/*! - * Concurrency-enabled mechanisms for sharing mutable and/or immutable state - * between tasks. - * - * # Example - * - * In this example, a large vector of floats is shared between several tasks. - * With simple pipes, without ARC, a copy would have to be made for each task. 
- * - * ~~~ {.rust} - * extern mod std; - * use extra::arc; - * let numbers=vec::from_fn(100, |ind| (ind as float)*rand::random()); - * let shared_numbers=arc::ARC(numbers); - * - * for 10.times { - * let (port, chan) = stream(); - * chan.send(shared_numbers.clone()); - * - * do spawn { - * let shared_numbers=port.recv(); - * let local_numbers=shared_numbers.get(); - * - * // Work with the local numbers - * } - * } - * ~~~ - */ - -#[allow(missing_doc)]; - - -use sync; -use sync::{Mutex, mutex_with_condvars, RWlock, rwlock_with_condvars}; - -use std::cast; -use std::unstable::sync::UnsafeAtomicRcBox; -use std::task; -use std::borrow; - -/// As sync::condvar, a mechanism for unlock-and-descheduling and signaling. -pub struct Condvar<'self> { - is_mutex: bool, - failed: &'self mut bool, - cond: &'self sync::Condvar<'self> -} - -impl<'self> Condvar<'self> { - /// Atomically exit the associated ARC and block until a signal is sent. - #[inline] - pub fn wait(&self) { self.wait_on(0) } - - /** - * Atomically exit the associated ARC and block on a specified condvar - * until a signal is sent on that same condvar (as sync::cond.wait_on). - * - * wait() is equivalent to wait_on(0). - */ - #[inline] - pub fn wait_on(&self, condvar_id: uint) { - assert!(!*self.failed); - self.cond.wait_on(condvar_id); - // This is why we need to wrap sync::condvar. - check_poison(self.is_mutex, *self.failed); - } - - /// Wake up a blocked task. Returns false if there was no blocked task. - #[inline] - pub fn signal(&self) -> bool { self.signal_on(0) } - - /** - * Wake up a blocked task on a specified condvar (as - * sync::cond.signal_on). Returns false if there was no blocked task. - */ - #[inline] - pub fn signal_on(&self, condvar_id: uint) -> bool { - assert!(!*self.failed); - self.cond.signal_on(condvar_id) - } - - /// Wake up all blocked tasks. Returns the number of tasks woken. 
- #[inline] - pub fn broadcast(&self) -> uint { self.broadcast_on(0) } - - /** - * Wake up all blocked tasks on a specified condvar (as - * sync::cond.broadcast_on). Returns the number of tasks woken. - */ - #[inline] - pub fn broadcast_on(&self, condvar_id: uint) -> uint { - assert!(!*self.failed); - self.cond.broadcast_on(condvar_id) - } -} - -/**************************************************************************** - * Immutable ARC - ****************************************************************************/ - -/// An atomically reference counted wrapper for shared immutable state. -pub struct ARC { x: UnsafeAtomicRcBox } - -/// Create an atomically reference counted wrapper. -pub fn ARC(data: T) -> ARC { - ARC { x: UnsafeAtomicRcBox::new(data) } -} - -/** - * Access the underlying data in an atomically reference counted - * wrapper. - */ -impl ARC { - pub fn get<'a>(&'a self) -> &'a T { - unsafe { &*self.x.get_immut() } - } -} - -/** - * Duplicate an atomically reference counted wrapper. - * - * The resulting two `arc` objects will point to the same underlying data - * object. However, one of the `arc` objects can be sent to another task, - * allowing them to share the underlying data. - */ -impl Clone for ARC { - fn clone(&self) -> ARC { - ARC { x: self.x.clone() } - } -} - -/**************************************************************************** - * Mutex protected ARC (unsafe) - ****************************************************************************/ - -#[doc(hidden)] -struct MutexARCInner { lock: Mutex, failed: bool, data: T } -/// An ARC with mutable data protected by a blocking mutex. -struct MutexARC { x: UnsafeAtomicRcBox> } - -/// Create a mutex-protected ARC with the supplied data. -pub fn MutexARC(user_data: T) -> MutexARC { - mutex_arc_with_condvars(user_data, 1) -} -/** - * Create a mutex-protected ARC with the supplied data and a specified number - * of condvars (as sync::mutex_with_condvars). 
- */ -pub fn mutex_arc_with_condvars(user_data: T, - num_condvars: uint) -> MutexARC { - let data = - MutexARCInner { lock: mutex_with_condvars(num_condvars), - failed: false, data: user_data }; - MutexARC { x: UnsafeAtomicRcBox::new(data) } -} - -impl Clone for MutexARC { - /// Duplicate a mutex-protected ARC, as arc::clone. - fn clone(&self) -> MutexARC { - // NB: Cloning the underlying mutex is not necessary. Its reference - // count would be exactly the same as the shared state's. - MutexARC { x: self.x.clone() } - } -} - -impl MutexARC { - - /** - * Access the underlying mutable data with mutual exclusion from other - * tasks. The argument closure will be run with the mutex locked; all - * other tasks wishing to access the data will block until the closure - * finishes running. - * - * The reason this function is 'unsafe' is because it is possible to - * construct a circular reference among multiple ARCs by mutating the - * underlying data. This creates potential for deadlock, but worse, this - * will guarantee a memory leak of all involved ARCs. Using mutex ARCs - * inside of other ARCs is safe in absence of circular references. - * - * If you wish to nest mutex_arcs, one strategy for ensuring safety at - * runtime is to add a "nesting level counter" inside the stored data, and - * when traversing the arcs, assert that they monotonically decrease. - * - * # Failure - * - * Failing while inside the ARC will unlock the ARC while unwinding, so - * that other tasks won't block forever. It will also poison the ARC: - * any tasks that subsequently try to access it (including those already - * blocked on the mutex) will also fail immediately. - */ - #[inline] - pub unsafe fn access(&self, blk: &fn(x: &mut T) -> U) -> U { - let state = self.x.get(); - // Borrowck would complain about this if the function were - // not already unsafe. See borrow_rwlock, far below. 
- do (&(*state).lock).lock { - check_poison(true, (*state).failed); - let _z = PoisonOnFail(&mut (*state).failed); - blk(&mut (*state).data) - } - } - - /// As access(), but with a condvar, as sync::mutex.lock_cond(). - #[inline] - pub unsafe fn access_cond<'x, 'c, U>(&self, - blk: &fn(x: &'x mut T, - c: &'c Condvar) -> U) - -> U { - let state = self.x.get(); - do (&(*state).lock).lock_cond |cond| { - check_poison(true, (*state).failed); - let _z = PoisonOnFail(&mut (*state).failed); - blk(&mut (*state).data, - &Condvar {is_mutex: true, - failed: &mut (*state).failed, - cond: cond }) - } - } -} - -// Common code for {mutex.access,rwlock.write}{,_cond}. -#[inline] -#[doc(hidden)] -fn check_poison(is_mutex: bool, failed: bool) { - if failed { - if is_mutex { - fail!("Poisoned MutexARC - another task failed inside!"); - } else { - fail!("Poisoned rw_arc - another task failed inside!"); - } - } -} - -#[doc(hidden)] -struct PoisonOnFail { - failed: *mut bool, -} - -impl Drop for PoisonOnFail { - fn drop(&self) { - unsafe { - /* assert!(!*self.failed); - -- might be false in case of cond.wait() */ - if task::failing() { - *self.failed = true; - } - } - } -} - -fn PoisonOnFail<'r>(failed: &'r mut bool) -> PoisonOnFail { - PoisonOnFail { - failed: failed - } -} - -/**************************************************************************** - * R/W lock protected ARC - ****************************************************************************/ - -#[doc(hidden)] -struct RWARCInner { lock: RWlock, failed: bool, data: T } -/** - * A dual-mode ARC protected by a reader-writer lock. The data can be accessed - * mutably or immutably, and immutably-accessing tasks may run concurrently. - * - * Unlike mutex_arcs, rw_arcs are safe, because they cannot be nested. - */ -#[mutable] // XXX remove after snap -#[no_freeze] -struct RWARC { - x: UnsafeAtomicRcBox>, -} - -/// Create a reader/writer ARC with the supplied data. 
-pub fn RWARC(user_data: T) -> RWARC { - rw_arc_with_condvars(user_data, 1) -} -/** - * Create a reader/writer ARC with the supplied data and a specified number - * of condvars (as sync::rwlock_with_condvars). - */ -pub fn rw_arc_with_condvars( - user_data: T, - num_condvars: uint) -> RWARC -{ - let data = - RWARCInner { lock: rwlock_with_condvars(num_condvars), - failed: false, data: user_data }; - RWARC { x: UnsafeAtomicRcBox::new(data), } -} - -impl RWARC { - /// Duplicate a rwlock-protected ARC, as arc::clone. - pub fn clone(&self) -> RWARC { - RWARC { - x: self.x.clone(), - } - } - -} - -impl RWARC { - /** - * Access the underlying data mutably. Locks the rwlock in write mode; - * other readers and writers will block. - * - * # Failure - * - * Failing while inside the ARC will unlock the ARC while unwinding, so - * that other tasks won't block forever. As MutexARC.access, it will also - * poison the ARC, so subsequent readers and writers will both also fail. - */ - #[inline] - pub fn write(&self, blk: &fn(x: &mut T) -> U) -> U { - unsafe { - let state = self.x.get(); - do (*borrow_rwlock(state)).write { - check_poison(false, (*state).failed); - let _z = PoisonOnFail(&mut (*state).failed); - blk(&mut (*state).data) - } - } - } - - /// As write(), but with a condvar, as sync::rwlock.write_cond(). - #[inline] - pub fn write_cond<'x, 'c, U>(&self, - blk: &fn(x: &'x mut T, c: &'c Condvar) -> U) - -> U { - unsafe { - let state = self.x.get(); - do (*borrow_rwlock(state)).write_cond |cond| { - check_poison(false, (*state).failed); - let _z = PoisonOnFail(&mut (*state).failed); - blk(&mut (*state).data, - &Condvar {is_mutex: false, - failed: &mut (*state).failed, - cond: cond}) - } - } - } - - /** - * Access the underlying data immutably. May run concurrently with other - * reading tasks. - * - * # Failure - * - * Failing will unlock the ARC while unwinding. However, unlike all other - * access modes, this will not poison the ARC. 
- */ - pub fn read(&self, blk: &fn(x: &T) -> U) -> U { - unsafe { - let state = self.x.get(); - do (*state).lock.read { - check_poison(false, (*state).failed); - blk(&(*state).data) - } - } - } - - /** - * As write(), but with the ability to atomically 'downgrade' the lock. - * See sync::rwlock.write_downgrade(). The RWWriteMode token must be used - * to obtain the &mut T, and can be transformed into a RWReadMode token by - * calling downgrade(), after which a &T can be obtained instead. - * - * # Example - * - * ~~~ {.rust} - * do arc.write_downgrade |mut write_token| { - * do write_token.write_cond |state, condvar| { - * ... exclusive access with mutable state ... - * } - * let read_token = arc.downgrade(write_token); - * do read_token.read |state| { - * ... shared access with immutable state ... - * } - * } - * ~~~ - */ - pub fn write_downgrade(&self, blk: &fn(v: RWWriteMode) -> U) -> U { - unsafe { - let state = self.x.get(); - do (*borrow_rwlock(state)).write_downgrade |write_mode| { - check_poison(false, (*state).failed); - blk(RWWriteMode { - data: &mut (*state).data, - token: write_mode, - poison: PoisonOnFail(&mut (*state).failed) - }) - } - } - } - - /// To be called inside of the write_downgrade block. - pub fn downgrade<'a>(&self, token: RWWriteMode<'a, T>) - -> RWReadMode<'a, T> { - unsafe { - // The rwlock should assert that the token belongs to us for us. - let state = self.x.get(); - let RWWriteMode { - data: data, - token: t, - poison: _poison - } = token; - // Let readers in - let new_token = (*state).lock.downgrade(t); - // Whatever region the input reference had, it will be safe to use - // the same region for the output reference. (The only 'unsafe' part - // of this cast is removing the mutability.) - let new_data = cast::transmute_immut(data); - // Downgrade ensured the token belonged to us. Just a sanity check. 
- assert!(borrow::ref_eq(&(*state).data, new_data)); - // Produce new token - RWReadMode { - data: new_data, - token: new_token, - } - } - } -} - -// Borrowck rightly complains about immutably aliasing the rwlock in order to -// lock it. This wraps the unsafety, with the justification that the 'lock' -// field is never overwritten; only 'failed' and 'data'. -#[doc(hidden)] -fn borrow_rwlock(state: *mut RWARCInner) -> *RWlock { - unsafe { cast::transmute(&(*state).lock) } -} - -/// The "write permission" token used for RWARC.write_downgrade(). -pub struct RWWriteMode<'self, T> { - data: &'self mut T, - token: sync::RWlockWriteMode<'self>, - poison: PoisonOnFail, -} - -/// The "read permission" token used for RWARC.write_downgrade(). -pub struct RWReadMode<'self, T> { - data: &'self T, - token: sync::RWlockReadMode<'self>, -} - -impl<'self, T:Freeze + Send> RWWriteMode<'self, T> { - /// Access the pre-downgrade RWARC in write mode. - pub fn write(&mut self, blk: &fn(x: &mut T) -> U) -> U { - match *self { - RWWriteMode { - data: &ref mut data, - token: ref token, - poison: _ - } => { - do token.write { - blk(data) - } - } - } - } - - /// Access the pre-downgrade RWARC in write mode with a condvar. - pub fn write_cond<'x, 'c, U>(&mut self, - blk: &fn(x: &'x mut T, c: &'c Condvar) -> U) - -> U { - match *self { - RWWriteMode { - data: &ref mut data, - token: ref token, - poison: ref poison - } => { - do token.write_cond |cond| { - unsafe { - let cvar = Condvar { - is_mutex: false, - failed: &mut *poison.failed, - cond: cond - }; - blk(data, &cvar) - } - } - } - } - } -} - -impl<'self, T:Freeze + Send> RWReadMode<'self, T> { - /// Access the post-downgrade rwlock in read mode. 
- pub fn read(&self, blk: &fn(x: &T) -> U) -> U { - match *self { - RWReadMode { - data: data, - token: ref token - } => { - do token.read { blk(data) } - } - } - } -} - -/**************************************************************************** - * Tests - ****************************************************************************/ - -#[cfg(test)] -mod tests { - - use arc::*; - - use std::cell::Cell; - use std::comm; - use std::task; - use std::uint; - - #[test] - fn manually_share_arc() { - let v = ~[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; - let arc_v = ARC(v); - - let (p, c) = comm::stream(); - - do task::spawn() || { - let p = comm::PortSet::new(); - c.send(p.chan()); - - let arc_v : ARC<~[int]> = p.recv(); - - let v = (*arc_v.get()).clone(); - assert_eq!(v[3], 4); - }; - - let c = p.recv(); - c.send(arc_v.clone()); - - assert_eq!(arc_v.get()[2], 3); - assert_eq!(arc_v.get()[4], 5); - - info!(arc_v); - } - - #[test] - fn test_mutex_arc_condvar() { - unsafe { - let arc = ~MutexARC(false); - let arc2 = ~arc.clone(); - let (p,c) = comm::oneshot(); - let (c,p) = (Cell::new(c), Cell::new(p)); - do task::spawn || { - // wait until parent gets in - comm::recv_one(p.take()); - do arc2.access_cond |state, cond| { - *state = true; - cond.signal(); - } - } - do arc.access_cond |state, cond| { - comm::send_one(c.take(), ()); - assert!(!*state); - while !*state { - cond.wait(); - } - } - } - } - #[test] #[should_fail] #[ignore(cfg(windows))] - fn test_arc_condvar_poison() { - unsafe { - let arc = ~MutexARC(1); - let arc2 = ~arc.clone(); - let (p, c) = comm::stream(); - - do task::spawn_unlinked || { - let _ = p.recv(); - do arc2.access_cond |one, cond| { - cond.signal(); - // Parent should fail when it wakes up. 
- assert_eq!(*one, 0); - } - } - - do arc.access_cond |one, cond| { - c.send(()); - while *one == 1 { - cond.wait(); - } - } - } - } - #[test] #[should_fail] #[ignore(cfg(windows))] - fn test_mutex_arc_poison() { - unsafe { - let arc = ~MutexARC(1); - let arc2 = ~arc.clone(); - do task::try || { - do arc2.access |one| { - assert_eq!(*one, 2); - } - }; - do arc.access |one| { - assert_eq!(*one, 1); - } - } - } - #[test] #[should_fail] #[ignore(cfg(windows))] - fn test_rw_arc_poison_wr() { - let arc = ~RWARC(1); - let arc2 = (*arc).clone(); - do task::try || { - do arc2.write |one| { - assert_eq!(*one, 2); - } - }; - do arc.read |one| { - assert_eq!(*one, 1); - } - } - #[test] #[should_fail] #[ignore(cfg(windows))] - fn test_rw_arc_poison_ww() { - let arc = ~RWARC(1); - let arc2 = (*arc).clone(); - do task::try || { - do arc2.write |one| { - assert_eq!(*one, 2); - } - }; - do arc.write |one| { - assert_eq!(*one, 1); - } - } - #[test] #[should_fail] #[ignore(cfg(windows))] - fn test_rw_arc_poison_dw() { - let arc = ~RWARC(1); - let arc2 = (*arc).clone(); - do task::try || { - do arc2.write_downgrade |mut write_mode| { - do write_mode.write |one| { - assert_eq!(*one, 2); - } - } - }; - do arc.write |one| { - assert_eq!(*one, 1); - } - } - #[test] #[ignore(cfg(windows))] - fn test_rw_arc_no_poison_rr() { - let arc = ~RWARC(1); - let arc2 = (*arc).clone(); - do task::try || { - do arc2.read |one| { - assert_eq!(*one, 2); - } - }; - do arc.read |one| { - assert_eq!(*one, 1); - } - } - #[test] #[ignore(cfg(windows))] - fn test_rw_arc_no_poison_rw() { - let arc = ~RWARC(1); - let arc2 = (*arc).clone(); - do task::try || { - do arc2.read |one| { - assert_eq!(*one, 2); - } - }; - do arc.write |one| { - assert_eq!(*one, 1); - } - } - #[test] #[ignore(cfg(windows))] - fn test_rw_arc_no_poison_dr() { - let arc = ~RWARC(1); - let arc2 = (*arc).clone(); - do task::try || { - do arc2.write_downgrade |write_mode| { - let read_mode = arc2.downgrade(write_mode); - do (&read_mode).read 
|one| { - assert_eq!(*one, 2); - } - } - }; - do arc.write |one| { - assert_eq!(*one, 1); - } - } - #[test] - fn test_rw_arc() { - let arc = ~RWARC(0); - let arc2 = (*arc).clone(); - let (p,c) = comm::stream(); - - do task::spawn || { - do arc2.write |num| { - for 10.times { - let tmp = *num; - *num = -1; - task::yield(); - *num = tmp + 1; - } - c.send(()); - } - } - - // Readers try to catch the writer in the act - let mut children = ~[]; - for 5.times { - let arc3 = (*arc).clone(); - let mut builder = task::task(); - builder.future_result(|r| children.push(r)); - do builder.spawn { - do arc3.read |num| { - assert!(*num >= 0); - } - } - } - - // Wait for children to pass their asserts - for children.iter().advance |r| { - r.recv(); - } - - // Wait for writer to finish - p.recv(); - do arc.read |num| { - assert_eq!(*num, 10); - } - } - #[test] - fn test_rw_downgrade() { - // (1) A downgrader gets in write mode and does cond.wait. - // (2) A writer gets in write mode, sets state to 42, and does signal. - // (3) Downgrader wakes, sets state to 31337. - // (4) tells writer and all other readers to contend as it downgrades. - // (5) Writer attempts to set state back to 42, while downgraded task - // and all reader tasks assert that it's 31337. 
- let arc = ~RWARC(0); - - // Reader tasks - let mut reader_convos = ~[]; - for 10.times { - let ((rp1,rc1),(rp2,rc2)) = (comm::stream(),comm::stream()); - reader_convos.push((rc1, rp2)); - let arcn = (*arc).clone(); - do task::spawn || { - rp1.recv(); // wait for downgrader to give go-ahead - do arcn.read |state| { - assert_eq!(*state, 31337); - rc2.send(()); - } - } - } - - // Writer task - let arc2 = (*arc).clone(); - let ((wp1,wc1),(wp2,wc2)) = (comm::stream(),comm::stream()); - do task::spawn || { - wp1.recv(); - do arc2.write_cond |state, cond| { - assert_eq!(*state, 0); - *state = 42; - cond.signal(); - } - wp1.recv(); - do arc2.write |state| { - // This shouldn't happen until after the downgrade read - // section, and all other readers, finish. - assert_eq!(*state, 31337); - *state = 42; - } - wc2.send(()); - } - - // Downgrader (us) - do arc.write_downgrade |mut write_mode| { - do write_mode.write_cond |state, cond| { - wc1.send(()); // send to another writer who will wake us up - while *state == 0 { - cond.wait(); - } - assert_eq!(*state, 42); - *state = 31337; - // FIXME: #7372: hits type inference bug with iterators - // send to other readers - for uint::range(0, reader_convos.len()) |i| { - match reader_convos[i] { - (ref rc, _) => rc.send(()), - } - } - } - let read_mode = arc.downgrade(write_mode); - do (&read_mode).read |state| { - // FIXME: #7372: hits type inference bug with iterators - // complete handshake with other readers - for uint::range(0, reader_convos.len()) |i| { - match reader_convos[i] { - (_, ref rp) => rp.recv(), - } - } - wc1.send(()); // tell writer to try again - assert_eq!(*state, 31337); - } - } - - wp2.recv(); // complete handshake with writer - } - #[cfg(test)] - fn test_rw_write_cond_downgrade_read_race_helper() { - // Tests that when a downgrader hands off the "reader cloud" lock - // because of a contending reader, a writer can't race to get it - // instead, which would result in readers_and_writers. 
This tests - // the sync module rather than this one, but it's here because an - // rwarc gives us extra shared state to help check for the race. - // If you want to see this test fail, go to sync.rs and replace the - // line in RWlock::write_cond() that looks like: - // "blk(&Condvar { order: opt_lock, ..*cond })" - // with just "blk(cond)". - let x = ~RWARC(true); - let (wp, wc) = comm::stream(); - - // writer task - let xw = (*x).clone(); - do task::spawn { - do xw.write_cond |state, c| { - wc.send(()); // tell downgrader it's ok to go - c.wait(); - // The core of the test is here: the condvar reacquire path - // must involve order_lock, so that it cannot race with a reader - // trying to receive the "reader cloud lock hand-off". - *state = false; - } - } - - wp.recv(); // wait for writer to get in - - do x.write_downgrade |mut write_mode| { - do write_mode.write_cond |state, c| { - assert!(*state); - // make writer contend in the cond-reacquire path - c.signal(); - } - // make a reader task to trigger the "reader cloud lock" handoff - let xr = (*x).clone(); - let (rp, rc) = comm::stream(); - do task::spawn { - rc.send(()); - do xr.read |_state| { } - } - rp.recv(); // wait for reader task to exist - - let read_mode = x.downgrade(write_mode); - do read_mode.read |state| { - // if writer mistakenly got in, make sure it mutates state - // before we assert on it - for 5.times { task::yield(); } - // make sure writer didn't get in. - assert!(*state); - } - } - } - #[test] - fn test_rw_write_cond_downgrade_read_race() { - // Ideally the above test case would have yield statements in it that - // helped to expose the race nearly 100% of the time... but adding - // yields in the intuitively-right locations made it even less likely, - // and I wasn't sure why :( . This is a mediocre "next best" option. 
- for 8.times { test_rw_write_cond_downgrade_read_race_helper() } - } -} diff --git a/src/libextra/extra.rs b/src/libextra/extra.rs index 66ee52f53db31..13cfa9a1d1d41 100644 --- a/src/libextra/extra.rs +++ b/src/libextra/extra.rs @@ -58,8 +58,9 @@ pub mod rc; // Concurrency +#[path = "sync/mod.rs"] pub mod sync; -pub mod arc; + pub mod comm; pub mod future; pub mod task_pool; diff --git a/src/libextra/sync.rs b/src/libextra/sync.rs deleted file mode 100644 index 632f5d7827d4c..0000000000000 --- a/src/libextra/sync.rs +++ /dev/null @@ -1,1444 +0,0 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -/** - * The concurrency primitives you know and love. - * - * Maybe once we have a "core exports x only to std" mechanism, these can be - * in std. - */ - - -use std::borrow; -use std::comm; -use std::task; -use std::unstable::sync::{Exclusive, exclusive, UnsafeAtomicRcBox}; -use std::unstable::atomics; -use std::util; - -/**************************************************************************** - * Internals - ****************************************************************************/ - -// Each waiting task receives on one of these. -#[doc(hidden)] -type WaitEnd = comm::PortOne<()>; -#[doc(hidden)] -type SignalEnd = comm::ChanOne<()>; -// A doubly-ended queue of waiting tasks. -#[doc(hidden)] -struct Waitqueue { head: comm::Port, - tail: comm::Chan } - -#[doc(hidden)] -fn new_waitqueue() -> Waitqueue { - let (block_head, block_tail) = comm::stream(); - Waitqueue { head: block_head, tail: block_tail } -} - -// Signals one live task from the queue. 
-#[doc(hidden)] -fn signal_waitqueue(q: &Waitqueue) -> bool { - // The peek is mandatory to make sure recv doesn't block. - if q.head.peek() { - // Pop and send a wakeup signal. If the waiter was killed, its port - // will have closed. Keep trying until we get a live task. - if comm::try_send_one(q.head.recv(), ()) { - true - } else { - signal_waitqueue(q) - } - } else { - false - } -} - -#[doc(hidden)] -fn broadcast_waitqueue(q: &Waitqueue) -> uint { - let mut count = 0; - while q.head.peek() { - if comm::try_send_one(q.head.recv(), ()) { - count += 1; - } - } - count -} - -// The building-block used to make semaphores, mutexes, and rwlocks. -#[doc(hidden)] -struct SemInner { - count: int, - waiters: Waitqueue, - // Can be either unit or another waitqueue. Some sems shouldn't come with - // a condition variable attached, others should. - blocked: Q -} - -#[doc(hidden)] -struct Sem(Exclusive>); - -#[doc(hidden)] -fn new_sem(count: int, q: Q) -> Sem { - Sem(exclusive(SemInner { - count: count, waiters: new_waitqueue(), blocked: q })) -} -#[doc(hidden)] -fn new_sem_and_signal(count: int, num_condvars: uint) - -> Sem<~[Waitqueue]> { - let mut queues = ~[]; - for num_condvars.times { - queues.push(new_waitqueue()); - } - new_sem(count, queues) -} - -#[doc(hidden)] -impl Sem { - pub fn acquire(&self) { - unsafe { - let mut waiter_nobe = None; - do (**self).with |state| { - state.count -= 1; - if state.count < 0 { - // Create waiter nobe. - let (WaitEnd, SignalEnd) = comm::oneshot(); - // Tell outer scope we need to block. - waiter_nobe = Some(WaitEnd); - // Enqueue ourself. - state.waiters.tail.send(SignalEnd); - } - } - // Uncomment if you wish to test for sem races. Not valgrind-friendly. - /* for 1000.times { task::yield(); } */ - // Need to wait outside the exclusive. 
- if waiter_nobe.is_some() { - let _ = comm::recv_one(waiter_nobe.unwrap()); - } - } - } - - pub fn release(&self) { - unsafe { - do (**self).with |state| { - state.count += 1; - if state.count <= 0 { - signal_waitqueue(&state.waiters); - } - } - } - } -} -// FIXME(#3154) move both copies of this into Sem, and unify the 2 structs -#[doc(hidden)] -impl Sem<()> { - pub fn access(&self, blk: &fn() -> U) -> U { - let mut release = None; - unsafe { - do task::unkillable { - self.acquire(); - release = Some(SemRelease(self)); - } - } - blk() - } -} - -#[doc(hidden)] -impl Sem<~[Waitqueue]> { - pub fn access_waitqueue(&self, blk: &fn() -> U) -> U { - let mut release = None; - unsafe { - do task::unkillable { - self.acquire(); - release = Some(SemAndSignalRelease(self)); - } - } - blk() - } -} - -// FIXME(#3588) should go inside of access() -#[doc(hidden)] -type SemRelease<'self> = SemReleaseGeneric<'self, ()>; -#[doc(hidden)] -type SemAndSignalRelease<'self> = SemReleaseGeneric<'self, ~[Waitqueue]>; -#[doc(hidden)] -struct SemReleaseGeneric<'self, Q> { sem: &'self Sem } - -#[doc(hidden)] -#[unsafe_destructor] -impl<'self, Q:Send> Drop for SemReleaseGeneric<'self, Q> { - fn drop(&self) { - self.sem.release(); - } -} - -#[doc(hidden)] -fn SemRelease<'r>(sem: &'r Sem<()>) -> SemRelease<'r> { - SemReleaseGeneric { - sem: sem - } -} - -#[doc(hidden)] -fn SemAndSignalRelease<'r>(sem: &'r Sem<~[Waitqueue]>) - -> SemAndSignalRelease<'r> { - SemReleaseGeneric { - sem: sem - } -} - -// FIXME(#3598): Want to use an Option down below, but we need a custom enum -// that's not polymorphic to get around the fact that lifetimes are invariant -// inside of type parameters. -enum ReacquireOrderLock<'self> { - Nothing, // c.c - Just(&'self Semaphore), -} - -/// A mechanism for atomic-unlock-and-deschedule blocking and signalling. -pub struct Condvar<'self> { - // The 'Sem' object associated with this condvar. 
This is the one that's - // atomically-unlocked-and-descheduled upon and reacquired during wakeup. - priv sem: &'self Sem<~[Waitqueue]>, - // This is (can be) an extra semaphore which is held around the reacquire - // operation on the first one. This is only used in cvars associated with - // rwlocks, and is needed to ensure that, when a downgrader is trying to - // hand off the access lock (which would be the first field, here), a 2nd - // writer waking up from a cvar wait can't race with a reader to steal it, - // See the comment in write_cond for more detail. - priv order: ReacquireOrderLock<'self>, -} - -#[unsafe_destructor] -impl<'self> Drop for Condvar<'self> { fn drop(&self) {} } - -impl<'self> Condvar<'self> { - /** - * Atomically drop the associated lock, and block until a signal is sent. - * - * # Failure - * A task which is killed (i.e., by linked failure with another task) - * while waiting on a condition variable will wake up, fail, and unlock - * the associated lock as it unwinds. - */ - pub fn wait(&self) { self.wait_on(0) } - - /** - * As wait(), but can specify which of multiple condition variables to - * wait on. Only a signal_on() or broadcast_on() with the same condvar_id - * will wake this thread. - * - * The associated lock must have been initialised with an appropriate - * number of condvars. The condvar_id must be between 0 and num_condvars-1 - * or else this call will fail. - * - * wait() is equivalent to wait_on(0). - */ - pub fn wait_on(&self, condvar_id: uint) { - // Create waiter nobe. - let (WaitEnd, SignalEnd) = comm::oneshot(); - let mut WaitEnd = Some(WaitEnd); - let mut SignalEnd = Some(SignalEnd); - let mut reacquire = None; - let mut out_of_bounds = None; - unsafe { - do task::unkillable { - // Release lock, 'atomically' enqueuing ourselves in so doing. - do (**self.sem).with |state| { - if condvar_id < state.blocked.len() { - // Drop the lock. 
- state.count += 1; - if state.count <= 0 { - signal_waitqueue(&state.waiters); - } - // Enqueue ourself to be woken up by a signaller. - let SignalEnd = SignalEnd.take_unwrap(); - state.blocked[condvar_id].tail.send(SignalEnd); - } else { - out_of_bounds = Some(state.blocked.len()); - } - } - - // If yield checks start getting inserted anywhere, we can be - // killed before or after enqueueing. Deciding whether to - // unkillably reacquire the lock needs to happen atomically - // wrt enqueuing. - if out_of_bounds.is_none() { - reacquire = Some(CondvarReacquire { sem: self.sem, - order: self.order }); - } - } - } - do check_cvar_bounds(out_of_bounds, condvar_id, "cond.wait_on()") { - // Unconditionally "block". (Might not actually block if a - // signaller already sent -- I mean 'unconditionally' in contrast - // with acquire().) - let _ = comm::recv_one(WaitEnd.take_unwrap()); - } - - // This is needed for a failing condition variable to reacquire the - // mutex during unwinding. As long as the wrapper (mutex, etc) is - // bounded in when it gets released, this shouldn't hang forever. - struct CondvarReacquire<'self> { - sem: &'self Sem<~[Waitqueue]>, - order: ReacquireOrderLock<'self>, - } - - #[unsafe_destructor] - impl<'self> Drop for CondvarReacquire<'self> { - fn drop(&self) { - unsafe { - // Needs to succeed, instead of itself dying. - do task::unkillable { - match self.order { - Just(lock) => do lock.access { - self.sem.acquire(); - }, - Nothing => { - self.sem.acquire(); - }, - } - } - } - } - } - } - - /// Wake up a blocked task. Returns false if there was no blocked task. - pub fn signal(&self) -> bool { self.signal_on(0) } - - /// As signal, but with a specified condvar_id. See wait_on. 
- pub fn signal_on(&self, condvar_id: uint) -> bool { - unsafe { - let mut out_of_bounds = None; - let mut result = false; - do (**self.sem).with |state| { - if condvar_id < state.blocked.len() { - result = signal_waitqueue(&state.blocked[condvar_id]); - } else { - out_of_bounds = Some(state.blocked.len()); - } - } - do check_cvar_bounds(out_of_bounds, condvar_id, "cond.signal_on()") { - result - } - } - } - - /// Wake up all blocked tasks. Returns the number of tasks woken. - pub fn broadcast(&self) -> uint { self.broadcast_on(0) } - - /// As broadcast, but with a specified condvar_id. See wait_on. - pub fn broadcast_on(&self, condvar_id: uint) -> uint { - let mut out_of_bounds = None; - let mut queue = None; - unsafe { - do (**self.sem).with |state| { - if condvar_id < state.blocked.len() { - // To avoid :broadcast_heavy, we make a new waitqueue, - // swap it out with the old one, and broadcast on the - // old one outside of the little-lock. - queue = Some(util::replace(&mut state.blocked[condvar_id], - new_waitqueue())); - } else { - out_of_bounds = Some(state.blocked.len()); - } - } - do check_cvar_bounds(out_of_bounds, condvar_id, "cond.signal_on()") { - let queue = queue.take_unwrap(); - broadcast_waitqueue(&queue) - } - } - } -} - -// Checks whether a condvar ID was out of bounds, and fails if so, or does -// something else next on success. -#[inline] -#[doc(hidden)] -fn check_cvar_bounds(out_of_bounds: Option, id: uint, act: &str, - blk: &fn() -> U) -> U { - match out_of_bounds { - Some(0) => - fail!("%s with illegal ID %u - this lock has no condvars!", act, id), - Some(length) => - fail!("%s with illegal ID %u - ID must be less than %u", act, id, length), - None => blk() - } -} - -#[doc(hidden)] -impl Sem<~[Waitqueue]> { - // The only other places that condvars get built are rwlock.write_cond() - // and rwlock_write_mode. 
- pub fn access_cond(&self, blk: &fn(c: &Condvar) -> U) -> U { - do self.access_waitqueue { - blk(&Condvar { sem: self, order: Nothing }) - } - } -} - -/**************************************************************************** - * Semaphores - ****************************************************************************/ - -/// A counting, blocking, bounded-waiting semaphore. -struct Semaphore { priv sem: Sem<()> } - -/// Create a new semaphore with the specified count. -pub fn semaphore(count: int) -> Semaphore { - Semaphore { sem: new_sem(count, ()) } -} - -impl Clone for Semaphore { - /// Create a new handle to the semaphore. - fn clone(&self) -> Semaphore { - Semaphore { sem: Sem((*self.sem).clone()) } - } -} - -impl Semaphore { - /** - * Acquire a resource represented by the semaphore. Blocks if necessary - * until resource(s) become available. - */ - pub fn acquire(&self) { (&self.sem).acquire() } - - /** - * Release a held resource represented by the semaphore. Wakes a blocked - * contending task, if any exist. Won't block the caller. - */ - pub fn release(&self) { (&self.sem).release() } - - /// Run a function with ownership of one of the semaphore's resources. - pub fn access(&self, blk: &fn() -> U) -> U { (&self.sem).access(blk) } -} - -/**************************************************************************** - * Mutexes - ****************************************************************************/ - -/** - * A blocking, bounded-waiting, mutual exclusion lock with an associated - * FIFO condition variable. - * - * # Failure - * A task which fails while holding a mutex will unlock the mutex as it - * unwinds. - */ -pub struct Mutex { priv sem: Sem<~[Waitqueue]> } - -/// Create a new mutex, with one associated condvar. -pub fn Mutex() -> Mutex { mutex_with_condvars(1) } -/** - * Create a new mutex, with a specified number of associated condvars. 
This - * will allow calling wait_on/signal_on/broadcast_on with condvar IDs between - * 0 and num_condvars-1. (If num_condvars is 0, lock_cond will be allowed but - * any operations on the condvar will fail.) - */ -pub fn mutex_with_condvars(num_condvars: uint) -> Mutex { - Mutex { sem: new_sem_and_signal(1, num_condvars) } -} - -impl Clone for Mutex { - /// Create a new handle to the mutex. - fn clone(&self) -> Mutex { Mutex { sem: Sem((*self.sem).clone()) } } -} - -impl Mutex { - /// Run a function with ownership of the mutex. - pub fn lock(&self, blk: &fn() -> U) -> U { - (&self.sem).access_waitqueue(blk) - } - - /// Run a function with ownership of the mutex and a handle to a condvar. - pub fn lock_cond(&self, blk: &fn(c: &Condvar) -> U) -> U { - (&self.sem).access_cond(blk) - } -} - -/**************************************************************************** - * Reader-writer locks - ****************************************************************************/ - -// NB: Wikipedia - Readers-writers_problem#The_third_readers-writers_problem - -#[doc(hidden)] -struct RWlockInner { - // You might ask, "Why don't you need to use an atomic for the mode flag?" - // This flag affects the behaviour of readers (for plain readers, they - // assert on it; for downgraders, they use it to decide which mode to - // unlock for). Consider that the flag is only unset when the very last - // reader exits; therefore, it can never be unset during a reader/reader - // (or reader/downgrader) race. - // By the way, if we didn't care about the assert in the read unlock path, - // we could instead store the mode flag in write_downgrade's stack frame, - // and have the downgrade tokens store a borrowed pointer to it. - read_mode: bool, - // The only way the count flag is ever accessed is with xadd. 
Since it is - // a read-modify-write operation, multiple xadds on different cores will - // always be consistent with respect to each other, so a monotonic/relaxed - // consistency ordering suffices (i.e., no extra barriers are needed). - // FIXME(#6598): The atomics module has no relaxed ordering flag, so I use - // acquire/release orderings superfluously. Change these someday. - read_count: atomics::AtomicUint, -} - -/** - * A blocking, no-starvation, reader-writer lock with an associated condvar. - * - * # Failure - * A task which fails while holding an rwlock will unlock the rwlock as it - * unwinds. - */ -pub struct RWlock { - priv order_lock: Semaphore, - priv access_lock: Sem<~[Waitqueue]>, - priv state: UnsafeAtomicRcBox, -} - -/// Create a new rwlock, with one associated condvar. -pub fn RWlock() -> RWlock { rwlock_with_condvars(1) } - -/** - * Create a new rwlock, with a specified number of associated condvars. - * Similar to mutex_with_condvars. - */ -pub fn rwlock_with_condvars(num_condvars: uint) -> RWlock { - let state = UnsafeAtomicRcBox::new(RWlockInner { - read_mode: false, - read_count: atomics::AtomicUint::new(0), - }); - RWlock { order_lock: semaphore(1), - access_lock: new_sem_and_signal(1, num_condvars), - state: state, } -} - -impl RWlock { - /// Create a new handle to the rwlock. - pub fn clone(&self) -> RWlock { - RWlock { order_lock: (&(self.order_lock)).clone(), - access_lock: Sem((*self.access_lock).clone()), - state: self.state.clone() } - } - - /** - * Run a function with the rwlock in read mode. Calls to 'read' from other - * tasks may run concurrently with this one. 
- */ - pub fn read(&self, blk: &fn() -> U) -> U { - let mut release = None; - unsafe { - do task::unkillable { - do (&self.order_lock).access { - let state = &mut *self.state.get(); - let old_count = state.read_count.fetch_add(1, atomics::Acquire); - if old_count == 0 { - (&self.access_lock).acquire(); - state.read_mode = true; - } - } - release = Some(RWlockReleaseRead(self)); - } - } - blk() - } - - /** - * Run a function with the rwlock in write mode. No calls to 'read' or - * 'write' from other tasks will run concurrently with this one. - */ - pub fn write(&self, blk: &fn() -> U) -> U { - unsafe { - do task::unkillable { - (&self.order_lock).acquire(); - do (&self.access_lock).access_waitqueue { - (&self.order_lock).release(); - do task::rekillable { - blk() - } - } - } - } - } - - /** - * As write(), but also with a handle to a condvar. Waiting on this - * condvar will allow readers and writers alike to take the rwlock before - * the waiting task is signalled. (Note: a writer that waited and then - * was signalled might reacquire the lock before other waiting writers.) - */ - pub fn write_cond(&self, blk: &fn(c: &Condvar) -> U) -> U { - // It's important to thread our order lock into the condvar, so that - // when a cond.wait() wakes up, it uses it while reacquiring the - // access lock. If we permitted a waking-up writer to "cut in line", - // there could arise a subtle race when a downgrader attempts to hand - // off the reader cloud lock to a waiting reader. This race is tested - // in arc.rs (test_rw_write_cond_downgrade_read_race) and looks like: - // T1 (writer) T2 (downgrader) T3 (reader) - // [in cond.wait()] - // [locks for writing] - // [holds access_lock] - // [is signalled, perhaps by - // downgrader or a 4th thread] - // tries to lock access(!) 
- // lock order_lock - // xadd read_count[0->1] - // tries to lock access - // [downgrade] - // xadd read_count[1->2] - // unlock access - // Since T1 contended on the access lock before T3 did, it will steal - // the lock handoff. Adding order_lock in the condvar reacquire path - // solves this because T1 will hold order_lock while waiting on access, - // which will cause T3 to have to wait until T1 finishes its write, - // which can't happen until T2 finishes the downgrade-read entirely. - // The astute reader will also note that making waking writers use the - // order_lock is better for not starving readers. - unsafe { - do task::unkillable { - (&self.order_lock).acquire(); - do (&self.access_lock).access_cond |cond| { - (&self.order_lock).release(); - do task::rekillable { - let opt_lock = Just(&self.order_lock); - blk(&Condvar { order: opt_lock, ..*cond }) - } - } - } - } - } - - /** - * As write(), but with the ability to atomically 'downgrade' the lock; - * i.e., to become a reader without letting other writers get the lock in - * the meantime (such as unlocking and then re-locking as a reader would - * do). The block takes a "write mode token" argument, which can be - * transformed into a "read mode token" by calling downgrade(). Example: - * - * # Example - * - * ~~~ {.rust} - * do lock.write_downgrade |mut write_token| { - * do write_token.write_cond |condvar| { - * ... exclusive access ... - * } - * let read_token = lock.downgrade(write_token); - * do read_token.read { - * ... shared access ... - * } - * } - * ~~~ - */ - pub fn write_downgrade(&self, blk: &fn(v: RWlockWriteMode) -> U) -> U { - // Implementation slightly different from the slicker 'write's above. - // The exit path is conditional on whether the caller downgrades. 
- let mut _release = None; - unsafe { - do task::unkillable { - (&self.order_lock).acquire(); - (&self.access_lock).acquire(); - (&self.order_lock).release(); - } - _release = Some(RWlockReleaseDowngrade(self)); - } - blk(RWlockWriteMode { lock: self }) - } - - /// To be called inside of the write_downgrade block. - pub fn downgrade<'a>(&self, token: RWlockWriteMode<'a>) - -> RWlockReadMode<'a> { - if !borrow::ref_eq(self, token.lock) { - fail!("Can't downgrade() with a different rwlock's write_mode!"); - } - unsafe { - do task::unkillable { - let state = &mut *self.state.get(); - assert!(!state.read_mode); - state.read_mode = true; - // If a reader attempts to enter at this point, both the - // downgrader and reader will set the mode flag. This is fine. - let old_count = state.read_count.fetch_add(1, atomics::Release); - // If another reader was already blocking, we need to hand-off - // the "reader cloud" access lock to them. - if old_count != 0 { - // Guaranteed not to let another writer in, because - // another reader was holding the order_lock. Hence they - // must be the one to get the access_lock (because all - // access_locks are acquired with order_lock held). See - // the comment in write_cond for more justification. - (&self.access_lock).release(); - } - } - } - RWlockReadMode { lock: token.lock } - } -} - -// FIXME(#3588) should go inside of read() -#[doc(hidden)] -struct RWlockReleaseRead<'self> { - lock: &'self RWlock, -} - -#[doc(hidden)] -#[unsafe_destructor] -impl<'self> Drop for RWlockReleaseRead<'self> { - fn drop(&self) { - unsafe { - do task::unkillable { - let state = &mut *self.lock.state.get(); - assert!(state.read_mode); - let old_count = state.read_count.fetch_sub(1, atomics::Release); - assert!(old_count > 0); - if old_count == 1 { - state.read_mode = false; - // Note: this release used to be outside of a locked access - // to exclusive-protected state. 
If this code is ever - // converted back to such (instead of using atomic ops), - // this access MUST NOT go inside the exclusive access. - (&self.lock.access_lock).release(); - } - } - } - } -} - -#[doc(hidden)] -fn RWlockReleaseRead<'r>(lock: &'r RWlock) -> RWlockReleaseRead<'r> { - RWlockReleaseRead { - lock: lock - } -} - -// FIXME(#3588) should go inside of downgrade() -#[doc(hidden)] -#[unsafe_destructor] -struct RWlockReleaseDowngrade<'self> { - lock: &'self RWlock, -} - -#[doc(hidden)] -#[unsafe_destructor] -impl<'self> Drop for RWlockReleaseDowngrade<'self> { - fn drop(&self) { - unsafe { - do task::unkillable { - let writer_or_last_reader; - // Check if we're releasing from read mode or from write mode. - let state = &mut *self.lock.state.get(); - if state.read_mode { - // Releasing from read mode. - let old_count = state.read_count.fetch_sub(1, atomics::Release); - assert!(old_count > 0); - // Check if other readers remain. - if old_count == 1 { - // Case 1: Writer downgraded & was the last reader - writer_or_last_reader = true; - state.read_mode = false; - } else { - // Case 2: Writer downgraded & was not the last reader - writer_or_last_reader = false; - } - } else { - // Case 3: Writer did not downgrade - writer_or_last_reader = true; - } - if writer_or_last_reader { - // Nobody left inside; release the "reader cloud" lock. - (&self.lock.access_lock).release(); - } - } - } - } -} - -#[doc(hidden)] -fn RWlockReleaseDowngrade<'r>(lock: &'r RWlock) - -> RWlockReleaseDowngrade<'r> { - RWlockReleaseDowngrade { - lock: lock - } -} - -/// The "write permission" token used for rwlock.write_downgrade(). -pub struct RWlockWriteMode<'self> { priv lock: &'self RWlock } -#[unsafe_destructor] -impl<'self> Drop for RWlockWriteMode<'self> { fn drop(&self) {} } - -/// The "read permission" token used for rwlock.write_downgrade(). 
-pub struct RWlockReadMode<'self> { priv lock: &'self RWlock } -#[unsafe_destructor] -impl<'self> Drop for RWlockReadMode<'self> { fn drop(&self) {} } - -impl<'self> RWlockWriteMode<'self> { - /// Access the pre-downgrade rwlock in write mode. - pub fn write(&self, blk: &fn() -> U) -> U { blk() } - /// Access the pre-downgrade rwlock in write mode with a condvar. - pub fn write_cond(&self, blk: &fn(c: &Condvar) -> U) -> U { - // Need to make the condvar use the order lock when reacquiring the - // access lock. See comment in RWlock::write_cond for why. - blk(&Condvar { sem: &self.lock.access_lock, - order: Just(&self.lock.order_lock), }) - } -} - -impl<'self> RWlockReadMode<'self> { - /// Access the post-downgrade rwlock in read mode. - pub fn read(&self, blk: &fn() -> U) -> U { blk() } -} - -/**************************************************************************** - * Tests - ****************************************************************************/ - -#[cfg(test)] -mod tests { - - use sync::*; - - use std::cast; - use std::cell::Cell; - use std::comm; - use std::result; - use std::task; - - /************************************************************************ - * Semaphore tests - ************************************************************************/ - #[test] - fn test_sem_acquire_release() { - let s = ~semaphore(1); - s.acquire(); - s.release(); - s.acquire(); - } - #[test] - fn test_sem_basic() { - let s = ~semaphore(1); - do s.access { } - } - #[test] - fn test_sem_as_mutex() { - let s = ~semaphore(1); - let s2 = ~s.clone(); - do task::spawn || { - do s2.access { - for 5.times { task::yield(); } - } - } - do s.access { - for 5.times { task::yield(); } - } - } - #[test] - fn test_sem_as_cvar() { - /* Child waits and parent signals */ - let (p,c) = comm::stream(); - let s = ~semaphore(0); - let s2 = ~s.clone(); - do task::spawn || { - s2.acquire(); - c.send(()); - } - for 5.times { task::yield(); } - s.release(); - let _ = p.recv(); - - /* Parent 
waits and child signals */ - let (p,c) = comm::stream(); - let s = ~semaphore(0); - let s2 = ~s.clone(); - do task::spawn || { - for 5.times { task::yield(); } - s2.release(); - let _ = p.recv(); - } - s.acquire(); - c.send(()); - } - #[test] - fn test_sem_multi_resource() { - // Parent and child both get in the critical section at the same - // time, and shake hands. - let s = ~semaphore(2); - let s2 = ~s.clone(); - let (p1,c1) = comm::stream(); - let (p2,c2) = comm::stream(); - do task::spawn || { - do s2.access { - let _ = p2.recv(); - c1.send(()); - } - } - do s.access { - c2.send(()); - let _ = p1.recv(); - } - } - #[test] - fn test_sem_runtime_friendly_blocking() { - // Force the runtime to schedule two threads on the same sched_loop. - // When one blocks, it should schedule the other one. - do task::spawn_sched(task::ManualThreads(1)) { - let s = ~semaphore(1); - let s2 = ~s.clone(); - let (p,c) = comm::stream(); - let child_data = Cell::new((s2, c)); - do s.access { - let (s2, c) = child_data.take(); - do task::spawn || { - c.send(()); - do s2.access { } - c.send(()); - } - let _ = p.recv(); // wait for child to come alive - for 5.times { task::yield(); } // let the child contend - } - let _ = p.recv(); // wait for child to be done - } - } - /************************************************************************ - * Mutex tests - ************************************************************************/ - #[test] - fn test_mutex_lock() { - // Unsafely achieve shared state, and do the textbook - // "load tmp = move ptr; inc tmp; store ptr <- tmp" dance. 
- let (p,c) = comm::stream(); - let m = ~Mutex(); - let m2 = m.clone(); - let mut sharedstate = ~0; - { - let ptr: *int = &*sharedstate; - do task::spawn || { - let sharedstate: &mut int = - unsafe { cast::transmute(ptr) }; - access_shared(sharedstate, m2, 10); - c.send(()); - - } - } - { - access_shared(sharedstate, m, 10); - let _ = p.recv(); - - assert_eq!(*sharedstate, 20); - } - - fn access_shared(sharedstate: &mut int, m: &Mutex, n: uint) { - for n.times { - do m.lock { - let oldval = *sharedstate; - task::yield(); - *sharedstate = oldval + 1; - } - } - } - } - #[test] - fn test_mutex_cond_wait() { - let m = ~Mutex(); - - // Child wakes up parent - do m.lock_cond |cond| { - let m2 = ~m.clone(); - do task::spawn || { - do m2.lock_cond |cond| { - let woken = cond.signal(); - assert!(woken); - } - } - cond.wait(); - } - // Parent wakes up child - let (port,chan) = comm::stream(); - let m3 = ~m.clone(); - do task::spawn || { - do m3.lock_cond |cond| { - chan.send(()); - cond.wait(); - chan.send(()); - } - } - let _ = port.recv(); // Wait until child gets in the mutex - do m.lock_cond |cond| { - let woken = cond.signal(); - assert!(woken); - } - let _ = port.recv(); // Wait until child wakes up - } - #[cfg(test)] - fn test_mutex_cond_broadcast_helper(num_waiters: uint) { - let m = ~Mutex(); - let mut ports = ~[]; - - for num_waiters.times { - let mi = ~m.clone(); - let (port, chan) = comm::stream(); - ports.push(port); - do task::spawn || { - do mi.lock_cond |cond| { - chan.send(()); - cond.wait(); - chan.send(()); - } - } - } - - // wait until all children get in the mutex - for ports.iter().advance |port| { let _ = port.recv(); } - do m.lock_cond |cond| { - let num_woken = cond.broadcast(); - assert_eq!(num_woken, num_waiters); - } - // wait until all children wake up - for ports.iter().advance |port| { let _ = port.recv(); } - } - #[test] - fn test_mutex_cond_broadcast() { - test_mutex_cond_broadcast_helper(12); - } - #[test] - fn 
test_mutex_cond_broadcast_none() { - test_mutex_cond_broadcast_helper(0); - } - #[test] - fn test_mutex_cond_no_waiter() { - let m = ~Mutex(); - let m2 = ~m.clone(); - do task::try || { - do m.lock_cond |_x| { } - }; - do m2.lock_cond |cond| { - assert!(!cond.signal()); - } - } - #[test] #[ignore(cfg(windows))] - fn test_mutex_killed_simple() { - // Mutex must get automatically unlocked if failed/killed within. - let m = ~Mutex(); - let m2 = ~m.clone(); - - let result: result::Result<(),()> = do task::try || { - do m2.lock { - fail!(); - } - }; - assert!(result.is_err()); - // child task must have finished by the time try returns - do m.lock { } - } - #[test] #[ignore(cfg(windows))] - fn test_mutex_killed_cond() { - // Getting killed during cond wait must not corrupt the mutex while - // unwinding (e.g. double unlock). - let m = ~Mutex(); - let m2 = ~m.clone(); - - let result: result::Result<(),()> = do task::try || { - let (p,c) = comm::stream(); - do task::spawn || { // linked - let _ = p.recv(); // wait for sibling to get in the mutex - task::yield(); - fail!(); - } - do m2.lock_cond |cond| { - c.send(()); // tell sibling go ahead - cond.wait(); // block forever - } - }; - assert!(result.is_err()); - // child task must have finished by the time try returns - do m.lock_cond |cond| { - let woken = cond.signal(); - assert!(!woken); - } - } - #[test] #[ignore(cfg(windows))] - fn test_mutex_killed_broadcast() { - let m = ~Mutex(); - let m2 = ~m.clone(); - let (p,c) = comm::stream(); - - let result: result::Result<(),()> = do task::try || { - let mut sibling_convos = ~[]; - for 2.times { - let (p,c) = comm::stream(); - let c = Cell::new(c); - sibling_convos.push(p); - let mi = ~m2.clone(); - // spawn sibling task - do task::spawn { // linked - do mi.lock_cond |cond| { - let c = c.take(); - c.send(()); // tell sibling to go ahead - let _z = SendOnFailure(c); - cond.wait(); // block forever - } - } - } - for sibling_convos.iter().advance |p| { - let _ = p.recv(); // 
wait for sibling to get in the mutex - } - do m2.lock { } - c.send(sibling_convos); // let parent wait on all children - fail!(); - }; - assert!(result.is_err()); - // child task must have finished by the time try returns - let r = p.recv(); - for r.iter().advance |p| { p.recv(); } // wait on all its siblings - do m.lock_cond |cond| { - let woken = cond.broadcast(); - assert_eq!(woken, 0); - } - struct SendOnFailure { - c: comm::Chan<()>, - } - - impl Drop for SendOnFailure { - fn drop(&self) { - self.c.send(()); - } - } - - fn SendOnFailure(c: comm::Chan<()>) -> SendOnFailure { - SendOnFailure { - c: c - } - } - } - #[test] - fn test_mutex_cond_signal_on_0() { - // Tests that signal_on(0) is equivalent to signal(). - let m = ~Mutex(); - do m.lock_cond |cond| { - let m2 = ~m.clone(); - do task::spawn || { - do m2.lock_cond |cond| { - cond.signal_on(0); - } - } - cond.wait(); - } - } - #[test] #[ignore(cfg(windows))] - fn test_mutex_different_conds() { - let result = do task::try { - let m = ~mutex_with_condvars(2); - let m2 = ~m.clone(); - let (p,c) = comm::stream(); - do task::spawn || { - do m2.lock_cond |cond| { - c.send(()); - cond.wait_on(1); - } - } - let _ = p.recv(); - do m.lock_cond |cond| { - if !cond.signal_on(0) { - fail!(); // success; punt sibling awake. 
- } - } - }; - assert!(result.is_err()); - } - #[test] #[ignore(cfg(windows))] - fn test_mutex_no_condvars() { - let result = do task::try { - let m = ~mutex_with_condvars(0); - do m.lock_cond |cond| { cond.wait(); } - }; - assert!(result.is_err()); - let result = do task::try { - let m = ~mutex_with_condvars(0); - do m.lock_cond |cond| { cond.signal(); } - }; - assert!(result.is_err()); - let result = do task::try { - let m = ~mutex_with_condvars(0); - do m.lock_cond |cond| { cond.broadcast(); } - }; - assert!(result.is_err()); - } - /************************************************************************ - * Reader/writer lock tests - ************************************************************************/ - #[cfg(test)] - pub enum RWlockMode { Read, Write, Downgrade, DowngradeRead } - #[cfg(test)] - fn lock_rwlock_in_mode(x: &RWlock, mode: RWlockMode, blk: &fn()) { - match mode { - Read => x.read(blk), - Write => x.write(blk), - Downgrade => - do x.write_downgrade |mode| { - do mode.write { blk() }; - }, - DowngradeRead => - do x.write_downgrade |mode| { - let mode = x.downgrade(mode); - do mode.read { blk() }; - }, - } - } - #[cfg(test)] - fn test_rwlock_exclusion(x: ~RWlock, - mode1: RWlockMode, - mode2: RWlockMode) { - // Test mutual exclusion between readers and writers. Just like the - // mutex mutual exclusion test, a ways above. 
- let (p,c) = comm::stream(); - let x2 = (*x).clone(); - let mut sharedstate = ~0; - { - let ptr: *int = &*sharedstate; - do task::spawn || { - let sharedstate: &mut int = - unsafe { cast::transmute(ptr) }; - access_shared(sharedstate, &x2, mode1, 10); - c.send(()); - } - } - { - access_shared(sharedstate, x, mode2, 10); - let _ = p.recv(); - - assert_eq!(*sharedstate, 20); - } - - fn access_shared(sharedstate: &mut int, x: &RWlock, mode: RWlockMode, - n: uint) { - for n.times { - do lock_rwlock_in_mode(x, mode) { - let oldval = *sharedstate; - task::yield(); - *sharedstate = oldval + 1; - } - } - } - } - #[test] - fn test_rwlock_readers_wont_modify_the_data() { - test_rwlock_exclusion(~RWlock(), Read, Write); - test_rwlock_exclusion(~RWlock(), Write, Read); - test_rwlock_exclusion(~RWlock(), Read, Downgrade); - test_rwlock_exclusion(~RWlock(), Downgrade, Read); - } - #[test] - fn test_rwlock_writers_and_writers() { - test_rwlock_exclusion(~RWlock(), Write, Write); - test_rwlock_exclusion(~RWlock(), Write, Downgrade); - test_rwlock_exclusion(~RWlock(), Downgrade, Write); - test_rwlock_exclusion(~RWlock(), Downgrade, Downgrade); - } - #[cfg(test)] - fn test_rwlock_handshake(x: ~RWlock, - mode1: RWlockMode, - mode2: RWlockMode, - make_mode2_go_first: bool) { - // Much like sem_multi_resource. - let x2 = (*x).clone(); - let (p1,c1) = comm::stream(); - let (p2,c2) = comm::stream(); - do task::spawn || { - if !make_mode2_go_first { - let _ = p2.recv(); // parent sends to us once it locks, or ... - } - do lock_rwlock_in_mode(&x2, mode2) { - if make_mode2_go_first { - c1.send(()); // ... we send to it once we lock - } - let _ = p2.recv(); - c1.send(()); - } - } - if make_mode2_go_first { - let _ = p1.recv(); // child sends to us once it locks, or ... - } - do lock_rwlock_in_mode(x, mode1) { - if !make_mode2_go_first { - c2.send(()); // ... 
we send to it once we lock - } - c2.send(()); - let _ = p1.recv(); - } - } - #[test] - fn test_rwlock_readers_and_readers() { - test_rwlock_handshake(~RWlock(), Read, Read, false); - // The downgrader needs to get in before the reader gets in, otherwise - // they cannot end up reading at the same time. - test_rwlock_handshake(~RWlock(), DowngradeRead, Read, false); - test_rwlock_handshake(~RWlock(), Read, DowngradeRead, true); - // Two downgrade_reads can never both end up reading at the same time. - } - #[test] - fn test_rwlock_downgrade_unlock() { - // Tests that downgrade can unlock the lock in both modes - let x = ~RWlock(); - do lock_rwlock_in_mode(x, Downgrade) { } - test_rwlock_handshake(x, Read, Read, false); - let y = ~RWlock(); - do lock_rwlock_in_mode(y, DowngradeRead) { } - test_rwlock_exclusion(y, Write, Write); - } - #[test] - fn test_rwlock_read_recursive() { - let x = ~RWlock(); - do x.read { do x.read { } } - } - #[test] - fn test_rwlock_cond_wait() { - // As test_mutex_cond_wait above. - let x = ~RWlock(); - - // Child wakes up parent - do x.write_cond |cond| { - let x2 = (*x).clone(); - do task::spawn || { - do x2.write_cond |cond| { - let woken = cond.signal(); - assert!(woken); - } - } - cond.wait(); - } - // Parent wakes up child - let (port,chan) = comm::stream(); - let x3 = (*x).clone(); - do task::spawn || { - do x3.write_cond |cond| { - chan.send(()); - cond.wait(); - chan.send(()); - } - } - let _ = port.recv(); // Wait until child gets in the rwlock - do x.read { } // Must be able to get in as a reader in the meantime - do x.write_cond |cond| { // Or as another writer - let woken = cond.signal(); - assert!(woken); - } - let _ = port.recv(); // Wait until child wakes up - do x.read { } // Just for good measure - } - #[cfg(test)] - fn test_rwlock_cond_broadcast_helper(num_waiters: uint, - dg1: bool, - dg2: bool) { - // Much like the mutex broadcast test. Downgrade-enabled. 
- fn lock_cond(x: &RWlock, downgrade: bool, blk: &fn(c: &Condvar)) { - if downgrade { - do x.write_downgrade |mode| { - do mode.write_cond |c| { blk(c) } - } - } else { - do x.write_cond |c| { blk(c) } - } - } - let x = ~RWlock(); - let mut ports = ~[]; - - for num_waiters.times { - let xi = (*x).clone(); - let (port, chan) = comm::stream(); - ports.push(port); - do task::spawn || { - do lock_cond(&xi, dg1) |cond| { - chan.send(()); - cond.wait(); - chan.send(()); - } - } - } - - // wait until all children get in the mutex - for ports.iter().advance |port| { let _ = port.recv(); } - do lock_cond(x, dg2) |cond| { - let num_woken = cond.broadcast(); - assert_eq!(num_woken, num_waiters); - } - // wait until all children wake up - for ports.iter().advance |port| { let _ = port.recv(); } - } - #[test] - fn test_rwlock_cond_broadcast() { - test_rwlock_cond_broadcast_helper(0, true, true); - test_rwlock_cond_broadcast_helper(0, true, false); - test_rwlock_cond_broadcast_helper(0, false, true); - test_rwlock_cond_broadcast_helper(0, false, false); - test_rwlock_cond_broadcast_helper(12, true, true); - test_rwlock_cond_broadcast_helper(12, true, false); - test_rwlock_cond_broadcast_helper(12, false, true); - test_rwlock_cond_broadcast_helper(12, false, false); - } - #[cfg(test)] #[ignore(cfg(windows))] - fn rwlock_kill_helper(mode1: RWlockMode, mode2: RWlockMode) { - // Mutex must get automatically unlocked if failed/killed within. 
- let x = ~RWlock(); - let x2 = (*x).clone(); - - let result: result::Result<(),()> = do task::try || { - do lock_rwlock_in_mode(&x2, mode1) { - fail!(); - } - }; - assert!(result.is_err()); - // child task must have finished by the time try returns - do lock_rwlock_in_mode(x, mode2) { } - } - #[test] #[ignore(cfg(windows))] - fn test_rwlock_reader_killed_writer() { - rwlock_kill_helper(Read, Write); - } - #[test] #[ignore(cfg(windows))] - fn test_rwlock_writer_killed_reader() { - rwlock_kill_helper(Write,Read ); - } - #[test] #[ignore(cfg(windows))] - fn test_rwlock_reader_killed_reader() { - rwlock_kill_helper(Read, Read ); - } - #[test] #[ignore(cfg(windows))] - fn test_rwlock_writer_killed_writer() { - rwlock_kill_helper(Write,Write); - } - #[test] #[ignore(cfg(windows))] - fn test_rwlock_kill_downgrader() { - rwlock_kill_helper(Downgrade, Read); - rwlock_kill_helper(Read, Downgrade); - rwlock_kill_helper(Downgrade, Write); - rwlock_kill_helper(Write, Downgrade); - rwlock_kill_helper(DowngradeRead, Read); - rwlock_kill_helper(Read, DowngradeRead); - rwlock_kill_helper(DowngradeRead, Write); - rwlock_kill_helper(Write, DowngradeRead); - rwlock_kill_helper(DowngradeRead, Downgrade); - rwlock_kill_helper(DowngradeRead, Downgrade); - rwlock_kill_helper(Downgrade, DowngradeRead); - rwlock_kill_helper(Downgrade, DowngradeRead); - } - #[test] #[should_fail] #[ignore(cfg(windows))] - fn test_rwlock_downgrade_cant_swap() { - // Tests that you can't downgrade with a different rwlock's token. - let x = ~RWlock(); - let y = ~RWlock(); - do x.write_downgrade |xwrite| { - let mut xopt = Some(xwrite); - do y.write_downgrade |_ywrite| { - y.downgrade(xopt.take_unwrap()); - error!("oops, y.downgrade(x) should have failed!"); - } - } - } -} diff --git a/src/libextra/sync/arc.rs b/src/libextra/sync/arc.rs new file mode 100644 index 0000000000000..f686a2650d593 --- /dev/null +++ b/src/libextra/sync/arc.rs @@ -0,0 +1,74 @@ +// Copyright 2012-2013 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::unstable::sync::UnsafeAtomicRcBox; + + +/// An atomically reference counted wrapper for shared immutable state. +pub struct Arc { priv contents: UnsafeAtomicRcBox } + +impl Arc { + /// Create an atomically reference counted wrapper. + #[inline] + pub fn new(value: T) -> Arc { + Arc { contents: UnsafeAtomicRcBox::new(value) } + } + + /** + * Access the underlying data in an atomically reference counted + * wrapper. + */ + #[inline] + pub fn get<'r>(&'r self) -> &'r T { unsafe { &*self.contents.get_immut() } } +} + +/** + * Duplicate an atomically reference counted wrapper. + * + * The resulting two `arc` objects will point to the same underlying data + * object. However, one of the `arc` objects can be sent to another task, + * allowing them to share the underlying data. +*/ +impl Clone for Arc { + #[inline] + fn clone(&self) -> Arc { + Arc { contents: self.contents.clone() } + } +} + + +#[test] +fn manually_share_arc() { + use std::comm; + use std::task; + + let v = ~[1u, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + let arc_v = Arc::new(v); + + let (p, c) = comm::stream(); + + do task::spawn { + let p = comm::PortSet::new(); + c.send(p.chan()); + + let arc_v: Arc<~[uint]> = p.recv(); + + let v = arc_v.get().clone(); + assert_eq!(v[3], 4); + }; + + let c = p.recv(); + c.send(arc_v.clone()); + + assert_eq!(arc_v.get()[2], 3); + assert_eq!(arc_v.get()[4], 5); + + info!(arc_v); +} \ No newline at end of file diff --git a/src/libextra/sync/condition.rs b/src/libextra/sync/condition.rs new file mode 100644 index 0000000000000..a3c61b0151086 --- /dev/null +++ b/src/libextra/sync/condition.rs @@ -0,0 +1,55 @@ +// Copyright 2013 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::cell::Cell; + +use sync::wait_queue::{WaitQueue, WaitEvent}; +use sync::unlock::{Unlock, ScopedUnlock}; + + +/// A wait queue that is strongly associated with a particular lock. +#[deriving(Clone)] +pub struct Condition { + priv wait_queue: WaitQueue, + priv lock: T, +} + +impl > Condition { + /// Create a condition + #[inline] + pub fn new(lock: T) -> Condition { + Condition { wait_queue: WaitQueue::new(), lock: lock } + } + + /// Wake up a blocked task. Returns false if there was no blocked + /// task. + #[inline] + pub fn signal(&self) -> bool { + self.wait_queue.signal() + } + + /// Wake up all tasks waiting on the condition. + #[inline] + pub fn broadcast(&self) -> uint { + self.wait_queue.broadcast() + } + + /// Wait on the condition, and unlock the associated lock. + #[inline] + pub fn wait(&mut self) { + let wait_event = self.wait_queue.wait_event(); + self.lock.unlock_with(wait_event, wait_on) + } +} + +#[inline] +fn wait_on(wait_event: WaitEvent) { + wait_event.wait() +} diff --git a/src/libextra/sync/mod.rs b/src/libextra/sync/mod.rs new file mode 100644 index 0000000000000..9c627cccc7134 --- /dev/null +++ b/src/libextra/sync/mod.rs @@ -0,0 +1,36 @@ +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +pub use self::unlock::Unlock; +pub use self::arc::Arc; +pub use self::wait_queue::WaitQueue; +pub use self::condition::Condition; +pub use self::semaphore::Semaphore; + +pub use self::mutex::{Mutex, Lock}; +pub use self::rwlock::{RWLock, ReadLock, WriteLock}; + +pub use self::shared_mut::mutex_arc::{MutexArc, Locked}; +pub use self::shared_mut::rwarc::{RWArc, ReadLocked, WriteLocked}; + + +mod unlock; +mod arc; +mod wait_queue; +mod condition; +mod semaphore; + +mod mutex; +mod rwlock; + +mod shared_mut { + mod mutex_arc; + mod rwarc; +} \ No newline at end of file diff --git a/src/libextra/sync/mutex.rs b/src/libextra/sync/mutex.rs new file mode 100644 index 0000000000000..9a2c154da309f --- /dev/null +++ b/src/libextra/sync/mutex.rs @@ -0,0 +1,357 @@ +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::unstable::finally::Finally; +use std::cell::Cell; + +use sync::semaphore::Semaphore; +use sync::unlock::Unlock; + + +/// A blocking, bounded-waiting, mutual exclusion lock. +#[deriving(Clone)] +pub struct Mutex { + priv semaphore: Semaphore +} + +impl Mutex { + /// Create a mutex. + #[inline] + pub fn new() -> Mutex { + Mutex { semaphore: Semaphore::new() } + } + + /** + * A convenience function to wrap the more complicated (but more + * powerful lock method.) Obtains a lock, and then invokes the blk + * argument. + */ + #[inline] + pub fn with_lock(&self, blk: &fn() -> U) -> U { + let _lock = self.lock(); + blk() + } + + + /** + * Obtain a mutual exclusion lock on the mutex. + * + * No other code can obtain a lock until the returned lock is + * released. 
If other code has obtained a lock this method blocks + * until the lock is released, and then obtains a lock. + */ + #[inline] + pub fn lock<'r>(&'r self) -> Lock<'r> { + self.acquire_lock(); + Lock { mutex: self } + } + + #[inline] + fn acquire_lock(&self) { self.semaphore.wait(); } + + #[inline] + fn release_lock(&self) { self.semaphore.signal(); } +} + + +/// A handle that guarantees that an associated mutex is in a locked +/// (exclusively accessed) state. +pub struct Lock<'self> { priv mutex: &'self Mutex } + +#[unsafe_destructor] +impl <'self> Drop for Lock<'self> { + // Don't inline this due to issue #7793 + fn drop(&self) { + self.mutex.release_lock() + } +} + +impl <'self> Unlock for Lock<'self> { + #[inline] + pub fn unlock(&mut self, blk: ~once fn() -> U) -> U { + self.mutex.release_lock(); + let cell = Cell::new(blk); + do (|| cell.take()()).finally { + self.mutex.acquire_lock() + } + } +} + + +#[cfg(test)] +mod tests { + use super::*; + + use std::comm; + use std::task; + use std::vec; + + use sync::wait_queue::WaitQueue; + + + #[test] + fn test_mutex_lock() { + // Unsafely achieve shared state, and do the textbook + // "load tmp = move ptr; inc tmp; store ptr <- tmp" dance. 
+ let (p, c) = comm::stream(); + let mutex = Mutex::new(); + let mutex_2 = mutex.clone(); + let mut sharedstate = ~0; + { + let ptr: *mut int = &mut *sharedstate; + do task::spawn { + let sharedstate: &mut int = unsafe { &mut *ptr }; + access_shared(sharedstate, &mutex_2, 10); + c.send(()); + + } + } + { + access_shared(sharedstate, &mutex, 10); + p.recv(); + + assert_eq!(*sharedstate, 20); + } + + fn access_shared(sharedstate: &mut int, m: &Mutex, n: uint) { + for n.times { + let _lock = m.lock(); + let oldval = *sharedstate; + task::yield(); + *sharedstate = oldval + 1; + } + } + } + + #[test] + fn test_mutex_child_wakes_up_parent() { + let mutex = Mutex::new(); + + let mut lock = mutex.lock(); + let condition = WaitQueue::new(); + + do task::spawn_with(( + mutex.clone(), + condition.clone() + )) |(mutex, condition)| { + // Wait until parent's lock is released to avoid a race + // condition + do mutex.with_lock { } + + let woke_up_parent = condition.signal(); + assert!(woke_up_parent); + } + condition.wait_with(&mut lock); + } + + fn test_mutex_parent_wakes_up_child() { + let (port, chan) = comm::stream(); + + let mutex = Mutex::new(); + + let condition = WaitQueue::new(); + + do task::spawn_with(condition.clone()) |condition| { + let mut lock = mutex.lock(); + chan.send(()); + condition.wait_with(&mut lock); + chan.send(()); + } + + port.recv(); // Wait until child gets in the mutex + + let woken = condition.signal(); + assert!(woken); + + port.recv(); // Wait until child wakes up + } + + fn test_mutex_cond_broadcast_helper(num_waiters: uint) { + let mutex = Mutex::new(); + let condition = WaitQueue::new(); + + let ports = do vec::build_sized(num_waiters) |ports_push| { + for num_waiters.times { + let (port, chan) = comm::stream(); + ports_push(port); + do task::spawn_with(( + mutex.clone(), + condition.clone() + )) |(mutex, condition)| { + let mut lock = mutex.lock(); + chan.send(()); + condition.wait_with(&mut lock); + chan.send(()); + } + } + }; + + // wait 
until all children get in the mutex + for ports.iter().advance |port| { port.recv(); } + let num_woken = condition.broadcast(); + assert_eq!(num_woken, num_waiters); + // wait until all children wake up + for ports.iter().advance |port| { port.recv(); } + } + #[test] + fn test_mutex_cond_broadcast() { + test_mutex_cond_broadcast_helper(12); + } + #[test] + fn test_mutex_cond_broadcast_none() { + test_mutex_cond_broadcast_helper(0); + } + #[test] + fn test_mutex_cond_no_waiter() { + let mutex = Mutex::new(); + let mutex2 = mutex.clone(); + + let condition = WaitQueue::new(); + + do task::try { + mutex2.lock(); + }; + + assert!(!condition.signal()); + } + + #[ignore(cfg(windows))] + mod tests_for_kill_supporting_platforms { + use super::super::*; + + use std::task; + use std::comm; + use std::cell::Cell; + + use sync::wait_queue::WaitQueue; + + + #[test] + fn test_mutex_killed_simple() { + // Mutex must get automatically unlocked if failed/killed within. + let mutex = Mutex::new(); + + let mutex2 = mutex.clone(); + let result: Result<(),()> = do task::try { + let _lock = mutex2.lock(); + fail!() + }; + assert!(result.is_err()); + + // child task must have finished by the time try returns + mutex.lock(); + } + #[test] + fn test_mutex_killed_cond() { + // Getting killed during cond wait must not corrupt the mutex while + // unwinding (e.g. double unlock). 
+ let mutex = Mutex::new(); + let condition = WaitQueue::new(); + + let mutex_2 = mutex.clone(); + let condition_2 = WaitQueue::new(); + let result: Result<(),()> = do task::try { + let (p, c) = comm::stream(); + do task::spawn { // linked + p.recv(); // wait for sibling to get in the mutex + task::yield(); + fail!(); + } + + let mut lock = mutex_2.lock(); + c.send(()); // tell sibling go ahead + condition_2.wait_with(&mut lock); // block forever + }; + assert!(result.is_err()); + + // child task must have finished by the time try returns + mutex.lock(); + + let woken = condition.signal(); + assert!(!woken); + } + #[test] + fn test_mutex_killed_broadcast() { + let mutex = Mutex::new(); + + let condition = WaitQueue::new(); + let (p, c) = comm::stream(); + + let mutex_2 = mutex.clone(); + let condition_2 = condition.clone(); + let result: Result<(),()> = do task::try { + let mut sibling_convos = ~[]; + for 2.times { + let (p,c) = comm::stream(); + let c = Cell::new(c); + sibling_convos.push(p); + let mutex_i = mutex_2.clone(); + let condition_i = condition_2.clone(); + // spawn sibling task + do task::spawn { // linked + let mut lock = mutex_i.lock(); + let c = c.take(); + c.send(()); // tell sibling to go ahead + let _z = SendOnFailure(c); + condition_i.wait_with(&mut lock); // block forever + } + } + for sibling_convos.iter().advance |p| { + p.recv(); // wait for sibling to get in the mutex + } + mutex_2.lock(); + c.send(sibling_convos); // let parent wait on all children + fail!(); + }; + assert!(result.is_err()); + // child task must have finished by the time try returns + let r = p.recv(); + for r.iter().advance |p| { p.recv(); } // wait on all its siblings + + + let woken = condition.broadcast(); + assert_eq!(woken, 0); + + struct SendOnFailure { + c: comm::Chan<()>, + } + + impl Drop for SendOnFailure { + fn drop(&self) { + self.c.send(()); + } + } + + fn SendOnFailure(c: comm::Chan<()>) -> SendOnFailure { + SendOnFailure { + c: c + } + } + } + #[test] 
#[should_fail] + fn test_mutex_different_conds() { + let condition_1 = WaitQueue::new(); + let condition_2 = WaitQueue::new(); + let mutex = Mutex::new(); + + let (p, c) = comm::stream(); + + do task::spawn_with(mutex.clone()) |mutex| { + let mut lock = mutex.lock(); + c.send(()); + condition_2.wait_with(&mut lock); + } + + p.recv(); + + assert!(condition_1.signal()) + } + } +} \ No newline at end of file diff --git a/src/libextra/sync/rwlock.rs b/src/libextra/sync/rwlock.rs new file mode 100644 index 0000000000000..c01ed4bc9fc66 --- /dev/null +++ b/src/libextra/sync/rwlock.rs @@ -0,0 +1,445 @@ +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::unstable::sync::{UnsafeAtomicRcBox}; +use std::unstable::finally::Finally; +use std::unstable::atomics; +use std::cell::Cell; +use std::task; + +use sync::semaphore::Semaphore; +use sync::mutex::Mutex; +use sync::unlock::Unlock; + + +struct RWLockInner { + // You might ask, "Why don't you need to use an atomic for the mode flag?" + // This flag affects the behaviour of readers (for plain readers, they + // assert on it; for downgraders, they use it to decide which mode to + // unlock for). Consider that the flag is only unset when the very last + // reader exits; therefore, it can never be unset during a reader/reader + // (or reader/downgrader) race. + // By the way, if we didn't care about the assert in the read unlock path, + // we could instead store the mode flag in write_downgrade's stack frame, + // and have the downgrade tokens store a borrowed pointer to it. + read_mode: bool, + // The only way the count flag is ever accessed is with xadd. 
Since it is + // a read-modify-write operation, multiple xadds on different cores will + // always be consistent with respect to each other, so a monotonic/relaxed + // consistency ordering suffices (i.e., no extra barriers are needed). + // FIXME(#6598): The atomics module has no relaxed ordering flag, so I use + // acquire/release orderings superfluously. Change these someday. + read_count: atomics::AtomicUint +} + +/// A blocking, no-starvation, readers-writer lock. +#[deriving(Clone)] +pub struct RWLock { + priv order_lock: Mutex, + priv access_lock: Semaphore, + priv state: UnsafeAtomicRcBox +} + +impl RWLock { + /// Create a readers-writer lock. + #[inline] + pub fn new() -> RWLock { + RWLock { + order_lock: Mutex::new(), + access_lock: Semaphore::new(), + state: UnsafeAtomicRcBox::new(RWLockInner { + read_mode: false, + read_count: atomics::AtomicUint::new(0) + }) + } + } + + /** + * A convenience function to wrap the more complicated (but more + * powerful read_lock method.) Obtains a read lock, and then + * invokes the blk argument. + */ + #[inline] + pub fn with_read_lock(&self, blk: &fn() -> U) -> U { + let _lock = self.read_lock(); + blk() + } + + /** + * A convenience function to wrap the more complicated (but more + * powerful write_lock method.) Obtains a write lock, and then + * invokes the blk argument. + */ + #[inline] + pub fn with_write_lock(&self, blk: &fn() -> U) -> U { + let _lock = self.write_lock(); + blk() + } + + + /** + * Obtain read access to the lock. + * + * Other tasks can hold read access to the lock at the same + * time. Read access to a lock prevents tasks from acquiring write + * access. If any task has write access to the lock this method + * blocks until the write access is released. 
+ */ + #[inline] + pub fn read_lock<'r>(&'r self) -> ReadLock <'r> { + unsafe { + self.acquire_read_lock(); + ReadLock { rwlock: self } + } + } + + #[inline] + unsafe fn acquire_read_lock(&self) { + do task::unkillable { + let _lock = self.order_lock.lock(); + + let state = &mut *self.state.get(); + let old_count = state.read_count.fetch_add(1, atomics::Acquire); + if old_count == 0 { + self.access_lock.wait(); + state.read_mode = true; + } + } + } + + #[inline] + unsafe fn release_read_lock(&self) { + do task::unkillable { + let state = &mut *self.state.get(); + assert!(state.read_mode); + let old_count = state.read_count.fetch_sub(1, atomics::Release); + + assert!(old_count > 0); + if old_count == 1 { + state.read_mode = false; + // Note: this release used to be outside of a locked access + // to exclusive-protected state. If this code is ever + // converted back to such (instead of using atomic ops), + // this access MUST NOT go inside the exclusive access. + self.access_lock.signal(); + } + } + } + + + /** + * Obtain write access to the lock. + * + * No other tasks access the lock while the write access is + * held. This method blocks until other tasks are done accessing + * the lock. + */ + #[inline] + pub fn write_lock<'r>(&'r self) -> WriteLock<'r> { + unsafe { + self.acquire_write_lock(); + WriteLock { rwlock: self, downgraded: false } + } + } + + #[inline] + unsafe fn acquire_write_lock(&self) { + do task::unkillable { + let _lock = self.order_lock.lock(); + self.access_lock.wait() + } + } + + #[inline] + unsafe fn release_write_lock(&self) { + do task::unkillable { + self.access_lock.signal(); + } + } + + #[inline] + unsafe fn downgrade_write_lock(&self) { + do task::unkillable { + let state = &mut *self.state.get(); + assert!(!state.read_mode); + state.read_mode = true; + // If a reader attempts to enter at this point, both the + // downgrader and reader will set the mode flag. This is fine. 
+ let old_count = state.read_count.fetch_add(1, atomics::Release); + + // If another reader was already blocking, we need to hand-off + // the "reader cloud" access lock to them. + if old_count != 0 { + // Guaranteed not to let another writer in, because + // another reader was holding the order_lock. Hence they + // must be the one to get the access_lock (because all + // access_locks are acquired with order_lock held). See + // the comment in write_cond for more justification. + self.access_lock.signal(); + } + } + } +} + + +/// A handle on a rwlock value which guarantees read access to it. +pub struct ReadLock<'self> { priv rwlock: &'self RWLock } +#[unsafe_destructor] +impl <'self> Drop for ReadLock<'self> { + // Don't inline this due to issue #7793 + fn drop(&self) { + unsafe { + self.rwlock.release_read_lock() + } + } +} + +impl <'self> Unlock for ReadLock<'self> { + #[inline] + pub fn unlock(&mut self, blk: ~once fn() -> U) -> U { + unsafe { + self.rwlock.release_read_lock(); + let cell = Cell::new(blk); + do (|| cell.take()()).finally { + self.rwlock.acquire_read_lock() + } + } + } +} + + + +/// A handle to a rwlock which guarantees write access to it. +pub struct WriteLock<'self> { priv rwlock: &'self RWLock, priv downgraded: bool } +#[unsafe_destructor] +impl <'self> Drop for WriteLock<'self> { + // Don't inline this due to issue #7793 + fn drop(&self) { + unsafe { + if !self.downgraded { + self.rwlock.release_write_lock() + } + } + } +} + +impl <'self> WriteLock<'self> { + /// Downgrade a write lock to a read lock. 
+ #[inline] + pub fn downgrade(self) -> ReadLock<'self> { + unsafe { + let mut writelock = self; + + writelock.rwlock.downgrade_write_lock(); + writelock.downgraded = true; + + ReadLock { rwlock: writelock.rwlock } + } + } +} + +impl <'self> Unlock for WriteLock<'self> { + #[inline] + pub fn unlock(&mut self, blk: ~once fn() -> U) -> U { + unsafe { + self.rwlock.release_write_lock(); + let cell = Cell::new(blk); + do (|| cell.take()()).finally { + self.rwlock.acquire_write_lock() + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use std::comm; + use std::task; + use std::vec; + + use sync::unlock::Unlock; + + + fn nest_read_locks(nesting: uint) { + let rwlock = RWLock::new(); + + let read_locks = do vec::build_sized(nesting) |push| { + for nesting.times { + push(rwlock.read_lock()) + } + }; + + // Destroy the read locks + let _lock = read_locks; + } + + #[test] + fn test_rwlock_can_read_lock() { nest_read_locks(1) } + + #[test] + fn test_rwlock_can_nest_read_locks() { nest_read_locks(2) } + + #[test] + fn test_rwlock_can_nest_read_locks_more_than_once() { nest_read_locks(5) } + + + pub enum RWLockMode { Read, Write, Downgrade } + + pub fn lock_rwlock_in_mode(x: &RWLock, mode: RWLockMode, blk: &fn()) { + match mode { + Read => do x.with_read_lock { + blk() + }, + Write => do x.with_write_lock { + blk() + }, + Downgrade => { + let write_lock = x.write_lock(); + let _lock = write_lock.downgrade(); + blk() + } + } + } + fn test_rwlock_handshake(x: RWLock, + mode1: RWLockMode, + mode2: RWLockMode, + make_mode2_go_first: bool) { + // Much like sem_multi_resource. + let x2 = x.clone(); + let (p1, c1) = comm::stream(); + let (p2, c2) = comm::stream(); + do task::spawn { + if !make_mode2_go_first { + p2.recv(); // parent sends to us once it locks, or ... + } + do lock_rwlock_in_mode(&x2, mode2) { + if make_mode2_go_first { + c1.send(()); // ... 
we send to it once we lock + } + p2.recv(); + c1.send(()); + } + } + if make_mode2_go_first { + p1.recv(); // child sends to us once it locks, or ... + } + do lock_rwlock_in_mode(&x, mode1) { + if !make_mode2_go_first { + c2.send(()); // ... we send to it once we lock + } + c2.send(()); + p1.recv(); + } + } + #[test] + fn test_rwlock_readers_and_readers() { + test_rwlock_handshake(RWLock::new(), Read, Read, false); + // The downgrader needs to get in before the reader gets in, otherwise + // they cannot end up reading at the same time. + test_rwlock_handshake(RWLock::new(), Downgrade, Read, false); + test_rwlock_handshake(RWLock::new(), Read, Downgrade, true); + // Two downgrade can never both end up reading at the same time. + } + #[test] + fn test_rwlock_downgrade_unlock_read() { + let x = RWLock::new(); + + { + let write_lock = x.write_lock(); + let _lock = write_lock.downgrade(); + } + + test_rwlock_handshake(x, Read, Read, false); + } + + #[test] + fn test_unlock_lets_others_acquire_lock() { + let rwlock = RWLock::new(); + let (unlocked_port, unlocked_chan) = comm::oneshot(); + let (locked_port, locked_chan) = comm::oneshot(); + + let mut write_lock = rwlock.write_lock(); + + do task::spawn_with(( + rwlock.clone(), + unlocked_port, + locked_chan + )) |(rwlock, unlocked_port, locked_chan)| { + comm::recv_one(unlocked_port); + let _write_lock = rwlock.write_lock(); + comm::send_one(locked_chan, ()) + } + + do write_lock.unlock { + comm::send_one(unlocked_chan, ()); + comm::recv_one(locked_port) + } + } + + #[ignore(cfg(windows))] + mod try_supporting_platforms_only { + use super::super::*; + use super::*; + + use std::task; + + + fn rwlock_kill_helper(mode1: RWLockMode, mode2: RWLockMode) { + // Mutex must get automatically unlocked if failed/killed within. 
+ let mutex = RWLock::new(); + + let mutex_2 = mutex.clone(); + let result: Result<(),()> = do task::try { + do lock_rwlock_in_mode(&mutex_2, mode1) { + fail!(); + } + }; + assert!(result.is_err()); + + // child task must have finished by the time try returns + do lock_rwlock_in_mode(&mutex, mode2) { } + } + + #[test] + fn test_rwlock_reader_killed_writer() { + rwlock_kill_helper(Read, Write) + } + #[test] + fn test_rwlock_writer_killed_reader() { + rwlock_kill_helper(Write, Read) + } + #[test] + fn test_rwlock_reader_killed_reader() { + rwlock_kill_helper(Read, Read); + } + #[test] + fn test_rwlock_writer_killed_writer() { + rwlock_kill_helper(Write, Write); + } + #[test] + fn test_rwlock_downgrader_killed_read() { + rwlock_kill_helper(Downgrade, Read) + } + #[test] + fn test_rwlock_read_killed_downgrader() { + rwlock_kill_helper(Read, Downgrade) + } + #[test] + fn test_rwlock_downgrader_killed_writer() { + rwlock_kill_helper(Downgrade, Write) + } + #[test] + fn test_rwlock_writer_killed_downgrader() { + rwlock_kill_helper(Write, Downgrade) + } + } +} \ No newline at end of file diff --git a/src/libextra/sync/semaphore.rs b/src/libextra/sync/semaphore.rs new file mode 100644 index 0000000000000..5cc3b4b432248 --- /dev/null +++ b/src/libextra/sync/semaphore.rs @@ -0,0 +1,117 @@ +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#[allow(missing_doc)]; + +use std::unstable::sync::{Exclusive, exclusive}; + +use sync::wait_queue::WaitQueue; + +/// A counting semaphore. +#[deriving(Clone)] +pub struct Semaphore { + priv count: Exclusive, + priv waiters: WaitQueue +} + +impl Semaphore { + /// Create a counting semaphore that starts with a count of 1. 
+ #[inline] + pub fn new() -> Semaphore { + Semaphore::new_with_count(1) + } + + /// Create a counting semaphore + #[inline] + pub fn new_with_count(count: int) -> Semaphore { + Semaphore { + count: exclusive(count), + waiters: WaitQueue::new() + } + } + + #[inline] + pub fn wait(&self) { + unsafe { + let maybe_wait_event = do self.count.with |c| { + *c -= 1; + if *c >= 0 { None } else { + Some(self.waiters.wait_event()) + } + }; + + match maybe_wait_event { + // Ordering of waits matters so this must start inside + // the exclusive lock + None => {}, + Some(wait_event) => wait_event.wait() + } + } + } + + /// Wake up a blocked task. Returns false if there was no blocked + /// task. + #[inline] + pub fn signal(&self) -> bool { + unsafe { + let someone_was_waiting = do self.count.with |c| { + *c += 1; + *c > 0 + }; + + // Ordering of signals doesn't matter so this can be + // dragged out of the exclusive lock + if someone_was_waiting { false } else { + self.waiters.signal() + } + } + } +} + + +#[cfg(test)] +mod tests { + use super::*; + + use std::cell::Cell; + use std::comm; + use std::task; + + #[test] + fn test_sem_acquire_release() { + let s = Semaphore::new(); + s.wait(); + s.signal(); + s.wait(); + } + #[test] + fn test_sem_runtime_friendly_blocking() { + // Force the runtime to schedule two threads on the same sched_loop. + // When one blocks, it should schedule the other one. 
+ do task::spawn_sched(task::ManualThreads(1)) { + let s = Semaphore::new(); + let s2 = s.clone(); + let (p,c) = comm::stream(); + let child_data = Cell::new((s2, c)); + s.wait(); + let (s2, c) = child_data.take(); + do task::spawn { + c.send(()); + s2.wait(); + s2.signal(); + c.send(()); + } + p.recv(); // wait for child to come alive + for 5.times { task::yield(); } // let the child contend + s.signal(); + p.recv(); // wait for child to be done + } + } +} diff --git a/src/libextra/sync/shared_mut/mutex_arc.rs b/src/libextra/sync/shared_mut/mutex_arc.rs new file mode 100644 index 0000000000000..8b85b166ec9d9 --- /dev/null +++ b/src/libextra/sync/shared_mut/mutex_arc.rs @@ -0,0 +1,188 @@ +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::unstable::sync::UnsafeAtomicRcBox; +use std::task; + +use sync::mutex::{Mutex, Lock}; +use sync::unlock::Unlock; + + +struct MutexArcInner { mutex: Mutex, failed: bool, data: T } + +impl MutexArcInner { + fn assert_not_failed(&self) { + if self.failed { + fail!("Poisoned MutexArc - another task failed inside!") + } + } +} + +/// An atomically reference counted variable with mutable data +/// protected by a blocking mutex. +pub struct MutexArc { priv contents: UnsafeAtomicRcBox> } + +impl Clone for MutexArc { + #[inline] + fn clone(&self) -> MutexArc { + MutexArc { contents: self.contents.clone() } + } +} + +impl MutexArc { + /// Create a mutex-protected ARC with the supplied data. 
+ #[inline] + pub fn new(user_data: T) -> MutexArc { + let data = MutexArcInner { + mutex: Mutex::new(), + failed: false, + data: user_data + }; + MutexArc { contents: UnsafeAtomicRcBox::new(data) } + } + + /** + * Access the underlying mutable data with mutual exclusion from + * other tasks. + * + * The mutex will be locked until the access cookie is released; + * all other tasks wishing to access the data will block until the + * cookie is released. + * + * # Safety notes + * The reason this function is 'unsafe' is because it is possible + * to construct a circular reference among multiple ARCs by + * mutating the underlying data. This creates potential for + * deadlock, but worse, this will guarantee a memory leak of all + * involved ARCs. Using mutex ARCs inside of other ARCs is safe in + * the absence of circular references. + * + * If you wish to nest mutex_arcs, one strategy for ensuring + * safety at runtime is to add a "nesting level counter" inside + * the stored data, and when traversing the arcs, assert that they + * monotonically decrease. + * + * # Failure + * Failing while inside the ARC will unlock the ARC while + * unwinding, so that other tasks won't block forever. It will + * also poison the ARC: any tasks that subsequently try to access + * it (including those already blocked on the mutex) will also + * fail immediately. + */ + #[inline] + pub unsafe fn locked<'r>(&'r self) -> Locked<'r, T> { + let state = &*self.contents.get(); + + let lock = state.mutex.lock(); + state.assert_not_failed(); + Locked { lock: lock, mutex_arc: self.clone() } + } + + /** + * A convenience function to wrap the more complicated (but more + * powerful locked method.) Obtains a lock, accesses the value, + * and then invokes the blk argument. 
+ */ + #[inline] + pub unsafe fn get(&self, blk: &fn(&mut T) -> U) -> U { + let mut locked = self.locked(); + blk(locked.get()) + } +} + +/// A value that guarantees exclusive access to a MutexArc until +/// destroyed +pub struct Locked<'self, T> { priv lock: Lock<'self>, priv mutex_arc: MutexArc } + +#[unsafe_destructor] +impl <'self, T: Send> Drop for Locked<'self, T> { + // Don't inline this due to issue #7793 + pub fn drop(&self) { + unsafe { + /* There may be an assertion similar to + assert!(!*self.failed)` that can be made here. This + assertion might be false in case of cond.wait() */ + + if task::failing() { + let state = &mut *self.mutex_arc.contents.get(); + state.failed = true + } + } + } +} + +impl <'self, T: Send> Locked<'self, T> { + /// Access the underlying locked data. + #[inline] + pub fn get(&'self mut self) -> &'self mut T { + unsafe { + let value = &'self mut *self.mutex_arc.contents.get(); + &'self mut value.data + } + } +} + +impl <'self, T: Send> Unlock for Locked<'self, T> { + #[inline] + pub fn unlock(&mut self, blk: ~once fn() -> V) -> V { + let result = self.lock.unlock(blk); + unsafe { + let state = &*self.mutex_arc.contents.get(); + state.assert_not_failed() + } + result + } +} + + +#[cfg(test)] +mod tests { + use super::*; + + use std::task; + use std::cell::Cell; + + + #[test] #[should_fail] #[ignore(cfg(windows))] + fn test_failed_locks_poison_locks() { + unsafe { + let arc = MutexArc::new(()); + let arc2 = arc.clone(); + + let _: Result<(), ()> = do task::try { + let _locked = arc2.locked(); + fail!(); + }; + + let _locked = arc.locked(); + } + } + + #[test] #[should_fail] #[ignore(cfg(windows))] + fn test_arc_unlock_poison() { + unsafe { + let arc = MutexArc::new(()); + let arc2 = Cell::new(arc.clone()); + + let mut locked = arc.locked(); + do locked.unlock { + let arc3 = arc2; + // Poison the arc + let _: Result<(), ()> = do task::try { + let arc4 = arc3.take(); + let _locked = arc4.locked(); + fail!() + }; + } + + // Should 
fail because of poison + } + } +} \ No newline at end of file diff --git a/src/libextra/sync/shared_mut/rwarc.rs b/src/libextra/sync/shared_mut/rwarc.rs new file mode 100644 index 0000000000000..900df18f93a8a --- /dev/null +++ b/src/libextra/sync/shared_mut/rwarc.rs @@ -0,0 +1,458 @@ +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::unstable::sync::UnsafeAtomicRcBox; +use std::cast; +use std::task; + +use sync::rwlock::{RWLock, ReadLock, WriteLock}; +use sync::unlock::Unlock; + + +struct RWArcInner { rwlock: RWLock, contents: T, failed: bool } +impl RWArcInner { + #[inline] + fn new(initial_value: T) -> RWArcInner { + RWArcInner { + rwlock: RWLock::new(), + contents: initial_value, + failed: false + } + } + + #[inline] + fn assert_not_failed(&self) { + if self.failed { + fail!("Poisoned RWArc - another task failed inside!") + } + } + + #[inline] + fn read_lock<'r>(&'r self) -> ReadLock<'r> { + let read_lock = self.rwlock.read_lock(); + self.assert_not_failed(); + read_lock + } + + #[inline] + fn write_lock<'r>(&'r self) -> WriteLock<'r> { + let write_lock = self.rwlock.write_lock(); + self.assert_not_failed(); + write_lock + } +} + + +/** + * A dual-mode atomically referenced counted value protected by a + * reader-writer lock. The data can be accessed mutably or immutably, + * and immutably-accessing tasks may run concurrently. + */ +pub struct RWArc { priv contents: UnsafeAtomicRcBox> } +impl RWArc { + /// Create an RWArc. 
+ #[inline] + pub fn new(initial_value: T) -> RWArc { + let data = RWArcInner::new(initial_value); + RWArc { contents: UnsafeAtomicRcBox::new(data) } + } + + + /** + * A convenience function to wrap the more complicated (but more + * powerful read_locked method.) Obtains a read lock, accesses the + * value, and then invokes the blk argument. + */ + #[inline] + pub fn read(&mut self, blk: &fn(&T) -> U) -> U { + let mut locked = self.read_locked(); + blk(locked.get()) + } + + /** + * A convenience function to wrap the more complicated (but more + * powerful write_locked method.) Obtains a write lock, accesses + * the value, and then invokes the blk argument. + */ + #[inline] + pub fn write(&mut self, blk: &fn(&mut T) -> U) -> U { + let mut locked = self.write_locked(); + blk(locked.get()) + } + + + /** + * Obtain permission to read from the underlying mutable data. + * + * # Failure + * If the RWArc is poisoned then this method will fail. + */ + #[inline] + pub fn read_locked<'r>(&'r mut self) -> ReadLocked<'r, T> { + unsafe { + let state = &mut *self.contents.get(); + ReadLocked { + rwarc: self, + lock: state.read_lock() + } + } + } + + /** + * Access the underlying mutable data with mutual exclusion from + * other tasks. The RWArc be locked until the access cookie is + * released; all other tasks wishing to access the data will block + * until the cookie is released. + * + * # Failure + * + * Failing while inside the RWArc will unlock the RWArc while + * unwinding, so that other tasks won't block forever. It will + * also poison the RWArc: any tasks that subsequently try to + * access it (including those already blocked on the RWArc) will + * also fail immediately. 
+ */ + #[inline] + pub fn write_locked<'r>(&'r mut self) -> WriteLocked<'r, T> { + unsafe { + let state = &mut *self.contents.get(); + WriteLocked { + inner: WriteLockedInner { rwarc: self }, + lock: state.write_lock() + } + } + } + + #[inline] + unsafe fn get<'r>(&'r mut self) -> &'r mut T { + let shared_mut_value = &'r mut *self.contents.get(); + &'r mut shared_mut_value.contents + } +} + +impl Clone for RWArc { + #[inline] + fn clone(&self) -> RWArc { + RWArc { contents: self.contents.clone() } + } +} + +/// A handle to an atomically reference counted value that has a read +/// lock on it. +pub struct ReadLocked<'self, T> { + priv lock: ReadLock<'self>, + priv rwarc: &'self mut RWArc +} + +impl <'self, T: Freeze + Send> ReadLocked<'self, T> { + /// Access the data of the read locked value + #[inline] + pub fn get(&'self mut self) -> &'self T { + unsafe { + let immut_pointer: &'self T = self.rwarc.get(); + immut_pointer + } + } +} + +impl <'self, T: Send> Unlock for ReadLocked<'self, T> { + #[inline] + pub fn unlock(&mut self, blk: ~once fn() -> V) -> V { + let result = self.lock.unlock(blk); + unsafe { + let state = &*self.rwarc.contents.get(); + state.assert_not_failed(); + } + result + } +} + + +/// A handle to an atomically reference counted value that has a write +/// lock on it +pub struct WriteLocked<'self, T> { + priv lock: WriteLock<'self>, + priv inner: WriteLockedInner<'self, T> +} + +// Can't just be a newtype due to issue 7899 +struct WriteLockedInner<'self, T> { rwarc: &'self mut RWArc } + +#[unsafe_destructor] +impl <'self, T: Freeze + Send> Drop for WriteLockedInner<'self, T> { + // Don't inline this due to issue #7793 + fn drop(&self) { + unsafe { + if task::failing() { + let myself = cast::transmute_mut(self); + let state = &mut *myself.rwarc.contents.get(); + state.failed = true + } + } + } +} + +impl <'self, T: Freeze + Send> WriteLocked<'self, T> { + /// Access the data behind a write locked value. 
+ #[inline] + pub fn get(&'self mut self) -> &'self mut T { + unsafe { + let mut_pointer: &'self mut T = self.inner.rwarc.get(); + mut_pointer + } + } + + /// Consumes a write locked value, and downgrades it to a read + /// locked value. + #[inline] + pub fn downgrade(self) -> ReadLocked<'self, T> { + let WriteLocked { inner, lock } = self; + ReadLocked { + rwarc: inner.rwarc, + lock: lock.downgrade() + } + } +} + +impl <'self, T: Freeze + Send> Unlock for WriteLocked<'self, T> { + #[inline] + pub fn unlock(&mut self, blk: ~once fn() -> V) -> V { + let result = self.lock.unlock(blk); + unsafe { + let state = &*self.inner.rwarc.contents.get(); + state.assert_not_failed(); + } + result + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use std::comm; + use std::task; + + use sync::wait_queue::WaitQueue; + + + #[test] + fn test_readers_can_not_read_during_writes() { + /* + This is fundamentally a statistical test. There are a large + number of ways writes can be combined with reads. This test + attempts to explore that space of possibilities, and possible + finds a few incorrect situations. + */ + let number_of_writes_run: uint = 10; + let readers: uint = 5; + + let mut arc: RWArc = RWArc::new(0); + + // Spawn readers that try to catch the main thread in the act + // of writing. + let mut child_tasks = ~[]; + for readers.times { + let mut builder = task::task(); + builder.future_result(|m| child_tasks.push(m)); + do builder.spawn_with(arc.clone()) |mut arc| { + let mut num = arc.read_locked(); + assert!(*num.get() >= 0); + } + }; + + // Write to the arc, and see if it will be caught. 
+ { + let mut write_locked = arc.write_locked(); + let num = write_locked.get(); + for number_of_writes_run.times { + let tmp = *num; + *num = -1; + task::yield(); + *num = tmp + 1; + } + } + + // Wait for child tasks to finish + for child_tasks.iter().advance |r| { + r.recv(); + } + + // Do a sanity check + let mut num = arc.read_locked(); + assert_eq!(*num.get(), number_of_writes_run as int); + } + + fn test_rw_write_cond_downgrade_read_race_helper() { + // Tests that when a downgrader hands off the "reader cloud" lock + // because of a contending reader, a writer can't race to get it + // instead, which would result in readers_and_writers. This tests + // the sync module rather than this one, but it's here because an + // rwarc gives us extra shared state to help check for the race. + // If you want to see this test fail, go to sync.rs and replace the + // line in RWlock::write_cond() that looks like: + // "blk(&Condvar { order: opt_lock, ..*cond })" + // with just "blk(cond)". + let condition = WaitQueue::new(); + let mut arc = RWArc::new(true); + let (wp, wc) = comm::stream(); + + // writer task + do task::spawn_with(( + arc.clone(), + condition.clone() + )) |mut (arc, condition)| { + let mut write_locked = arc.write_locked(); + wc.send(()); // tell downgrader it's ok to go + + condition.wait_with(&mut write_locked); + + // The core of the test is here: the condvar reacquire path + // must involve order_lock, so that it cannot race with a reader + // trying to receive the "reader cloud lock hand-off". 
+ *write_locked.get() = false; + } + + wp.recv(); // wait for writer to get in + + { + let arc2 = arc.clone(); + let write_locked = arc.write_locked(); + + // make writer contend in the cond-reacquire path + condition.signal(); + + // make a reader task to trigger the "reader cloud lock" handoff + let (rp, rc) = comm::stream(); + do task::spawn_with(arc2) |mut arc| { + rc.send(()); + let _lock = arc.read_locked(); + } + rp.recv(); // wait for reader task to exist + + let mut read_locked = write_locked.downgrade(); + + // if writer mistakenly got in, make sure it mutates state + // before we assert on it + for 5.times { task::yield() } + // make sure writer didn't get in. + assert!(*read_locked.get()); + } + } + + #[test] + fn test_rw_write_cond_downgrade_read_race() { + // Ideally the above test case would have yield statements in it that + // helped to expose the race nearly 100% of the time... but adding + // yields in the intuitively-right locations made it even less likely, + // and I wasn't sure why :( . This is a mediocre "next best" option. 
+ for 8.times { test_rw_write_cond_downgrade_read_race_helper() } + } + + #[ignore(cfg(windows))] + mod tests_for_fail_supported_platforms { + use super::super::*; + + use std::cell::Cell; + use std::task; + + + #[test] #[should_fail] + fn test_failed_writes_poison_reads() { + let mut arc = RWArc::new(()); + let cell = Cell::new(arc.clone()); + + let _: Result<(), ()> = do task::try { + let mut arc = cell.take(); + let _write_locked = arc.write_locked(); + fail!(); + }; + + arc.read_locked(); + } + + #[test] #[should_fail] + fn test_failed_writes_poison_writes() { + let mut arc = RWArc::new(()); + let cell = Cell::new(arc.clone()); + + let _: Result<(), ()> = do task::try { + let mut arc = cell.take(); + let _write_locked = arc.write_locked(); + fail!() + }; + + arc.write_locked(); + } + + #[test] + fn test_failed_reads_do_not_poison_reads() { + let mut arc = RWArc::new(()); + let cell = Cell::new(arc.clone()); + + let _: Result<(), ()> = do task::try { + let mut arc = cell.take(); + let _read_locked = arc.read_locked(); + fail!() + }; + + arc.read_locked(); + } + + #[test] + fn test_failed_reads_do_not_poison_writes() { + let mut arc = RWArc::new(()); + let cell = Cell::new(arc.clone()); + + let _: Result<(), ()> = do task::try { + let mut arc = cell.take(); + let _read_locked = arc.read_locked(); + fail!() + }; + + arc.write_locked(); + } + + #[test] + fn test_failed_downgraded_reads_do_not_poison_writes() { + let mut arc = RWArc::new(()); + let cell = Cell::new(arc.clone()); + + let _: Result<(), ()> = do task::try { + let mut arc = cell.take(); + let write_locked = arc.write_locked(); + let _read_locked = write_locked.downgrade(); + fail!() + }; + + arc.write_locked(); + } + + #[test] #[should_fail] + fn test_arc_unlock_poison() { + let mut arc = RWArc::new(()); + let arc2 = Cell::new(arc.clone()); + + let mut write_locked = arc.write_locked(); + do write_locked.unlock { + let arc3 = arc2; + + // Poison the arc + let _: Result<(), ()> = do task::try { + let mut 
arc4 = arc3.take(); + let _write_locked = arc4.write_locked(); + fail!() + }; + } + + // Should fail because of poison + } + } +} \ No newline at end of file diff --git a/src/libextra/sync/unlock.rs b/src/libextra/sync/unlock.rs new file mode 100644 index 0000000000000..22d51cce6cf16 --- /dev/null +++ b/src/libextra/sync/unlock.rs @@ -0,0 +1,51 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + + +/// A trait for unlockable locks. +pub trait Unlock { + /** + * Temporarily unlocks a lock. + * + * # Safety notes + * In order to guarantee safety for users, and implementors of + * Unlock the following restrictions are made. + * + * - The passed in closure is owned so that it cannot refer to + * borrowed values such as the lock itself. + * + * - The passed in closure is once to force that it is used at + * most once. + * + * - The U parameter forces unlock to fail, or use the passed in + * closure at least once. + * + * - The self pointer is mutable so as to invalidate references to + * internal state of the lock. + */ + fn unlock(&mut self, ~once fn() -> U) -> U; +} + +/// Like Unlock but provides stronger guarantees about the enviroment +/// of the closure argument. +pub trait ScopedUnlock { + /// See Unlock::unlock. 
+ fn unlock_with(&mut self, T, extern fn(T) -> U) -> U; +} + +impl ScopedUnlock for U { + #[inline] + fn unlock_with(&mut self, msg: T, blk: extern fn(T) -> V) -> V { + let cell = Cell::new(msg); + do self.unlock { + blk(cell.take()) + } + } +} diff --git a/src/libextra/sync/wait_queue.rs b/src/libextra/sync/wait_queue.rs new file mode 100644 index 0000000000000..98a75ebb80de2 --- /dev/null +++ b/src/libextra/sync/wait_queue.rs @@ -0,0 +1,204 @@ +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::unstable::sync::{Exclusive, exclusive}; +use std::cell::Cell; +use std::comm; + +use sync::unlock::Unlock; + + +type SignalEnd = comm::ChanOne<()>; +type WaitEnd = comm::PortOne<()>; + +/// A very basic primitive for synchronization, a queue of wait +/// events. +#[deriving(Clone)] +pub struct WaitQueue { + // The exclusive lock is needed to prevent a race + priv head: Exclusive>, + priv tail: comm::SharedChan +} + +/// A event that can be waited on. +pub struct WaitEvent { + priv wait_end: WaitEnd +} + +impl WaitEvent { + /// Wait on the associated wait queue. + pub fn wait(self) { + self.wait_end.recv() + } +} + + +impl WaitQueue { + /// Create a wait queue. + #[inline] + pub fn new() -> WaitQueue { + let (head, tail) = comm::stream(); + WaitQueue { head: exclusive(head), tail: comm::SharedChan::new(tail) } + } + + /// Wake up a blocked task. Returns false if there was no blocked + /// task. + #[inline] + pub fn signal(&self) -> bool { + // Loop popping from the queue, and sending until we + // succesfully wake up a task, or exhaust the queue. 
+ loop { + let maybe_signal_end = unsafe { + do self.head.with |head| { + // The peek is mandatory to make sure recv doesn't block. + if head.peek() { Some(head.recv()) } else { None } + } + }; + + match maybe_signal_end { + None => return false, + Some(signal_end) => if comm::try_send_one(signal_end, ()) { + return true + } + } + } + } + + /// Wake up all tasks waiting on the wait queue. + #[inline] + pub fn broadcast(&self) -> uint { + unsafe { + do self.head.with |head| { + // The peek is mandatory to make sure recv doesn't block. + let mut count = 0; + while head.peek() { + if comm::try_send_one(head.recv(), ()) { + count += 1; + } + } + count + } + } + } + + /// Create a wait thunk. + #[inline] + pub fn wait_event(&self) -> WaitEvent { + let (wait_end, signal_end) = comm::oneshot(); + self.tail.send(signal_end); + WaitEvent { wait_end: wait_end } + } + + /// Wait on the wait queue. + #[inline] + pub fn wait(&self) { + self.wait_event().wait() + } + + /// Wait on the wait queue with the unlockable value unlocked. 
+ #[inline] + pub fn wait_with(&self, lock: &mut T) { + let waiter = self.wait_event(); + let cell = Cell::new(waiter); + do lock.unlock { + cell.take().wait() + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use std::task; + use std::comm; + use std::uint; + + use sync::unlock::Unlock; + + + #[test] + fn test_wait_queue_broadcast_wakes_all_waiters() { + let threads = 10; + + let c = WaitQueue::new(); + + let mut ds = ~[]; + for threads.times { + let (p, d) = comm::stream(); + ds.push(p); + do task::spawn_with((c.clone(), d)) |(c, d)| { + c.wait_with(&mut SendOne(d)) + } + } + + // Wait for all the tasks to wait + for ds.consume_iter().advance |d| { + d.recv() + } + + // Check if all threads were waken up + assert_eq!(threads, c.broadcast()); + } + + #[test] + fn test_waits_queue_in_order() { + let threads = 10; + + let c = WaitQueue::new(); + let (numbers_port, numbers_chan) = comm::stream(); + let numbers_chan = comm::SharedChan::new(numbers_chan); + + for uint::iterate(0, threads) |ii| { + let (p, d) = comm::stream(); + do task::spawn_with(( + numbers_chan.clone(), + c.clone(), + d + )) |(numbers_chan, c, d)| { + c.wait_with(&mut SendOne(d)); + numbers_chan.send(ii) + } + + // Wait for the task to wait + p.recv() + } + + // Check if the waits queued in order + for uint::iterate(threads, 0) |ii| { + // Each new thread signaled should send the right number + assert!(c.signal()); + assert_eq!(numbers_port.recv(), ii) + } + } + + #[test] + fn test_signal_wakes_a_single_task() { + let c = WaitQueue::new(); + + let (p, d) = comm::stream(); + do task::spawn_with((c.clone(), d)) |(c, d)| { + c.wait_with(&mut SendOne(d)) + } + + // Wait for the task to complete + p.recv(); + + assert!(c.signal()); + } + + struct SendOne(comm::Chan<()>); + impl Unlock for SendOne { + fn unlock(&mut self, blk: ~once fn() -> U) -> U { + (**self).send(()); + blk() + } + } +} diff --git a/src/test/bench/graph500-bfs.rs b/src/test/bench/graph500-bfs.rs index 
8a0d9bcead0fb..33267abc81cc1 100644 --- a/src/test/bench/graph500-bfs.rs +++ b/src/test/bench/graph500-bfs.rs @@ -17,7 +17,7 @@ An implementation of the Graph500 Breadth First Search problem in Rust. */ extern mod extra; -use extra::arc; +use extra::sync::arc::Arc; use extra::time; use extra::ringbuf::RingBuf; use extra::container::Deque; @@ -230,7 +230,7 @@ fn bfs2(graph: graph, key: node_id) -> bfs_result { } /// A parallel version of the bfs function. -fn pbfs(graph: &arc::ARC, key: node_id) -> bfs_result { +fn pbfs(graph: &Arc, key: node_id) -> bfs_result { // This works by doing functional updates of a color vector. let graph_vec = graph.get(); // FIXME #3387 requires this temp @@ -263,7 +263,7 @@ fn pbfs(graph: &arc::ARC, key: node_id) -> bfs_result { i += 1; let old_len = colors.len(); - let color = arc::ARC(colors); + let color = Arc::new(colors); let color_vec = color.get(); // FIXME #3387 requires this temp colors = do par::mapi(*color_vec) { @@ -444,7 +444,7 @@ fn main() { let mut total_seq = 0.0; let mut total_par = 0.0; - let graph_arc = arc::ARC(graph.clone()); + let graph_arc = Arc::new(graph.clone()); do gen_search_keys(graph, num_keys).map() |root| { io::stdout().write_line(~""); diff --git a/src/test/bench/msgsend-ring-mutex-arcs.rs b/src/test/bench/msgsend-ring-mutex-arcs.rs index a60e0b9e340bd..3b584fe9496f7 100644 --- a/src/test/bench/msgsend-ring-mutex-arcs.rs +++ b/src/test/bench/msgsend-ring-mutex-arcs.rs @@ -17,54 +17,70 @@ extern mod extra; -use extra::arc; +use extra::sync::shared_mut::mutex_arc::{MutexArc}; +use extra::sync::wait_queue::WaitQueue; use extra::future; use extra::time; use std::cell::Cell; use std::io; use std::os; use std::uint; -use std::vec; -// A poor man's pipe. 
-type pipe = arc::MutexARC<~[uint]>; -fn send(p: &pipe, msg: uint) { +#[deriving(Clone)] +struct Pipe { + mutexarc: MutexArc<~[uint]>, + condition: WaitQueue +} + +fn send(p: &mut Pipe, msg: uint) { unsafe { - do p.access_cond |state, cond| { - state.push(msg); - cond.signal(); + do p.mutexarc.get |queue| { + queue.push(msg) } + p.condition.signal(); } } -fn recv(p: &pipe) -> uint { + +fn recv(p: &mut Pipe) -> uint { unsafe { - do p.access_cond |state, cond| { - while state.is_empty() { - cond.wait(); + loop { + let maybe_pop_count = do p.mutexarc.get |queue| { + if queue.is_empty() { None } else { + Some(queue.pop()) + } + }; + + match maybe_pop_count { + None => p.condition.wait(), + Some(pop_count) => return pop_count } - state.pop() } } } -fn init() -> (pipe,pipe) { - let m = arc::MutexARC(~[]); - ((&m).clone(), m) +fn init() -> (Pipe, Pipe) { + let m = Pipe { + mutexarc: MutexArc::new(~[]), + condition: WaitQueue::new() + }; + (m.clone(), m) } -fn thread_ring(i: uint, count: uint, num_chan: pipe, num_port: pipe) { +fn thread_ring(i: uint, count: uint, num_chan: Pipe, num_port: Pipe) { let mut num_chan = Some(num_chan); let mut num_port = Some(num_port); // Send/Receive lots of messages. 
for uint::range(0u, count) |j| { //error!("task %?, iter %?", i, j); + let mut num_chan2 = num_chan.take_unwrap(); let mut num_port2 = num_port.take_unwrap(); - send(&num_chan2, i * j); + send(&mut num_chan2, i * j); + num_chan = Some(num_chan2); - let _n = recv(&num_port2); + let _n = recv(&mut num_port2); //log(error, _n); num_port = Some(num_port2); }; @@ -72,7 +88,7 @@ fn thread_ring(i: uint, count: uint, num_chan: pipe, num_port: pipe) { fn main() { let args = os::args(); - let args = if os::getenv(~"RUST_BENCH").is_some() { + let args = if os::getenv("RUST_BENCH").is_some() { ~[~"", ~"100", ~"10000"] } else if args.len() <= 1u { ~[~"", ~"10", ~"100"] @@ -84,7 +100,7 @@ fn main() { let msg_per_task = uint::from_str(args[2]).get(); let (num_chan, num_port) = init(); - let mut num_chan = Cell::new(num_chan); + let num_chan = Cell::new(num_chan); let start = time::precise_time_s(); diff --git a/src/test/bench/msgsend-ring-rw-arcs.rs b/src/test/bench/msgsend-ring-rw-arcs.rs index 0c6b97c6b7819..fb6420c6dd165 100644 --- a/src/test/bench/msgsend-ring-rw-arcs.rs +++ b/src/test/bench/msgsend-ring-rw-arcs.rs @@ -17,50 +17,61 @@ extern mod extra; -use extra::arc; +use extra::sync::{RWArc, WaitQueue}; use extra::future; use extra::time; use std::cell::Cell; use std::io; use std::os; use std::uint; -use std::vec; -// A poor man's pipe. 
-type pipe = arc::RWARC<~[uint]>; -fn send(p: &pipe, msg: uint) { - do p.write_cond |state, cond| { - state.push(msg); - cond.signal(); +#[deriving(Clone)] +struct Pipe { + rwarc: RWArc<~[uint]>, + condition: WaitQueue +} + +fn send(p: &mut Pipe, msg: uint) { + do p.rwarc.write |queue| { + queue.push(msg) } + p.condition.signal(); } -fn recv(p: &pipe) -> uint { - do p.write_cond |state, cond| { - while state.is_empty() { - cond.wait(); + +fn recv(p: &mut Pipe) -> uint { + loop { + let maybe_pop_count = do p.rwarc.write |queue| { + if queue.is_empty() { None } else { + Some(queue.pop()) + } + }; + + match maybe_pop_count { + None => p.condition.wait(), + Some(pop_count) => return pop_count } - state.pop() } } -fn init() -> (pipe,pipe) { - let x = arc::RWARC(~[]); - ((&x).clone(), x) +fn init() -> (Pipe, Pipe) { + let x = Pipe { rwarc: RWArc::new(~[]), condition: WaitQueue::new() }; + (x.clone(), x) } - -fn thread_ring(i: uint, count: uint, num_chan: pipe, num_port: pipe) { +fn thread_ring(i: uint, count: uint, num_chan: Pipe, num_port: Pipe) { let mut num_chan = Some(num_chan); let mut num_port = Some(num_port); // Send/Receive lots of messages. 
for uint::range(0u, count) |j| { //error!("task %?, iter %?", i, j); + let mut num_chan2 = num_chan.take_unwrap(); let mut num_port2 = num_port.take_unwrap(); - send(&num_chan2, i * j); + send(&mut num_chan2, i * j); + num_chan = Some(num_chan2); - let _n = recv(&num_port2); + let _n = recv(&mut num_port2); //log(error, _n); num_port = Some(num_port2); }; @@ -68,7 +79,7 @@ fn thread_ring(i: uint, count: uint, num_chan: pipe, num_port: pipe) { fn main() { let args = os::args(); - let args = if os::getenv(~"RUST_BENCH").is_some() { + let args = if os::getenv("RUST_BENCH").is_some() { ~[~"", ~"100", ~"10000"] } else if args.len() <= 1u { ~[~"", ~"10", ~"100"] @@ -80,7 +91,7 @@ fn main() { let msg_per_task = uint::from_str(args[2]).get(); let (num_chan, num_port) = init(); - let mut num_chan = Cell::new(num_chan); + let num_chan = Cell::new(num_chan); let start = time::precise_time_s(); diff --git a/src/test/compile-fail/arc-rw-cond-shouldnt-escape.rs b/src/test/compile-fail/arc-rw-cond-shouldnt-escape.rs deleted file mode 100644 index b00b701191e2e..0000000000000 --- a/src/test/compile-fail/arc-rw-cond-shouldnt-escape.rs +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -// error-pattern: lifetime of return value does not outlive the function call -extern mod extra; -use extra::arc; -fn main() { - let x = ~arc::RWARC(1); - let mut y = None; - do x.write_cond |_one, cond| { - y = Some(cond); - } - y.unwrap().wait(); -} diff --git a/src/test/compile-fail/arc-rw-read-mode-shouldnt-escape.rs b/src/test/compile-fail/arc-rw-read-mode-shouldnt-escape.rs deleted file mode 100644 index 6d4b774fd5f60..0000000000000 --- a/src/test/compile-fail/arc-rw-read-mode-shouldnt-escape.rs +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -extern mod extra; -use extra::arc; -fn main() { - let x = ~arc::RWARC(1); - let mut y = None; - do x.write_downgrade |write_mode| { - y = Some(x.downgrade(write_mode)); - //~^ ERROR cannot infer an appropriate lifetime - } - y.get(); - // Adding this line causes a method unification failure instead - // do (&option::unwrap(y)).read |state| { assert!(*state == 1); } -} diff --git a/src/test/compile-fail/arc-rw-state-shouldnt-escape.rs b/src/test/compile-fail/arc-rw-state-shouldnt-escape.rs index 001e6cf922f67..481e5316ecf7a 100644 --- a/src/test/compile-fail/arc-rw-state-shouldnt-escape.rs +++ b/src/test/compile-fail/arc-rw-state-shouldnt-escape.rs @@ -9,14 +9,12 @@ // except according to those terms. 
extern mod extra; -use extra::arc; +use extra::sync::shared_mut::rwarc; fn main() { - let x = ~arc::RWARC(1); - let mut y = None; //~ ERROR lifetime of variable does not enclose its declaration - do x.write |one| { - y = Some(one); - } - *y.unwrap() = 2; - //~^ ERROR lifetime of return value does not outlive the function call - //~^^ ERROR dereference of reference outside its lifetime + let mut x = rwarc::RWArc::new(1); + let y = { + let mut write_locked = x.write_locked(); + write_locked.get() //~ ERROR borrowed value does not live long enough + }; + *y = 2; } diff --git a/src/test/compile-fail/arc-rw-write-mode-cond-shouldnt-escape.rs b/src/test/compile-fail/arc-rw-write-mode-cond-shouldnt-escape.rs deleted file mode 100644 index 59e899dbbf2ef..0000000000000 --- a/src/test/compile-fail/arc-rw-write-mode-cond-shouldnt-escape.rs +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// error-pattern: lifetime of variable does not enclose its declaration -extern mod extra; -use extra::arc; -fn main() { - let x = ~arc::RWARC(1); - let mut y = None; - do x.write_downgrade |write_mode| { - do (&write_mode).write_cond |_one, cond| { - y = Some(cond); - } - } - y.unwrap().wait(); -} diff --git a/src/test/compile-fail/arc-rw-write-mode-shouldnt-escape.rs b/src/test/compile-fail/arc-rw-write-mode-shouldnt-escape.rs deleted file mode 100644 index 2599fb4dfa0c7..0000000000000 --- a/src/test/compile-fail/arc-rw-write-mode-shouldnt-escape.rs +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// error-pattern: lifetime of variable does not enclose its declaration -extern mod extra; -use extra::arc; -fn main() { - let x = ~arc::RWARC(1); - let mut y = None; - do x.write_downgrade |write_mode| { - y = Some(write_mode); - } - y.get(); - // Adding this line causes a method unification failure instead - // do (&option::unwrap(y)).write |state| { assert!(*state == 1); } -} diff --git a/src/test/compile-fail/no-capture-arc.rs b/src/test/compile-fail/no-capture-arc.rs index b036071fd8794..bcdca5e14f1ed 100644 --- a/src/test/compile-fail/no-capture-arc.rs +++ b/src/test/compile-fail/no-capture-arc.rs @@ -11,13 +11,13 @@ // error-pattern: use of moved value extern mod extra; -use extra::arc; +use extra::sync::arc; use std::task; fn main() { let v = ~[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; - let arc_v = arc::ARC(v); + let arc_v = arc::Arc::new(v); do task::spawn() { let v = arc_v.get(); diff --git a/src/test/compile-fail/no-reuse-move-arc.rs b/src/test/compile-fail/no-reuse-move-arc.rs index 28f3ea7af9f8c..73b1314b39ef5 100644 --- a/src/test/compile-fail/no-reuse-move-arc.rs +++ b/src/test/compile-fail/no-reuse-move-arc.rs @@ -9,13 +9,13 @@ // except according to those terms. extern mod extra; -use extra::arc; +use extra::sync::arc::Arc; use std::task; fn main() { let v = ~[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; - let arc_v = arc::ARC(v); + let arc_v = Arc::new(v); do task::spawn() { let v = arc_v.get(); diff --git a/src/test/compile-fail/once-cant-call-twice-on-heap.rs b/src/test/compile-fail/once-cant-call-twice-on-heap.rs index 4436675d69a5f..783c2f168651b 100644 --- a/src/test/compile-fail/once-cant-call-twice-on-heap.rs +++ b/src/test/compile-fail/once-cant-call-twice-on-heap.rs @@ -12,7 +12,7 @@ // This program would segfault if it were legal. 
extern mod extra; -use extra::arc; +use extra::sync::arc::Arc; use std::util; fn foo(blk: ~once fn()) { @@ -21,7 +21,7 @@ fn foo(blk: ~once fn()) { } fn main() { - let x = arc::ARC(true); + let x = Arc::new(true); do foo { assert!(*x.get()); util::ignore(x); diff --git a/src/test/compile-fail/once-cant-call-twice-on-stack.rs b/src/test/compile-fail/once-cant-call-twice-on-stack.rs index 10877be549e28..285b391e86d59 100644 --- a/src/test/compile-fail/once-cant-call-twice-on-stack.rs +++ b/src/test/compile-fail/once-cant-call-twice-on-stack.rs @@ -13,7 +13,7 @@ // compile-flags:-Z once-fns extern mod extra; -use extra::arc; +use extra::sync::arc::Arc; use std::util; fn foo(blk: &once fn()) { @@ -22,7 +22,7 @@ fn foo(blk: &once fn()) { } fn main() { - let x = arc::ARC(true); + let x = Arc::new(true); do foo { assert!(*x.get()); util::ignore(x); diff --git a/src/test/compile-fail/once-cant-move-out-of-non-once-on-heap.rs b/src/test/compile-fail/once-cant-move-out-of-non-once-on-heap.rs index 61f158cec27e3..503f89aeaeb09 100644 --- a/src/test/compile-fail/once-cant-move-out-of-non-once-on-heap.rs +++ b/src/test/compile-fail/once-cant-move-out-of-non-once-on-heap.rs @@ -12,7 +12,7 @@ // This program would segfault if it were legal. extern mod extra; -use extra::arc; +use extra::sync::arc::Arc; use std::util; fn foo(blk: ~fn()) { @@ -21,7 +21,7 @@ fn foo(blk: ~fn()) { } fn main() { - let x = arc::ARC(true); + let x = Arc::new(true); do foo { assert!(*x.get()); util::ignore(x); //~ ERROR cannot move out of captured outer variable diff --git a/src/test/compile-fail/once-cant-move-out-of-non-once-on-stack.rs b/src/test/compile-fail/once-cant-move-out-of-non-once-on-stack.rs index 42c8b9a999821..e2741cc1c1db2 100644 --- a/src/test/compile-fail/once-cant-move-out-of-non-once-on-stack.rs +++ b/src/test/compile-fail/once-cant-move-out-of-non-once-on-stack.rs @@ -12,7 +12,7 @@ // This program would segfault if it were legal. 
extern mod extra; -use extra::arc; +use extra::sync::arc::Arc; use std::util; fn foo(blk: &fn()) { @@ -21,7 +21,7 @@ fn foo(blk: &fn()) { } fn main() { - let x = arc::ARC(true); + let x = Arc::new(true); do foo { assert!(*x.get()); util::ignore(x); //~ ERROR cannot move out of captured outer variable diff --git a/src/test/compile-fail/sync-cond-shouldnt-escape.rs b/src/test/compile-fail/sync-cond-shouldnt-escape.rs deleted file mode 100644 index 2006027e79707..0000000000000 --- a/src/test/compile-fail/sync-cond-shouldnt-escape.rs +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// error-pattern: lifetime of variable does not enclose its declaration -extern mod extra; -use extra::sync; - -fn main() { - let m = ~sync::Mutex(); - let mut cond = None; - do m.lock_cond |c| { - cond = Some(c); - } - cond.unwrap().signal(); -} diff --git a/src/test/compile-fail/sync-rwlock-cond-shouldnt-escape.rs b/src/test/compile-fail/sync-rwlock-cond-shouldnt-escape.rs deleted file mode 100644 index 4108201f91159..0000000000000 --- a/src/test/compile-fail/sync-rwlock-cond-shouldnt-escape.rs +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -// error-pattern: lifetime of method receiver does not outlive the method call -extern mod extra; -use extra::sync; -fn main() { - let x = ~sync::RWlock(); - let mut y = None; - do x.write_cond |cond| { - y = Some(cond); - } - y.unwrap().wait(); -} diff --git a/src/test/compile-fail/sync-rwlock-read-mode-shouldnt-escape.rs b/src/test/compile-fail/sync-rwlock-read-mode-shouldnt-escape.rs deleted file mode 100644 index 4bec5fa270ab8..0000000000000 --- a/src/test/compile-fail/sync-rwlock-read-mode-shouldnt-escape.rs +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// error-pattern: cannot infer an appropriate lifetime -extern mod extra; -use extra::sync; -fn main() { - let x = ~sync::RWlock(); - let mut y = None; - do x.write_downgrade |write_mode| { - y = Some(x.downgrade(write_mode)); - } - // Adding this line causes a method unification failure instead - // do (&option::unwrap(y)).read { } -} diff --git a/src/test/compile-fail/sync-rwlock-write-mode-cond-shouldnt-escape.rs b/src/test/compile-fail/sync-rwlock-write-mode-cond-shouldnt-escape.rs deleted file mode 100644 index 43b4d9aabb876..0000000000000 --- a/src/test/compile-fail/sync-rwlock-write-mode-cond-shouldnt-escape.rs +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -// error-pattern: lifetime of variable does not enclose its declaration -extern mod extra; -use extra::sync; -fn main() { - let x = ~sync::RWlock(); - let mut y = None; - do x.write_downgrade |write_mode| { - do (&write_mode).write_cond |cond| { - y = Some(cond); - } - } - y.unwrap().wait(); -} diff --git a/src/test/compile-fail/sync-rwlock-write-mode-shouldnt-escape.rs b/src/test/compile-fail/sync-rwlock-write-mode-shouldnt-escape.rs deleted file mode 100644 index 15af7be524687..0000000000000 --- a/src/test/compile-fail/sync-rwlock-write-mode-shouldnt-escape.rs +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// error-pattern: lifetime of variable does not enclose its declaration -extern mod extra; -use extra::sync; -fn main() { - let x = ~sync::RWlock(); - let mut y = None; - do x.write_downgrade |write_mode| { - y = Some(write_mode); - } - // Adding this line causes a method unification failure instead - // do (&option::unwrap(y)).write { } -} diff --git a/src/test/compile-fail/unlocking-prevents-values-from-continuing-to-be-used.rs b/src/test/compile-fail/unlocking-prevents-values-from-continuing-to-be-used.rs new file mode 100644 index 0000000000000..ac73df777fce2 --- /dev/null +++ b/src/test/compile-fail/unlocking-prevents-values-from-continuing-to-be-used.rs @@ -0,0 +1,33 @@ +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +extern mod extra; +use extra::sync::shared_mut::rwarc::RWArc; +use extra::sync::unlock::Unlock; + +fn main() { + let mut arc = RWArc::new(0u); + let arc2 = arc.clone(); + let mut write_locked = arc.write_locked(); + let _value = write_locked.get(); + + do write_locked.unlock { //~ ERROR cannot borrow `write_locked` as mutable more than once at a time + let mut arc3 = arc2; + + // Can't bring the other lock handle inside + let mut write_locked = arc3.write_locked(); + let value = write_locked.get(); + + // This breaks the sole ownership assumption given by + // borrowing, and so must be banned because value is still in + // scope. + *value = 2; + } +} diff --git a/src/test/run-fail/issue-2444.rs b/src/test/run-fail/issue-2444.rs index 0ab1528e4fb75..af871ea2d309c 100644 --- a/src/test/run-fail/issue-2444.rs +++ b/src/test/run-fail/issue-2444.rs @@ -11,9 +11,9 @@ // error-pattern:explicit failure extern mod extra; -use extra::arc; +use extra::sync::Arc; -enum e { e(arc::ARC) } +enum e { e(Arc) } fn foo() -> e {fail!();} diff --git a/src/test/run-pass/bind-by-move.rs b/src/test/run-pass/bind-by-move.rs index 5cde389ff7536..6e7598a61e830 100644 --- a/src/test/run-pass/bind-by-move.rs +++ b/src/test/run-pass/bind-by-move.rs @@ -1,4 +1,4 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT.
// @@ -10,11 +10,11 @@ // xfail-fast extern mod extra; -use extra::arc; -fn dispose(_x: arc::ARC) { unsafe { } } +use extra::sync::arc::Arc; +fn dispose(_x: Arc) { unsafe { } } pub fn main() { - let p = arc::ARC(true); + let p = Arc::new(true); let x = Some(p); match x { Some(z) => { dispose(z); }, diff --git a/src/test/run-pass/once-move-out-on-heap.rs b/src/test/run-pass/once-move-out-on-heap.rs index 38b23fd128d4e..4ef66cbb6636e 100644 --- a/src/test/run-pass/once-move-out-on-heap.rs +++ b/src/test/run-pass/once-move-out-on-heap.rs @@ -13,7 +13,7 @@ // xfail-fast extern mod extra; -use extra::arc; +use extra::sync::arc::Arc; use std::util; fn foo(blk: ~once fn()) { @@ -21,7 +21,7 @@ fn foo(blk: ~once fn()) { } fn main() { - let x = arc::ARC(true); + let x = Arc::new(true); do foo { assert!(*x.get()); util::ignore(x); diff --git a/src/test/run-pass/once-move-out-on-stack.rs b/src/test/run-pass/once-move-out-on-stack.rs index e881f5766736c..cc1e830f5fb75 100644 --- a/src/test/run-pass/once-move-out-on-stack.rs +++ b/src/test/run-pass/once-move-out-on-stack.rs @@ -14,7 +14,7 @@ // compile-flags:-Z once-fns extern mod extra; -use extra::arc; +use extra::sync::arc::Arc; use std::util; fn foo(blk: &once fn()) { @@ -22,7 +22,7 @@ fn foo(blk: &once fn()) { } fn main() { - let x = arc::ARC(true); + let x = Arc::new(true); do foo { assert!(*x.get()); util::ignore(x); diff --git a/src/test/run-pass/trait-bounds-in-arc.rs b/src/test/run-pass/trait-bounds-in-arc.rs index a3b2ea02db358..195871999f773 100644 --- a/src/test/run-pass/trait-bounds-in-arc.rs +++ b/src/test/run-pass/trait-bounds-in-arc.rs @@ -8,13 +8,13 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -// Tests that a heterogeneous list of existential types can be put inside an ARC +// Tests that a heterogeneous list of existential types can be put inside an Arc // and shared between tasks as long as all types fulfill Freeze+Send.
// xfail-fast extern mod extra; -use extra::arc; +use extra::sync::arc; use std::comm; use std::task; use std::cell; @@ -64,7 +64,7 @@ fn main() { let dogge1 = Dogge { bark_decibels: 100, tricks_known: 42, name: ~"alan_turing" }; let dogge2 = Dogge { bark_decibels: 55, tricks_known: 11, name: ~"albert_einstein" }; let fishe = Goldfyshe { swim_speed: 998, name: ~"alec_guinness" }; - let arc = arc::ARC(~[~catte as ~Pet:Freeze+Send, + let arc = arc::Arc::new(~[~catte as ~Pet:Freeze+Send, ~dogge1 as ~Pet:Freeze+Send, ~fishe as ~Pet:Freeze+Send, ~dogge2 as ~Pet:Freeze+Send]); @@ -82,21 +82,21 @@ fn main() { p3.recv(); } -fn check_legs(arc: arc::ARC<~[~Pet:Freeze+Send]>) { +fn check_legs(arc: arc::Arc<~[~Pet:Freeze+Send]>) { let mut legs = 0; for arc.get().iter().advance |pet| { legs += pet.num_legs(); } assert!(legs == 12); } -fn check_names(arc: arc::ARC<~[~Pet:Freeze+Send]>) { +fn check_names(arc: arc::Arc<~[~Pet:Freeze+Send]>) { for arc.get().iter().advance |pet| { do pet.name |name| { assert!(name[0] == 'a' as u8 && name[1] == 'l' as u8); } } } -fn check_pedigree(arc: arc::ARC<~[~Pet:Freeze+Send]>) { +fn check_pedigree(arc: arc::Arc<~[~Pet:Freeze+Send]>) { for arc.get().iter().advance |pet| { assert!(pet.of_good_pedigree()); }