Remove all uses of pub impl. rs=style #6871

Merged · 2 commits · Jun 1, 2013
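The whole change is one mechanical pattern, applied across both files: a `pub impl` block (in which every item was public unless marked `priv`) becomes a plain `impl` block (in which every item is private unless marked `pub`). A minimal sketch of the pattern in the 2013-era syntax these files use; the `Counter` type is hypothetical and not part of this diff:

    struct Counter { n: uint }

    // Before: items in a `pub impl` are public by default,
    // and helpers must opt out with `priv`.
    pub impl Counter {
        fn get(&self) -> uint { self.n }          // public by default
        priv fn bump(&mut self) { self.n += 1; }  // explicitly private
    }

    // After: items in a plain `impl` are private by default,
    // and the public API must opt in with `pub`.
    impl Counter {
        pub fn get(&self) -> uint { self.n }      // explicitly public
        fn bump(&mut self) { self.n += 1; }       // private by default
    }

The external API is unchanged: each method that was public before is marked `pub` individually after.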
src/libextra/arc.rs (74 changes: 40 additions & 34 deletions)
@@ -56,10 +56,10 @@ pub struct Condvar<'self> {
     cond: &'self sync::Condvar<'self>
 }
 
-pub impl<'self> Condvar<'self> {
+impl<'self> Condvar<'self> {
     /// Atomically exit the associated ARC and block until a signal is sent.
     #[inline(always)]
-    fn wait(&self) { self.wait_on(0) }
+    pub fn wait(&self) { self.wait_on(0) }
 
     /**
      * Atomically exit the associated ARC and block on a specified condvar
@@ -68,7 +68,7 @@ pub impl<'self> Condvar<'self> {
      * wait() is equivalent to wait_on(0).
      */
     #[inline(always)]
-    fn wait_on(&self, condvar_id: uint) {
+    pub fn wait_on(&self, condvar_id: uint) {
         assert!(!*self.failed);
         self.cond.wait_on(condvar_id);
         // This is why we need to wrap sync::condvar.
@@ -77,28 +77,28 @@
 
     /// Wake up a blocked task. Returns false if there was no blocked task.
     #[inline(always)]
-    fn signal(&self) -> bool { self.signal_on(0) }
+    pub fn signal(&self) -> bool { self.signal_on(0) }
 
     /**
      * Wake up a blocked task on a specified condvar (as
      * sync::cond.signal_on). Returns false if there was no blocked task.
      */
     #[inline(always)]
-    fn signal_on(&self, condvar_id: uint) -> bool {
+    pub fn signal_on(&self, condvar_id: uint) -> bool {
         assert!(!*self.failed);
         self.cond.signal_on(condvar_id)
     }
 
     /// Wake up all blocked tasks. Returns the number of tasks woken.
     #[inline(always)]
-    fn broadcast(&self) -> uint { self.broadcast_on(0) }
+    pub fn broadcast(&self) -> uint { self.broadcast_on(0) }
 
     /**
      * Wake up all blocked tasks on a specified condvar (as
      * sync::cond.broadcast_on). Returns Returns the number of tasks woken.
      */
     #[inline(always)]
-    fn broadcast_on(&self, condvar_id: uint) -> uint {
+    pub fn broadcast_on(&self, condvar_id: uint) -> uint {
         assert!(!*self.failed);
         self.cond.broadcast_on(condvar_id)
     }
@@ -120,8 +120,8 @@ pub fn ARC<T:Const + Owned>(data: T) -> ARC<T> {
  * Access the underlying data in an atomically reference counted
  * wrapper.
  */
-pub impl<T:Const+Owned> ARC<T> {
-    fn get<'a>(&'a self) -> &'a T {
+impl<T:Const+Owned> ARC<T> {
+    pub fn get<'a>(&'a self) -> &'a T {
         unsafe { &*self.x.get_immut() }
     }
 }
@@ -173,7 +173,7 @@ impl<T:Owned> Clone for MutexARC<T> {
     }
 }
 
-pub impl<T:Owned> MutexARC<T> {
+impl<T:Owned> MutexARC<T> {
 
     /**
      * Access the underlying mutable data with mutual exclusion from other
@@ -199,7 +199,7 @@ pub impl<T:Owned> MutexARC<T> {
      * blocked on the mutex) will also fail immediately.
      */
     #[inline(always)]
-    unsafe fn access<U>(&self, blk: &fn(x: &mut T) -> U) -> U {
+    pub unsafe fn access<U>(&self, blk: &fn(x: &mut T) -> U) -> U {
         unsafe {
             let state = self.x.get();
             // Borrowck would complain about this if the function were
@@ -214,10 +214,10 @@ pub impl<T:Owned> MutexARC<T> {
 
     /// As access(), but with a condvar, as sync::mutex.lock_cond().
     #[inline(always)]
-    unsafe fn access_cond<'x, 'c, U>(
-        &self,
-        blk: &fn(x: &'x mut T, c: &'c Condvar) -> U) -> U
-    {
+    pub unsafe fn access_cond<'x, 'c, U>(&self,
+                                         blk: &fn(x: &'x mut T,
+                                                  c: &'c Condvar) -> U)
+                                         -> U {
         let state = self.x.get();
         do (&(*state).lock).lock_cond |cond| {
             check_poison(true, (*state).failed);
@@ -302,16 +302,18 @@ pub fn rw_arc_with_condvars<T:Const + Owned>(
     RWARC { x: UnsafeAtomicRcBox::new(data), cant_nest: () }
 }
 
-pub impl<T:Const + Owned> RWARC<T> {
+impl<T:Const + Owned> RWARC<T> {
     /// Duplicate a rwlock-protected ARC, as arc::clone.
-    fn clone(&self) -> RWARC<T> {
-        RWARC { x: self.x.clone(),
-                cant_nest: () }
+    pub fn clone(&self) -> RWARC<T> {
+        RWARC {
+            x: self.x.clone(),
+            cant_nest: (),
+        }
     }
 
 }
 
-pub impl<T:Const + Owned> RWARC<T> {
+impl<T:Const + Owned> RWARC<T> {
     /**
      * Access the underlying data mutably. Locks the rwlock in write mode;
      * other readers and writers will block.
@@ -323,7 +325,7 @@ pub impl<T:Const + Owned> RWARC<T> {
      * poison the ARC, so subsequent readers and writers will both also fail.
      */
     #[inline(always)]
-    fn write<U>(&self, blk: &fn(x: &mut T) -> U) -> U {
+    pub fn write<U>(&self, blk: &fn(x: &mut T) -> U) -> U {
         unsafe {
             let state = self.x.get();
             do (*borrow_rwlock(state)).write {
@@ -333,11 +335,12 @@ pub impl<T:Const + Owned> RWARC<T> {
             }
         }
     }
+
     /// As write(), but with a condvar, as sync::rwlock.write_cond().
     #[inline(always)]
-    fn write_cond<'x, 'c, U>(&self,
-                             blk: &fn(x: &'x mut T, c: &'c Condvar) -> U)
-                             -> U {
+    pub fn write_cond<'x, 'c, U>(&self,
+                                 blk: &fn(x: &'x mut T, c: &'c Condvar) -> U)
+                                 -> U {
         unsafe {
             let state = self.x.get();
             do (*borrow_rwlock(state)).write_cond |cond| {
@@ -350,6 +353,7 @@ pub impl<T:Const + Owned> RWARC<T> {
             }
         }
     }
+
     /**
      * Access the underlying data immutably. May run concurrently with other
      * reading tasks.
@@ -359,7 +363,7 @@ pub impl<T:Const + Owned> RWARC<T> {
      * Failing will unlock the ARC while unwinding. However, unlike all other
      * access modes, this will not poison the ARC.
      */
-    fn read<U>(&self, blk: &fn(x: &T) -> U) -> U {
+    pub fn read<U>(&self, blk: &fn(x: &T) -> U) -> U {
         unsafe {
             let state = self.x.get();
             do (*state).lock.read {
@@ -389,7 +393,7 @@ pub impl<T:Const + Owned> RWARC<T> {
      * }
      * ~~~
      */
-    fn write_downgrade<U>(&self, blk: &fn(v: RWWriteMode<T>) -> U) -> U {
+    pub fn write_downgrade<U>(&self, blk: &fn(v: RWWriteMode<T>) -> U) -> U {
         unsafe {
             let state = self.x.get();
             do (*borrow_rwlock(state)).write_downgrade |write_mode| {
@@ -404,7 +408,8 @@ pub impl<T:Const + Owned> RWARC<T> {
     }
 
     /// To be called inside of the write_downgrade block.
-    fn downgrade<'a>(&self, token: RWWriteMode<'a, T>) -> RWReadMode<'a, T> {
+    pub fn downgrade<'a>(&self, token: RWWriteMode<'a, T>)
+                         -> RWReadMode<'a, T> {
         unsafe {
             // The rwlock should assert that the token belongs to us for us.
             let state = self.x.get();
@@ -451,9 +456,9 @@ pub struct RWReadMode<'self, T> {
     token: sync::RWlockReadMode<'self>,
 }
 
-pub impl<'self, T:Const + Owned> RWWriteMode<'self, T> {
+impl<'self, T:Const + Owned> RWWriteMode<'self, T> {
     /// Access the pre-downgrade RWARC in write mode.
-    fn write<U>(&mut self, blk: &fn(x: &mut T) -> U) -> U {
+    pub fn write<U>(&mut self, blk: &fn(x: &mut T) -> U) -> U {
         match *self {
             RWWriteMode {
                 data: &ref mut data,
@@ -466,10 +471,11 @@ pub impl<'self, T:Const + Owned> RWWriteMode<'self, T> {
             }
         }
     }
+
     /// Access the pre-downgrade RWARC in write mode with a condvar.
-    fn write_cond<'x, 'c, U>(&mut self,
-                             blk: &fn(x: &'x mut T, c: &'c Condvar) -> U)
-                             -> U {
+    pub fn write_cond<'x, 'c, U>(&mut self,
+                                 blk: &fn(x: &'x mut T, c: &'c Condvar) -> U)
+                                 -> U {
         match *self {
             RWWriteMode {
                 data: &ref mut data,
@@ -491,9 +497,9 @@ pub impl<'self, T:Const + Owned> RWWriteMode<'self, T> {
     }
 }
 
-pub impl<'self, T:Const + Owned> RWReadMode<'self, T> {
+impl<'self, T:Const + Owned> RWReadMode<'self, T> {
     /// Access the post-downgrade rwlock in read mode.
-    fn read<U>(&self, blk: &fn(x: &T) -> U) -> U {
+    pub fn read<U>(&self, blk: &fn(x: &T) -> U) -> U {
         match *self {
             RWReadMode {
                 data: data,
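For callers of arc.rs the net effect is nil: every method that was default-public under `pub impl` is now explicitly `pub fn`, so the exported API is identical. A hypothetical caller sketch in the same era syntax (assuming the existing `RWARC` constructor function):

    let arc = RWARC(0);
    do arc.write |n| { *n += 1 }           // still exported: now `pub fn write`
    do arc.read |n| { assert!(*n == 1) }   // still exported: now `pub fn read`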
src/libextra/arena.rs (20 changes: 10 additions & 10 deletions)
@@ -166,9 +166,9 @@ unsafe fn un_bitpack_tydesc_ptr(p: uint) -> (*TypeDesc, bool) {
     (transmute(p & !1), p & 1 == 1)
 }
 
-pub impl Arena {
+impl Arena {
     // Functions for the POD part of the arena
-    priv fn alloc_pod_grow(&mut self, n_bytes: uint, align: uint) -> *u8 {
+    fn alloc_pod_grow(&mut self, n_bytes: uint, align: uint) -> *u8 {
         // Allocate a new chunk.
         let chunk_size = at_vec::capacity(self.pod_head.data);
         let new_min_chunk_size = uint::max(n_bytes, chunk_size);
@@ -180,7 +180,7 @@ pub impl Arena {
     }
 
     #[inline(always)]
-    priv fn alloc_pod_inner(&mut self, n_bytes: uint, align: uint) -> *u8 {
+    fn alloc_pod_inner(&mut self, n_bytes: uint, align: uint) -> *u8 {
         unsafe {
             // XXX: Borrow check
             let head = transmute_mut_region(&mut self.pod_head);
@@ -200,7 +200,7 @@ pub impl Arena {
     }
 
     #[inline(always)]
-    priv fn alloc_pod<'a, T>(&'a mut self, op: &fn() -> T) -> &'a T {
+    fn alloc_pod<'a, T>(&'a mut self, op: &fn() -> T) -> &'a T {
         unsafe {
             let tydesc = sys::get_type_desc::<T>();
             let ptr = self.alloc_pod_inner((*tydesc).size, (*tydesc).align);
@@ -211,8 +211,8 @@ pub impl Arena {
     }
 
     // Functions for the non-POD part of the arena
-    priv fn alloc_nonpod_grow(&mut self, n_bytes: uint, align: uint)
-                              -> (*u8, *u8) {
+    fn alloc_nonpod_grow(&mut self, n_bytes: uint, align: uint)
+                         -> (*u8, *u8) {
         // Allocate a new chunk.
         let chunk_size = at_vec::capacity(self.head.data);
         let new_min_chunk_size = uint::max(n_bytes, chunk_size);
@@ -224,8 +224,8 @@ pub impl Arena {
     }
 
     #[inline(always)]
-    priv fn alloc_nonpod_inner(&mut self, n_bytes: uint, align: uint)
-                               -> (*u8, *u8) {
+    fn alloc_nonpod_inner(&mut self, n_bytes: uint, align: uint)
+                          -> (*u8, *u8) {
         unsafe {
             let head = transmute_mut_region(&mut self.head);
 
@@ -247,7 +247,7 @@ pub impl Arena {
     }
 
    #[inline(always)]
-    priv fn alloc_nonpod<'a, T>(&'a mut self, op: &fn() -> T) -> &'a T {
+    fn alloc_nonpod<'a, T>(&'a mut self, op: &fn() -> T) -> &'a T {
         unsafe {
             let tydesc = sys::get_type_desc::<T>();
             let (ty_ptr, ptr) =
@@ -269,7 +269,7 @@ pub impl Arena {
 
     // The external interface
     #[inline(always)]
-    fn alloc<'a, T>(&'a mut self, op: &fn() -> T) -> &'a T {
+    pub fn alloc<'a, T>(&'a mut self, op: &fn() -> T) -> &'a T {
         unsafe {
             // XXX: Borrow check
             let this = transmute_mut_region(self);
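arena.rs shows the flip side of the same pattern: under `pub impl Arena` the internal helpers (`alloc_pod*`, `alloc_nonpod*`) had to be marked `priv`, while under a plain `impl Arena` privacy is the default, so the helpers simply drop `priv` and only the external entry point `alloc` gains `pub`. A hypothetical caller sketch (era syntax, assuming the existing `Arena` constructor function):

    let mut arena = Arena();
    let x: &int = arena.alloc(|| 10);   // OK: `pub fn alloc` is exported
    // arena.alloc_pod_inner(8, 8);     // would not compile: helper is now private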