Upgrade to Rust v1.49.0
mvines committed Jan 24, 2021
1 parent 7604edb commit 312ba7e
Showing 36 changed files with 148 additions and 135 deletions.
2 changes: 1 addition & 1 deletion ci/docker-rust-nightly/Dockerfile
@@ -1,4 +1,4 @@
FROM solanalabs/rust:1.48.0
FROM solanalabs/rust:1.49.0
ARG date

RUN set -x \
2 changes: 1 addition & 1 deletion ci/docker-rust/Dockerfile
@@ -1,6 +1,6 @@
# Note: when the rust version is changed also modify
# ci/rust-version.sh to pick up the new image tag
FROM rust:1.48.0
FROM rust:1.49.0

# Add Google Protocol Buffers for Libra's metrics library.
ENV PROTOC_VERSION 3.8.0
4 changes: 2 additions & 2 deletions ci/rust-version.sh
@@ -18,13 +18,13 @@
if [[ -n $RUST_STABLE_VERSION ]]; then
stable_version="$RUST_STABLE_VERSION"
else
stable_version=1.48.0
stable_version=1.49.0
fi

if [[ -n $RUST_NIGHTLY_VERSION ]]; then
nightly_version="$RUST_NIGHTLY_VERSION"
else
nightly_version=2020-12-13
nightly_version=2021-01-23
fi


4 changes: 2 additions & 2 deletions clap-utils/src/input_parsers.rs
@@ -167,12 +167,12 @@ pub fn resolve_signer(
name: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<Option<String>, Box<dyn std::error::Error>> {
Ok(resolve_signer_from_path(
resolve_signer_from_path(
matches,
matches.value_of(name).unwrap(),
name,
wallet_manager,
)?)
)
}

pub fn lamports_of_sol(matches: &ArgMatches<'_>, name: &str) -> Option<u64> {
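Note on the resolve_signer change: wrapping the call in Ok(...?) only unwraps the Result and immediately rewraps it, so returning the call directly is equivalent; the newer clippy flags this pattern (presumably its needless_question_mark lint), and the same rewrite appears again in core/src/rpc.rs further down. A minimal sketch of the equivalence, with a made-up helper:

    use std::num::ParseIntError;

    fn parse_count(s: &str) -> Result<u64, ParseIntError> {
        s.parse()
    }

    // Redundant: `?` unwraps the Ok value only for Ok(...) to rewrap it.
    fn parse_count_wrapped(s: &str) -> Result<u64, ParseIntError> {
        Ok(parse_count(s)?)
    }

    // Equivalent, and the form the diff switches to.
    fn parse_count_direct(s: &str) -> Result<u64, ParseIntError> {
        parse_count(s)
    }

The `?` form only earns its keep when the inner error type differs and needs a From conversion on the way out.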
26 changes: 13 additions & 13 deletions client/src/client_error.rs
@@ -35,16 +35,16 @@ impl From<TransportError> for ClientErrorKind {
}
}

impl Into<TransportError> for ClientErrorKind {
fn into(self) -> TransportError {
match self {
Self::Io(err) => TransportError::IoError(err),
Self::TransactionError(err) => TransportError::TransactionError(err),
Self::Reqwest(err) => TransportError::Custom(format!("{:?}", err)),
Self::RpcError(err) => TransportError::Custom(format!("{:?}", err)),
Self::SerdeJson(err) => TransportError::Custom(format!("{:?}", err)),
Self::SigningError(err) => TransportError::Custom(format!("{:?}", err)),
Self::Custom(err) => TransportError::Custom(format!("{:?}", err)),
impl From<ClientErrorKind> for TransportError {
fn from(client_error_kind: ClientErrorKind) -> Self {
match client_error_kind {
ClientErrorKind::Io(err) => Self::IoError(err),
ClientErrorKind::TransactionError(err) => Self::TransactionError(err),
ClientErrorKind::Reqwest(err) => Self::Custom(format!("{:?}", err)),
ClientErrorKind::RpcError(err) => Self::Custom(format!("{:?}", err)),
ClientErrorKind::SerdeJson(err) => Self::Custom(format!("{:?}", err)),
ClientErrorKind::SigningError(err) => Self::Custom(format!("{:?}", err)),
ClientErrorKind::Custom(err) => Self::Custom(format!("{:?}", err)),
}
}
}
@@ -100,9 +100,9 @@ impl From<TransportError> for ClientError {
}
}

impl Into<TransportError> for ClientError {
fn into(self) -> TransportError {
self.kind.into()
impl From<ClientError> for TransportError {
fn from(client_error: ClientError) -> Self {
client_error.kind.into()
}
}
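These two conversions (and the matching rewrites in crds_gossip_pull.rs, blockstore_db.rs, metrics/src/metrics.rs, and perf/src/cuda_runtime.rs below) are the same mechanical change: implement From for the target type instead of Into on the source type. The standard library provides a blanket impl<T, U> Into<U> for T where U: From<T>, so a From impl gives callers both TransportError::from(err) and err.into(), whereas a hand-written Into gives only the latter; newer clippy pushes in this direction (its from_over_into lint, assuming that is what fired on this toolchain). A minimal sketch with made-up types:

    struct Meters(f64);
    struct Feet(f64);

    // Implementing From on the target type...
    impl From<Meters> for Feet {
        fn from(m: Meters) -> Self {
            Feet(m.0 * 3.28084)
        }
    }

    fn main() {
        // ...also provides Into for free via the blanket impl.
        let a = Feet::from(Meters(2.0));
        let b: Feet = Meters(10.0).into();
        println!("{} {}", a.0, b.0);
    }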

1 change: 1 addition & 0 deletions core/src/broadcast_stage.rs
@@ -347,6 +347,7 @@ fn update_peer_stats(
) {
let now = timestamp();
let last = last_datapoint_submit.load(Ordering::Relaxed);
#[allow(deprecated)]
if now.saturating_sub(last) > 1000
&& last_datapoint_submit.compare_and_swap(last, now, Ordering::Relaxed) == last
{
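The #[allow(deprecated)] added above (and in standard_broadcast_run.rs, cluster_info_vote_listener.rs, retransmit_stage.rs, frozen-abi, metrics/src/counter.rs, and accounts_db.rs below) silences the new toolchain's warning that the Atomic* compare_and_swap methods are deprecated in favor of compare_exchange / compare_exchange_weak. Where the call is actually rewritten rather than allowed (see perf/src/recycler.rs below), the translation is roughly:

    use std::sync::atomic::{AtomicU64, Ordering};

    fn try_claim(slot: &AtomicU64, last: u64, now: u64) -> bool {
        // Deprecated form: returns the previous value; success means it equaled `last`.
        //   slot.compare_and_swap(last, now, Ordering::Relaxed) == last

        // Replacement: returns Result (Ok(previous) on success) and takes an
        // explicit ordering for the failure case as well.
        slot.compare_exchange(last, now, Ordering::Relaxed, Ordering::Relaxed)
            .is_ok()
    }

try_claim here is only an illustrative name, not a function from the codebase.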
1 change: 1 addition & 0 deletions core/src/broadcast_stage/standard_broadcast_run.rs
@@ -329,6 +329,7 @@ impl StandardBroadcastRun {
let mut get_peers_time = Measure::start("broadcast::get_peers");
let now = timestamp();
let last = self.last_peer_update.load(Ordering::Relaxed);
#[allow(deprecated)]
if now - last > BROADCAST_PEER_UPDATE_INTERVAL_MS
&& self
.last_peer_update
1 change: 1 addition & 0 deletions core/src/cluster_info_vote_listener.rs
@@ -403,6 +403,7 @@ impl ClusterInfoVoteListener {
let last_version = bank.last_vote_sync.load(Ordering::Relaxed);
let (new_version, msgs) = verified_vote_packets.get_latest_votes(last_version);
verified_packets_sender.send(msgs)?;
#[allow(deprecated)]
bank.last_vote_sync.compare_and_swap(
last_version,
new_version,
10 changes: 2 additions & 8 deletions core/src/commitment_service.rs
@@ -263,15 +263,9 @@ mod tests {
#[test]
fn test_get_highest_confirmed_root() {
assert_eq!(get_highest_confirmed_root(vec![], 10), 0);
let mut rooted_stake = vec![];
rooted_stake.push((0, 5));
rooted_stake.push((1, 5));
let rooted_stake = vec![(0, 5), (1, 5)];
assert_eq!(get_highest_confirmed_root(rooted_stake, 10), 0);
let mut rooted_stake = vec![];
rooted_stake.push((1, 5));
rooted_stake.push((0, 10));
rooted_stake.push((2, 5));
rooted_stake.push((1, 4));
let rooted_stake = vec![(1, 5), (0, 10), (2, 5), (1, 4)];
assert_eq!(get_highest_confirmed_root(rooted_stake, 10), 1);
}
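The test body above is only reshaped: building the Vec with repeated push calls becomes a vec! literal (the same cleanup is applied to Tpu::join in core/src/tpu.rs below). Behaviour is identical; the literal is simply the more direct form and is what newer clippy suggests (likely its vec_init_then_push lint). For illustration:

    fn main() {
        // Verbose form:
        let mut rooted_stake = Vec::new();
        rooted_stake.push((0, 5));
        rooted_stake.push((1, 5));

        // Equivalent literal form used in the diff:
        let rooted_stake_lit = vec![(0, 5), (1, 5)];
        assert_eq!(rooted_stake, rooted_stake_lit);
    }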

8 changes: 4 additions & 4 deletions core/src/crds_gossip_pull.rs
@@ -145,10 +145,10 @@ impl CrdsFilterSet {
}
}

impl Into<Vec<CrdsFilter>> for CrdsFilterSet {
fn into(self) -> Vec<CrdsFilter> {
let mask_bits = self.mask_bits;
self.filters
impl From<CrdsFilterSet> for Vec<CrdsFilter> {
fn from(cfs: CrdsFilterSet) -> Self {
let mask_bits = cfs.mask_bits;
cfs.filters
.into_iter()
.enumerate()
.map(|(seed, filter)| CrdsFilter {
2 changes: 1 addition & 1 deletion core/src/crds_value.rs
@@ -303,7 +303,7 @@ impl<'de> Deserialize<'de> for Vote {
from: Pubkey,
transaction: Transaction,
wallclock: u64,
};
}
let vote = Vote::deserialize(deserializer)?;
let vote = match vote.transaction.sanitize() {
Ok(_) => Self::new(vote.from, vote.transaction, vote.wallclock),
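The only edit in this file is dropping the stray semicolon after the helper struct declared inside the function (the same cleanup appears twice in ledger-tool/src/main.rs below). An item declared in a block ends at its closing brace, and the extra ';' is just an empty statement, which the upgraded toolchain evidently began warning about. A rough sketch with a made-up item:

    fn example() {
        // An in-function item declaration needs no trailing semicolon.
        struct Meta {
            wallclock: u64,
        } // <- previously followed by a redundant ';'

        let m = Meta { wallclock: 0 };
        assert_eq!(m.wallclock, 0);
    }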
2 changes: 1 addition & 1 deletion core/src/poh_service.rs
@@ -125,7 +125,7 @@ impl PohService {
// sleep is not accurate enough to get a predictable time.
// Kernel can not schedule the thread for a while.
while (now.elapsed().as_nanos() as u64) < target_tick_ns {
std::sync::atomic::spin_loop_hint();
std::hint::spin_loop();
}
total_sleep_us += (now.elapsed().as_nanos() as u64 - elapsed_ns) / 1000;
now = Instant::now();
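std::hint::spin_loop was stabilized in Rust 1.49 and std::sync::atomic::spin_loop_hint is deprecated in its favor on newer toolchains, so this is a pure rename: both emit the same CPU spin-wait hint (e.g. PAUSE on x86) inside a busy loop. The pattern in isolation, as a rough sketch:

    use std::time::{Duration, Instant};

    fn busy_wait(target: Duration) {
        let start = Instant::now();
        while start.elapsed() < target {
            // Tell the processor we are in a spin loop; cheaper for SMT siblings
            // and power than spinning at full tilt.
            std::hint::spin_loop();
        }
    }

    fn main() {
        busy_wait(Duration::from_micros(50));
    }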
2 changes: 2 additions & 0 deletions core/src/retransmit_stage.rs
@@ -121,6 +121,7 @@ fn update_retransmit_stats(

let now = timestamp();
let last = stats.last_ts.load(Ordering::Relaxed);
#[allow(deprecated)]
if now.saturating_sub(last) > 2000
&& stats.last_ts.compare_and_swap(last, now, Ordering::Relaxed) == last
{
@@ -310,6 +311,7 @@ fn retransmit(

let now = timestamp();
let last = last_peer_update.load(Ordering::Relaxed);
#[allow(deprecated)]
if now.saturating_sub(last) > 1000
&& last_peer_update.compare_and_swap(last, now, Ordering::Relaxed) == last
{
4 changes: 2 additions & 2 deletions core/src/rpc.rs
@@ -749,7 +749,7 @@ impl JsonRpcRequestProcessor {
// If the starting slot is lower than what's available in blockstore assume the entire
// [start_slot..end_slot] can be fetched from BigTable.
if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage {
return Ok(self
return self
.runtime_handle
.block_on(
bigtable_ledger_storage
@@ -764,7 +764,7 @@
"BigTable query failed (maybe timeout due to too large range?)"
.to_string(),
)
})?);
});
}
}

11 changes: 6 additions & 5 deletions core/src/tpu.rs
@@ -119,11 +119,12 @@ impl Tpu {
}

pub fn join(self) -> thread::Result<()> {
let mut results = vec![];
results.push(self.fetch_stage.join());
results.push(self.sigverify_stage.join());
results.push(self.cluster_info_vote_listener.join());
results.push(self.banking_stage.join());
let results = vec![
self.fetch_stage.join(),
self.sigverify_stage.join(),
self.cluster_info_vote_listener.join(),
self.banking_stage.join(),
];
let broadcast_result = self.broadcast_stage.join();
for result in results {
result?;
1 change: 1 addition & 0 deletions frozen-abi/macro/src/lib.rs
@@ -78,6 +78,7 @@ fn quote_for_specialization_detection() -> TokenStream2 {
std::sync::atomic::AtomicBool::new(false);
}

#[allow(deprecated)]
if !SPECIALIZATION_DETECTOR_INJECTED.compare_and_swap(
false,
true,
12 changes: 6 additions & 6 deletions ledger-tool/src/main.rs
@@ -645,15 +645,15 @@ fn hardforks_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<Slot>> {

fn load_bank_forks(
arg_matches: &ArgMatches,
ledger_path: &PathBuf,
ledger_path: &Path,
genesis_config: &GenesisConfig,
process_options: ProcessOptions,
access_type: AccessType,
wal_recovery_mode: Option<BlockstoreRecoveryMode>,
snapshot_archive_path: Option<PathBuf>,
) -> bank_forks_utils::LoadResult {
let blockstore = open_blockstore(&ledger_path, access_type, wal_recovery_mode);
let snapshot_path = ledger_path.clone().join(if blockstore.is_primary_access() {
let snapshot_path = ledger_path.join(if blockstore.is_primary_access() {
"snapshot"
} else {
"snapshot.ledger-tool"
@@ -662,7 +662,7 @@ fn load_bank_forks(
None
} else {
let snapshot_package_output_path =
snapshot_archive_path.unwrap_or_else(|| ledger_path.clone());
snapshot_archive_path.unwrap_or_else(|| ledger_path.to_path_buf());
Some(SnapshotConfig {
snapshot_interval_slots: 0, // Value doesn't matter
snapshot_package_output_path,
@@ -1443,7 +1443,7 @@ fn main() {
last_in_slot: bool,
data_complete: bool,
shred: &'a Shred,
};
}
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
let ending_slot = value_t!(arg_matches, "ending_slot", Slot).unwrap_or(Slot::MAX);
let ledger = open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary, None);
@@ -2442,13 +2442,13 @@ fn main() {
cluster_points: String,
old_capitalization: u64,
new_capitalization: u64,
};
}
fn format_or_na<T: std::fmt::Display>(
data: Option<T>,
) -> String {
data.map(|data| format!("{}", data))
.unwrap_or_else(|| "N/A".to_owned())
};
}
let mut point_details = detail
.map(|d| d.points.iter().map(Some).collect::<Vec<_>>())
.unwrap_or_default();
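The substantive change in this ledger-tool diff is the load_bank_forks signature near the top: it now borrows &Path instead of &PathBuf (the remaining edits drop redundant semicolons after in-function item declarations). Clippy's ptr_arg lint prefers &Path because a &PathBuf deref-coerces to &Path at every call site, the borrowed form is more general, and it removes the earlier clone() calls, with join working directly on the borrow and to_path_buf used only where an owned PathBuf is genuinely needed. A minimal sketch with a made-up helper:

    use std::path::{Path, PathBuf};

    // Borrowed parameter: callers holding a PathBuf can pass &path_buf unchanged.
    fn snapshot_dir(ledger_path: &Path, primary: bool) -> PathBuf {
        ledger_path.join(if primary { "snapshot" } else { "snapshot.ledger-tool" })
    }

    fn main() {
        let ledger_path = PathBuf::from("/tmp/ledger");
        let dir = snapshot_dir(&ledger_path, true); // &PathBuf coerces to &Path
        println!("{}", dir.display());
    }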
8 changes: 5 additions & 3 deletions ledger/src/blockstore_db.rs
@@ -185,9 +185,10 @@ impl From<&str> for BlockstoreRecoveryMode {
}
}
}
impl Into<DBRecoveryMode> for BlockstoreRecoveryMode {
fn into(self) -> DBRecoveryMode {
match self {

impl From<BlockstoreRecoveryMode> for DBRecoveryMode {
fn from(brm: BlockstoreRecoveryMode) -> Self {
match brm {
BlockstoreRecoveryMode::TolerateCorruptedTailRecords => {
DBRecoveryMode::TolerateCorruptedTailRecords
}
@@ -404,6 +405,7 @@ pub trait Column {
fn key(index: Self::Index) -> Vec<u8>;
fn index(key: &[u8]) -> Self::Index;
fn primary_index(index: Self::Index) -> Slot;
#[allow(clippy::wrong_self_convention)]
fn as_index(slot: Slot) -> Self::Index;
}
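The new #[allow(clippy::wrong_self_convention)] covers as_index: clippy's convention is that a method named as_* borrows self and returns a view of it, while as_index here is an associated function that only takes a Slot, and the upgraded clippy apparently started flagging that mismatch; the allow keeps the existing trait API rather than renaming it. Roughly, the convention it is enforcing:

    struct Index(u64);

    impl Index {
        // Conventional as_*: borrow self, expose a cheap view.
        fn as_u64(&self) -> u64 {
            self.0
        }

        // An as_* function with no self reads like a constructor; names such as
        // from_slot or new would match the convention (kept here only to mirror
        // the trait method the allow is for).
        fn as_index(slot: u64) -> Self {
            Index(slot)
        }
    }

    fn main() {
        let idx = Index::as_index(7);
        println!("{}", idx.as_u64());
    }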

2 changes: 2 additions & 0 deletions metrics/src/counter.rs
@@ -164,6 +164,7 @@ impl Counter {
}
}
pub fn init(&mut self) {
#![allow(deprecated)]
self.lograte
.compare_and_swap(0, Self::default_log_rate(), Ordering::Relaxed);
self.metricsrate
@@ -188,6 +189,7 @@
}

let lastlog = self.lastlog.load(Ordering::Relaxed);
#[allow(deprecated)]
let prev = self
.lastlog
.compare_and_swap(lastlog, counts, Ordering::Relaxed);
10 changes: 5 additions & 5 deletions metrics/src/metrics.rs
@@ -19,11 +19,11 @@ use std::{

type CounterMap = HashMap<(&'static str, u64), CounterPoint>;

impl Into<DataPoint> for CounterPoint {
fn into(self) -> DataPoint {
let mut point = DataPoint::new(self.name);
point.timestamp = self.timestamp;
point.add_field_i64("count", self.count);
impl From<CounterPoint> for DataPoint {
fn from(counter_point: CounterPoint) -> Self {
let mut point = Self::new(counter_point.name);
point.timestamp = counter_point.timestamp;
point.add_field_i64("count", counter_point.count);
point
}
}
17 changes: 8 additions & 9 deletions perf/src/cuda_runtime.rs
@@ -89,18 +89,17 @@ impl<T: Clone + Default + Sized> Default for PinnedVec<T> {
}
}

impl<T: Clone + Default + Sized> Into<Vec<T>> for PinnedVec<T> {
fn into(mut self) -> Vec<T> {
if self.pinned {
unpin(self.x.as_mut_ptr());
self.pinned = false;
impl<T: Clone + Default + Sized> From<PinnedVec<T>> for Vec<T> {
fn from(mut pinned_vec: PinnedVec<T>) -> Self {
if pinned_vec.pinned {
unpin(pinned_vec.x.as_mut_ptr());
pinned_vec.pinned = false;
}
self.pinnable = false;
self.recycler = None;
std::mem::take(&mut self.x)
pinned_vec.pinnable = false;
pinned_vec.recycler = None;
std::mem::take(&mut pinned_vec.x)
}
}

pub struct PinnedIter<'a, T>(std::slice::Iter<'a, T>);

pub struct PinnedIterMut<'a, T>(std::slice::IterMut<'a, T>);
9 changes: 6 additions & 3 deletions perf/src/recycler.rs
@@ -114,9 +114,12 @@ impl<T: Default + Reset> RecyclerX<T> {
let max_gc = self.stats.max_gc.load(Ordering::Relaxed);
if len > max_gc {
// this is not completely accurate, but for most cases should be fine.
self.stats
.max_gc
.compare_and_swap(max_gc, len, Ordering::Relaxed);
let _ = self.stats.max_gc.compare_exchange(
max_gc,
len,
Ordering::Relaxed,
Ordering::Relaxed,
);
}
let total = self.stats.total.load(Ordering::Relaxed);
let reuse = self.stats.reuse.load(Ordering::Relaxed);
2 changes: 2 additions & 0 deletions runtime/src/accounts_db.rs
@@ -3908,6 +3908,8 @@ impl AccountsDB {
fn report_store_timings(&self) {
let last = self.stats.last_store_report.load(Ordering::Relaxed);
let now = solana_sdk::timing::timestamp();

#[allow(deprecated)]
if now.saturating_sub(last) > 1000
&& self
.stats