// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Open Ethereum.
// Open Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Open Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Open Ethereum. If not, see <http://www.gnu.org/licenses/>.
//! `BlockChain` synchronization strategy.
//! Syncs to peers and keeps up to date.
//! This implementation uses Ethereum protocol versions 63 and 64.
//!
//! Syncing strategy summary.
//! Split the chain into ranges of N blocks each. Download ranges sequentially. Split each range into subchains of M blocks. Download subchains in parallel.
//! State.
//! Sync state consists of the following data:
//! - s: State enum which can be one of the following values: `ChainHead`, `Blocks`, `Idle`
//! - H: A set of downloaded block headers
//! - B: A set of downloaded block bodies
//! - S: Set of block subchain start block hashes to download.
//! - l: Last imported / common block hash
//! - P: A set of connected peers. For each peer we maintain its last known total difficulty and starting block hash being requested if any.
//! General behaviour.
//! We start with all sets empty, l is set to the best block in the block chain, s is set to `ChainHead`.
//! If at any moment a bad block is reported by the block queue, we set s to `ChainHead`, reset l to the best block in the block chain and clear H, B and S.
//! If at any moment P becomes empty, we set s to `ChainHead`, and clear H, B and S.
//!
//! Workflow for `ChainHead` state.
//! In this state we try to get subchain headers with a single `GetBlockHeaders` request.
//! On `NewPeer` / On `Restart`:
//! If the peer's total difficulty is higher and there are fewer than 5 peers downloading, request N/M headers with interval M+1 starting from l.
//! On `BlockHeaders(R)`:
//! If R is empty:
//! If l is equal to genesis block hash or l is more than 1000 blocks behind our best hash:
//! Remove current peer from P. Set l to the best block in the block chain. Select peer with maximum total difficulty from P and restart.
//! Else
//! Set l to l’s parent and restart.
//! Else if we already have all the headers in the block chain or the block queue:
//! Set s to `Idle`,
//! Else
//! Set S to R, set s to `Blocks`.
//!
//! All other messages are ignored.
//!
//! Workflow for `Blocks` state.
//! In this state we download block headers and bodies from multiple peers.
//! On `NewPeer` / On `Restart`:
//! For all idle peers:
//! Find a set of 256 or fewer block hashes in H which are not in B and not being downloaded by other peers. If the set is not empty:
//! Request block bodies for the hashes in the set.
//! Else
//! Find an element in S which is not being downloaded by other peers. If found: Request M headers starting from the element.
//!
//! On `BlockHeaders(R)`:
//! If R is empty remove current peer from P and restart.
//! Validate received headers:
//! For each header find a parent in H or R or the blockchain. Restart if there is a block with unknown parent.
//! Find at least one header from the received list in S. Restart if there is none.
//! Go to `CollectBlocks`.
//!
//! On `BlockBodies(R)`:
//! If R is empty remove current peer from P and restart.
//! Add bodies with a matching header in H to B.
//! Go to `CollectBlocks`.
//!
//! `CollectBlocks`:
//! Find a chain of blocks C in H starting from h where h’s parent equals l. The chain ends with the first block which does not have a body in B.
//! Add all blocks from the chain to the block queue. Remove them from H and B. Set l to the hash of the last block from C.
//! Update and merge subchain heads in S. For each h in S find a chain of blocks in B starting from h. Remove h from S. If the chain does not include an element from S, add the end of the chain to S.
//! If H is empty and S contains a single element set s to `ChainHead`.
//! Restart.
//!
//! All other messages are ignored.
//! Workflow for `Idle` state.
//! On `NewBlock`:
//! Import the block. If the block is unknown set s to `ChainHead` and restart.
//! On `NewHashes`:
//! Set s to `ChainHead` and restart.
//!
//! All other messages are ignored.
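// Illustrative sketch (editor's addition, not part of the original module): the
// strategy summary above requests N/M subchain head headers with interval M+1
// starting from the last imported block l. Assuming hypothetical values such as
// N = 2048 and M = 128, the requested head block numbers would be computed as:
#[cfg(any())] // `any()` with no predicates is always false, so this sketch is never compiled
fn subchain_head_numbers(l: u64, n: u64, m: u64) -> Vec<u64> {
// One head per subchain of M blocks, each head M+1 blocks after the previous one.
(0..n / m).map(|i| l + i * (m + 1)).collect()
}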
mod handler;
mod propagator;
mod requester;
mod supplier;
pub mod fork_filter;
pub mod sync_packet;
use std::sync::{Arc, mpsc};
use std::collections::{HashSet, HashMap, BTreeMap};
use std::cmp;
use std::time::{Duration, Instant};
use crate::{
ETH_PROTOCOL, EthProtocolInfo as PeerInfoDigest, PriorityTask, SyncConfig, WarpSync, WARP_SYNC_PROTOCOL_ID,
api::{Notification, PRIORITY_TIMER_INTERVAL},
block_sync::{BlockDownloader, DownloadAction},
chain::fork_filter::ForkFilterApi,
sync_io::SyncIo,
snapshot_sync::Snapshot,
transactions_stats::{TransactionsStats, Stats as TransactionStats},
private_tx::PrivateTxHandler,
};
use bytes::Bytes;
use client_traits::BlockChainClient;
use derive_more::Display;
use ethereum_types::{H256, U256};
use fastmap::{H256FastMap, H256FastSet};
use futures::sync::mpsc as futures_mpsc;
use keccak_hash::keccak;
use log::{error, trace, debug, warn};
use network::client_version::ClientVersion;
use network::{self, PeerId};
use parity_util_mem::{MallocSizeOfExt, malloc_size_of_is_0};
use parking_lot::{Mutex, RwLock, RwLockWriteGuard};
use rand::{Rng, seq::SliceRandom};
use rlp::{RlpStream, DecoderError};
use common_types::{
BlockNumber,
ids::BlockId,
transaction::UnverifiedTransaction,
verification::VerificationQueueInfo as BlockQueueInfo,
blockchain_info::BlockChainInfo,
block_status::BlockStatus,
snapshot::RestorationStatus,
};
use self::handler::SyncHandler;
use self::sync_packet::{PacketInfo, SyncPacket};
use self::sync_packet::SyncPacket::{
NewBlockPacket,
StatusPacket,
};
use self::propagator::SyncPropagator;
use self::requester::SyncRequester;
pub(crate) use self::supplier::SyncSupplier;
malloc_size_of_is_0!(PeerInfo);
/// Possible errors during packet processing
#[derive(Debug, Display)]
pub enum PacketProcessError {
/// Error of RLP decoder
#[display(fmt = "Decoder Error: {}", _0)]
Decoder(DecoderError),
/// Underlying client is busy and cannot process the packet
/// The packet should be postponed for later response
#[display(fmt = "Underlying client is busy")]
ClientBusy,
}
impl From<DecoderError> for PacketProcessError {
fn from(err: DecoderError) -> Self {
PacketProcessError::Decoder(err)
}
}
/// Version 64 of the Ethereum protocol and number of packet IDs reserved by the protocol (packet count).
pub const ETH_PROTOCOL_VERSION_64: (u8, u8) = (64, 0x11);
/// Version 63 of the Ethereum protocol and number of packet IDs reserved by the protocol (packet count).
pub const ETH_PROTOCOL_VERSION_63: (u8, u8) = (63, 0x11);
/// Version 1 of the Parity protocol and the packet count.
pub const PAR_PROTOCOL_VERSION_1: (u8, u8) = (1, 0x15);
/// Version 2 of the Parity protocol (consensus messages added).
pub const PAR_PROTOCOL_VERSION_2: (u8, u8) = (2, 0x16);
/// Version 3 of the Parity protocol (private transactions messages added).
pub const PAR_PROTOCOL_VERSION_3: (u8, u8) = (3, 0x18);
/// Version 4 of the Parity protocol (private state sync added).
pub const PAR_PROTOCOL_VERSION_4: (u8, u8) = (4, 0x20);
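// Illustrative sketch (editor's addition): each constant above pairs a protocol
// version with the number of packet ids it reserves. A hypothetical consumer
// might destructure such a tuple like this:
#[cfg(any())] // never compiled; kept for documentation only
fn eth_64_packet_id_in_range(packet_id: u8) -> bool {
let (_version, packet_count) = ETH_PROTOCOL_VERSION_64;
packet_id < packet_count
}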
/// Maximum number of block bodies to send in a single response.
pub const MAX_BODIES_TO_SEND: usize = 256;
/// Maximum number of block headers to send in a single response.
pub const MAX_HEADERS_TO_SEND: usize = 512;
/// Maximum number of "entries" to include in a GetDataNode request.
pub const MAX_NODE_DATA_TO_SEND: usize = 1024;
/// Maximum allowed duration for serving a batch GetNodeData request.
const MAX_NODE_DATA_TOTAL_DURATION: Duration = Duration::from_secs(2);
/// Maximum allowed duration for serving a single GetNodeData request.
const MAX_NODE_DATA_SINGLE_DURATION: Duration = Duration::from_millis(100);
/// Maximum number of block receipts to send in a single response.
pub const MAX_RECEIPTS_HEADERS_TO_SEND: usize = 256;
const MIN_PEERS_PROPAGATION: usize = 4;
const MAX_PEERS_PROPAGATION: usize = 128;
const MAX_PEER_LAG_PROPAGATION: BlockNumber = 20;
const MAX_NEW_HASHES: usize = 64;
const MAX_NEW_BLOCK_AGE: BlockNumber = 20;
// Maximal packet size with transactions (cannot be greater than 16 MB - protocol limitation).
// Keep it under 8 MB as well, because the packet may end up oversized after compression.
const MAX_TRANSACTION_PACKET_SIZE: usize = 5 * 1024 * 1024;
// Min number of blocks to be behind the tip for a snapshot sync to be considered useful to us.
const SNAPSHOT_RESTORE_THRESHOLD: BlockNumber = 30000;
/// We prefer to sync snapshots that are available from this many peers. If we have not found a
/// snapshot available from `SNAPSHOT_MIN_PEERS` peers within `WAIT_PEERS_TIMEOUT`, then we make do
/// with a single peer to sync from.
const SNAPSHOT_MIN_PEERS: usize = 3;
/// To keep memory from growing uncontrollably we restore chunks as we download them and write them
/// to disk only after we have processed them; we also want to avoid pausing the chunk download too
/// often, so we allow a little bit of leeway here and let the downloading be
/// `MAX_SNAPSHOT_CHUNKS_DOWNLOAD_AHEAD` chunks ahead of the restoration.
const MAX_SNAPSHOT_CHUNKS_DOWNLOAD_AHEAD: usize = 5;
/// Time to wait for snapshotting peers to show up with a snapshot we want to use. Beyond this time,
/// a single peer is enough to start downloading.
const WAIT_PEERS_TIMEOUT: Duration = Duration::from_secs(10);
/// Time to wait for a peer to start being useful to us in some form. After this they are
/// disconnected.
const STATUS_TIMEOUT: Duration = Duration::from_secs(10);
const HEADERS_TIMEOUT: Duration = Duration::from_secs(15);
const BODIES_TIMEOUT: Duration = Duration::from_secs(20);
const RECEIPTS_TIMEOUT: Duration = Duration::from_secs(10);
const FORK_HEADER_TIMEOUT: Duration = Duration::from_secs(3);
/// Max time to wait for the Snapshot Manifest packet to arrive from a peer after it has been requested.
const SNAPSHOT_MANIFEST_TIMEOUT: Duration = Duration::from_secs(5);
const SNAPSHOT_DATA_TIMEOUT: Duration = Duration::from_secs(120);
const PRIVATE_STATE_TIMEOUT: Duration = Duration::from_secs(120);
/// Defines how much time we have to complete a priority transaction or block propagation.
/// After the deadline is reached the task is considered finished
/// (so we might have sent to only some of the peers we originally intended to send to).
const PRIORITY_TASK_DEADLINE: Duration = Duration::from_millis(100);
#[derive(Copy, Clone, Eq, PartialEq, Debug, MallocSizeOf)]
/// Sync state
pub enum SyncState {
/// Collecting enough peers to start syncing.
WaitingPeers,
/// Waiting for snapshot manifest download
SnapshotManifest,
/// Downloading snapshot data
SnapshotData,
/// Waiting for snapshot restoration progress.
SnapshotWaiting,
/// Downloading new blocks
Blocks,
/// Initial chain sync complete. Waiting for new packets
Idle,
/// Block downloading paused. Waiting for block queue to process blocks and free some space
Waiting,
/// Downloading blocks learned from `NewHashes` packet
NewBlocks,
}
/// Syncing status and statistics
#[derive(Clone, Copy)]
pub struct SyncStatus {
/// State
pub state: SyncState,
/// Syncing protocol version. That's the maximum protocol version we connect to.
pub protocol_version: u8,
/// The underlying p2p network version.
pub network_id: u64,
/// `BlockChain` height for the moment the sync started.
pub start_block_number: BlockNumber,
/// Last fully downloaded and imported block number (if any).
pub last_imported_block_number: Option<BlockNumber>,
/// Highest block number in the download queue (if any).
pub highest_block_number: Option<BlockNumber>,
/// Total number of blocks for the sync process.
pub blocks_total: BlockNumber,
/// Number of blocks downloaded so far.
pub blocks_received: BlockNumber,
/// Total number of connected peers
pub num_peers: usize,
/// Total number of active peers.
pub num_active_peers: usize,
/// Heap memory used in bytes.
pub mem_used: usize,
/// Snapshot chunks
pub num_snapshot_chunks: usize,
/// Snapshot chunks downloaded
pub snapshot_chunks_done: usize,
/// Last fully downloaded and imported ancient block number (if any).
pub last_imported_old_block_number: Option<BlockNumber>,
}
impl SyncStatus {
/// Indicates if snapshot download is in progress
pub fn is_snapshot_syncing(&self) -> bool {
match self.state {
SyncState::SnapshotManifest |
SyncState::SnapshotData |
SyncState::SnapshotWaiting => true,
_ => false,
}
}
/// Returns the maximum number of peers to display in informants
pub fn current_max_peers(&self, min_peers: u32, max_peers: u32) -> u32 {
if self.num_peers as u32 > min_peers {
max_peers
} else {
min_peers
}
}
/// Is it doing a major sync?
pub fn is_syncing(&self, queue_info: BlockQueueInfo) -> bool {
let is_syncing_state = match self.state { SyncState::Idle | SyncState::NewBlocks => false, _ => true };
let is_verifying = queue_info.unverified_queue_size + queue_info.verified_queue_size > 3;
is_verifying || is_syncing_state
}
}
#[derive(PartialEq, Eq, Debug, Clone)]
/// Peer data type requested from a peer by us.
pub enum PeerAsking {
Nothing,
ForkHeader,
BlockHeaders,
BlockBodies,
BlockReceipts,
SnapshotManifest,
SnapshotData,
PrivateState,
}
#[derive(PartialEq, Eq, Debug, Clone, Copy, MallocSizeOf)]
/// Block downloader channel.
pub enum BlockSet {
/// New blocks better than our best blocks
NewBlocks,
/// Missing old blocks
OldBlocks,
}
#[derive(Clone, Eq, PartialEq, Debug)]
/// Peer fork confirmation state.
pub enum ForkConfirmation {
/// Fork block confirmation pending.
Unconfirmed,
/// Peer's chain is too short to confirm the fork.
TooShort,
/// Fork is confirmed.
Confirmed,
}
#[derive(Clone, Debug)]
/// Syncing peer information
pub struct PeerInfo {
/// eth protocol version
protocol_version: u8,
/// Peer chain genesis hash
genesis: H256,
/// Peer network id
network_id: u64,
/// Peer best block hash
latest_hash: H256,
/// Peer total difficulty if known
difficulty: Option<U256>,
/// Type of data currently being requested by us from a peer.
asking: PeerAsking,
/// A set of block hashes being requested
asking_blocks: Vec<H256>,
/// Holds requested header hash if currently requesting block header by hash
asking_hash: Option<H256>,
/// Holds requested private state hash
asking_private_state: Option<H256>,
/// Holds requested snapshot chunk hash if any.
asking_snapshot_data: Option<H256>,
/// Request timestamp
ask_time: Instant,
/// Holds a set of transactions recently sent to this peer to avoid spamming.
last_sent_transactions: H256FastSet,
/// Holds a set of private transactions and their signatures recently sent to this peer to avoid spamming.
last_sent_private_transactions: H256FastSet,
/// Pending request is expired and result should be ignored
expired: bool,
/// Private transactions enabled
private_tx_enabled: bool,
/// Peer fork confirmation status
confirmation: ForkConfirmation,
/// Best snapshot hash
snapshot_hash: Option<H256>,
/// Best snapshot block number
snapshot_number: Option<BlockNumber>,
/// Block set requested
block_set: Option<BlockSet>,
/// Version of the software the peer is running
client_version: ClientVersion,
}
impl PeerInfo {
/// The peer is confirmed to be on our fork and its pending request has not expired.
fn can_sync(&self) -> bool {
self.confirmation == ForkConfirmation::Confirmed && !self.expired
}
/// The peer is not known to be on a different fork and its pending request has not expired.
fn is_allowed(&self) -> bool {
self.confirmation != ForkConfirmation::Unconfirmed && !self.expired
}
fn reset_asking(&mut self) {
self.asking_blocks.clear();
self.asking_hash = None;
self.asking_private_state = None;
// mark any pending requests as expired
if self.asking != PeerAsking::Nothing && self.is_allowed() {
self.expired = true;
}
}
fn reset_private_stats(&mut self) {
self.last_sent_private_transactions.clear();
}
}
#[cfg(not(test))]
pub mod random {
use rand;
pub fn new() -> rand::rngs::ThreadRng { rand::thread_rng() }
}
#[cfg(test)]
pub mod random {
use rand::SeedableRng;
use rand_xorshift::XorShiftRng;
const RNG_SEED: [u8; 16] = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16];
pub fn new() -> XorShiftRng {
XorShiftRng::from_seed(RNG_SEED)
}
}
pub type RlpResponseResult = Result<Option<(SyncPacket, RlpStream)>, PacketProcessError>;
pub type Peers = HashMap<PeerId, PeerInfo>;
/// Thread-safe wrapper for `ChainSync`.
///
/// NOTE: always lock in the order of field declaration
pub struct ChainSyncApi {
/// Priority tasks queue
priority_tasks: Mutex<mpsc::Receiver<PriorityTask>>,
/// The rest of sync data
sync: RwLock<ChainSync>,
}
impl ChainSyncApi {
/// Creates new `ChainSyncApi`
pub fn new(
config: SyncConfig,
chain: &dyn BlockChainClient,
fork_filter: ForkFilterApi,
private_tx_handler: Option<Arc<dyn PrivateTxHandler>>,
priority_tasks: mpsc::Receiver<PriorityTask>,
) -> Self {
ChainSyncApi {
sync: RwLock::new(ChainSync::new(config, chain, fork_filter, private_tx_handler)),
priority_tasks: Mutex::new(priority_tasks),
}
}
/// Gives `write` access to underlying `ChainSync`
pub fn write(&self) -> RwLockWriteGuard<ChainSync> {
self.sync.write()
}
/// Returns info about given list of peers
pub fn peer_info(&self, ids: &[PeerId]) -> Vec<Option<PeerInfoDigest>> {
let sync = self.sync.read();
ids.iter().map(|id| sync.peer_info(id)).collect()
}
/// Returns synchronization status
pub fn status(&self) -> SyncStatus {
self.sync.read().status()
}
/// Returns transactions propagation statistics
pub fn transactions_stats(&self) -> BTreeMap<H256, crate::api::TransactionStats> {
self.sync.read().transactions_stats()
.iter()
.map(|(hash, stats)| (*hash, stats.into()))
.collect()
}
/// Dispatch incoming requests and responses
pub fn dispatch_packet(&self, io: &mut dyn SyncIo, peer: PeerId, packet_id: u8, data: &[u8]) {
SyncSupplier::dispatch_packet(&self.sync, io, peer, packet_id, data)
}
/// Process the queue of requests whose responses were delayed.
pub fn process_delayed_requests(&self, io: &mut dyn SyncIo) {
let requests = self.sync.write().retrieve_delayed_requests();
if !requests.is_empty() {
debug!(target: "sync", "Processing {} delayed requests", requests.len());
for (peer_id, packet_id, packet_data) in requests {
SyncSupplier::dispatch_delayed_request(&self.sync, io, peer_id, packet_id, &packet_data);
}
}
}
/// Process a priority propagation queue.
/// This task is run from a timer and should be time constrained.
/// Hence we set up a deadline for the execution and cancel the task if the deadline is exceeded.
///
/// NOTE This method should only handle stuff that can be canceled and would reach other peers
/// by other means.
/// Called every `PRIORITY_TIMER_INTERVAL` (0.25 sec)
pub fn process_priority_queue(&self, io: &mut dyn SyncIo) {
fn check_deadline(deadline: Instant) -> Option<Duration> {
let now = Instant::now();
if now > deadline {
None
} else {
Some(deadline - now)
}
}
// deadline to get the task from the queue
let deadline = Instant::now() + PRIORITY_TIMER_INTERVAL;
let mut work = || {
let task = {
let tasks = self.priority_tasks.try_lock_until(deadline)?;
let left = check_deadline(deadline)?;
tasks.recv_timeout(left).ok()?
};
task.starting();
// wait for the sync lock until deadline;
// note we might drop the task here if we don't manage to acquire the lock.
let mut sync = self.sync.try_write_until(deadline)?;
// since we already have everything let's use a different deadline
// to do the rest of the job now, so that previous work is not wasted.
let deadline = Instant::now() + PRIORITY_TASK_DEADLINE;
let as_ms = move |prev| {
let dur: Duration = Instant::now() - prev;
dur.as_secs() * 1_000 + dur.subsec_millis() as u64
};
match task {
// NOTE We can't simply use existing methods,
// because the block is not in the DB yet.
PriorityTask::PropagateBlock { started, block, hash, difficulty } => {
// try to send to peers that are on the same block as us
// (they will most likely accept the new block).
let chain_info = io.chain().chain_info();
let total_difficulty = chain_info.total_difficulty + difficulty;
let rlp = ChainSync::create_block_rlp(&block, total_difficulty);
for peers in sync.get_peers(&chain_info, PeerState::SameBlock).chunks(10) {
check_deadline(deadline)?;
for peer in peers {
SyncPropagator::send_packet(io, *peer, NewBlockPacket, rlp.clone());
if let Some(ref mut peer) = sync.peers.get_mut(peer) {
peer.latest_hash = hash;
}
}
}
debug!(target: "sync", "Finished block propagation, took {}ms", as_ms(started));
},
PriorityTask::PropagateTransactions(time, _) => {
SyncPropagator::propagate_new_transactions(&mut sync, io, || {
check_deadline(deadline).is_some()
});
debug!(target: "sync", "Finished transaction propagation, took {}ms", as_ms(time));
},
}
Some(())
};
// Process as many items as we can until the deadline is reached.
loop {
if work().is_none() {
return;
}
}
}
}
// Static methods
impl ChainSync {
/// creates rlp to send for the tree defined by 'from' and 'to' hashes
fn create_new_hashes_rlp(chain: &dyn BlockChainClient, from: &H256, to: &H256) -> Option<Bytes> {
match chain.tree_route(from, to) {
Some(route) => {
let uncles = chain.find_uncles(from).unwrap_or_else(Vec::new);
match route.blocks.len() {
0 => None,
_ => {
let mut blocks = route.blocks;
blocks.extend(uncles);
let mut rlp_stream = RlpStream::new_list(blocks.len());
for block_hash in blocks {
let mut hash_rlp = RlpStream::new_list(2);
let number = chain.block_header(BlockId::Hash(block_hash.clone()))
.expect("chain.tree_route and chain.find_uncles only return hahses of blocks that are in the blockchain. qed.").number();
hash_rlp.append(&block_hash);
hash_rlp.append(&number);
rlp_stream.append_raw(hash_rlp.as_raw(), 1);
}
Some(rlp_stream.out())
}
}
},
None => None
}
}
/// creates rlp from block bytes and total difficulty
fn create_block_rlp(bytes: &Bytes, total_difficulty: U256) -> Bytes {
let mut rlp_stream = RlpStream::new_list(2);
rlp_stream.append_raw(bytes, 1);
rlp_stream.append(&total_difficulty);
rlp_stream.out()
}
/// creates latest block rlp for the given client
fn create_latest_block_rlp(chain: &dyn BlockChainClient) -> Bytes {
Self::create_block_rlp(
&chain.block(BlockId::Hash(chain.chain_info().best_block_hash))
.expect("Best block always exists").into_inner(),
chain.chain_info().total_difficulty
)
}
/// creates given hash block rlp for the given client
fn create_new_block_rlp(chain: &dyn BlockChainClient, hash: &H256) -> Bytes {
Self::create_block_rlp(
&chain.block(BlockId::Hash(hash.clone())).expect("Block has just been sealed; qed").into_inner(),
chain.block_total_difficulty(BlockId::Hash(hash.clone())).expect("Block has just been sealed; qed.")
)
}
fn select_random_peers(peers: &[PeerId]) -> Vec<PeerId> {
// take sqrt(x) peers
let mut peers = peers.to_vec();
let mut count = (peers.len() as f64).powf(0.5).round() as usize;
count = cmp::min(count, MAX_PEERS_PROPAGATION);
count = cmp::max(count, MIN_PEERS_PROPAGATION);
peers.shuffle(&mut random::new());
peers.truncate(count);
peers
}
/// Reset the client to its initial state:
/// - if warp sync is enabled, start looking for peers to sync a snapshot from
/// - if `--warp-barrier` is used, ensure we're not synced beyond the barrier and start
/// looking for peers to sync a snapshot from
/// - otherwise, go `Idle`.
fn get_init_state(warp_sync: WarpSync, chain: &dyn BlockChainClient) -> SyncState {
let best_block = chain.chain_info().best_block_number;
match warp_sync {
WarpSync::Enabled => {
debug!(target: "sync", "Setting the initial state to `WaitingPeers`. Our best block: #{}; warp_sync: {:?}", best_block, warp_sync);
SyncState::WaitingPeers
},
WarpSync::OnlyAndAfter(block) if block > best_block => {
debug!(target: "sync", "Setting the initial state to `WaitingPeers`. Our best block: #{}; warp_sync: {:?}", best_block, warp_sync);
SyncState::WaitingPeers
},
_ => {
debug!(target: "sync", "Setting the initial state to `Idle`. Our best block: #{}", best_block);
SyncState::Idle
},
}
}
}
/// Peer selection filter used when querying for a list of peers
enum PeerState {
/// Peer is on a different block than us
Lagging,
/// Peer is on the same block as us
SameBlock
}
/// Blockchain sync handler.
/// See module documentation for more details.
#[derive(MallocSizeOf)]
pub struct ChainSync {
/// Sync state
state: SyncState,
/// Last block number for the start of sync
starting_block: BlockNumber,
/// Highest block number seen on the network.
highest_block: Option<BlockNumber>,
/// All connected peers
peers: Peers,
/// Peers active for current sync round
active_peers: HashSet<PeerId>,
/// Block download process for new blocks
new_blocks: BlockDownloader,
/// Block download process for ancient blocks
old_blocks: Option<BlockDownloader>,
/// Last propagated block number
last_sent_block_number: BlockNumber,
/// Network ID
network_id: u64,
/// Fork filter
fork_filter: ForkFilterApi,
/// Optional fork block to check
fork_block: Option<(BlockNumber, H256)>,
/// Snapshot downloader.
snapshot: Snapshot,
/// Connected peers pending Status message.
/// Value is request timestamp.
handshaking_peers: HashMap<PeerId, Instant>,
/// Requests that cannot be processed at the moment
delayed_requests: Vec<(PeerId, u8, Vec<u8>)>,
/// Ids of delayed requests, used for lookup; an id is composed of peer id and packet id
delayed_requests_ids: HashSet<(PeerId, u8)>,
/// Sync start timestamp. Measured when first peer is connected
sync_start_time: Option<Instant>,
/// Transactions propagation statistics
transactions_stats: TransactionsStats,
/// Enable ancient block downloading
download_old_blocks: bool,
/// Shared private tx service.
#[ignore_malloc_size_of = "arc on dyn trait here seems tricky, ignoring"]
private_tx_handler: Option<Arc<dyn PrivateTxHandler>>,
/// Enable warp sync.
warp_sync: WarpSync,
#[ignore_malloc_size_of = "mpsc unmettered, ignoring"]
status_sinks: Vec<futures_mpsc::UnboundedSender<SyncState>>
}
impl ChainSync {
/// Create a new instance of syncing strategy.
pub fn new(
config: SyncConfig,
chain: &dyn BlockChainClient,
fork_filter: ForkFilterApi,
private_tx_handler: Option<Arc<dyn PrivateTxHandler>>,
) -> Self {
let chain_info = chain.chain_info();
let best_block = chain_info.best_block_number;
let state = Self::get_init_state(config.warp_sync, chain);
let mut sync = ChainSync {
state,
starting_block: best_block,
highest_block: None,
peers: HashMap::new(),
handshaking_peers: HashMap::new(),
active_peers: HashSet::new(),
delayed_requests: Vec::new(),
delayed_requests_ids: HashSet::new(),
new_blocks: BlockDownloader::new(BlockSet::NewBlocks, &chain_info.best_block_hash, chain_info.best_block_number),
old_blocks: None,
last_sent_block_number: 0,
network_id: config.network_id,
fork_filter,
fork_block: config.fork_block,
download_old_blocks: config.download_old_blocks,
snapshot: Snapshot::new(),
sync_start_time: None,
transactions_stats: TransactionsStats::default(),
private_tx_handler,
warp_sync: config.warp_sync,
status_sinks: Vec::new()
};
sync.update_targets(chain);
sync
}
/// Returns synchronization status
pub fn status(&self) -> SyncStatus {
let last_imported_number = self.new_blocks.last_imported_block_number();
SyncStatus {
state: self.state.clone(),
protocol_version: ETH_PROTOCOL_VERSION_64.0,
network_id: self.network_id,
start_block_number: self.starting_block,
last_imported_block_number: Some(last_imported_number),
last_imported_old_block_number: self.old_blocks.as_ref().map(|d| d.last_imported_block_number()),
highest_block_number: self.highest_block.map(|n| cmp::max(n, last_imported_number)),
blocks_received: if last_imported_number > self.starting_block { last_imported_number - self.starting_block } else { 0 },
blocks_total: match self.highest_block { Some(x) if x > self.starting_block => x - self.starting_block, _ => 0 },
num_peers: self.peers.values().filter(|p| p.is_allowed()).count(),
num_active_peers: self.peers.values().filter(|p| p.is_allowed() && p.asking != PeerAsking::Nothing).count(),
num_snapshot_chunks: self.snapshot.total_chunks(),
snapshot_chunks_done: self.snapshot.done_chunks(),
mem_used: self.malloc_size_of(),
}
}
/// Returns information on peers connections
pub fn peer_info(&self, peer_id: &PeerId) -> Option<PeerInfoDigest> {
self.peers.get(peer_id).map(|peer_data| {
PeerInfoDigest {
version: peer_data.protocol_version as u32,
difficulty: peer_data.difficulty,
head: peer_data.latest_hash,
}
})
}
/// Returns transactions propagation statistics
pub fn transactions_stats(&self) -> &H256FastMap<TransactionStats> {
self.transactions_stats.stats()
}
/// Updates the set of transactions recently sent to this peer to avoid spamming.
pub fn transactions_received(&mut self, txs: &[UnverifiedTransaction], peer_id: PeerId) {
if let Some(peer_info) = self.peers.get_mut(&peer_id) {
peer_info.last_sent_transactions.extend(txs.iter().map(|tx| tx.hash()));
}
}
/// Abort all sync activity
pub fn abort(&mut self, io: &mut dyn SyncIo) {
self.reset_and_continue(io);
self.peers.clear();
}
/// returns the receiving end of a `futures::mpsc` channel that can
/// be polled for changes to the node's `SyncState`.
pub fn sync_notifications(&mut self) -> Notification<SyncState> {
let (sender, receiver) = futures_mpsc::unbounded();
self.status_sinks.push(sender);
receiver
}
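// Illustrative usage sketch (editor's addition): draining the stream returned by
// `sync_notifications` with futures 0.1 combinators. The `tokio` executor used
// here is an assumption and not a stated dependency of this module.
#[cfg(any())] // never compiled; kept for documentation only
fn log_sync_state_changes(&mut self) {
use futures::Stream;
let receiver = self.sync_notifications();
tokio::spawn(receiver.for_each(|state| {
println!("sync state changed to {:?}", state);
Ok(())
}));
}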
/// Notify all subscribers of a new SyncState
fn notify_sync_state(&mut self, state: SyncState) {
// remove any sender whose receiving end has been dropped
self.status_sinks.retain(|sender| {
sender.unbounded_send(state).is_ok()
});
}
/// sets a new SyncState
fn set_state(&mut self, state: SyncState) {
self.notify_sync_state(state);
self.state = state;
}
/// Reset sync. Clear all downloaded data but keep the queue.
/// Set sync state to the given state or to the initial state if `None` is provided.
fn reset(&mut self, io: &mut dyn SyncIo, state: Option<SyncState>) {
self.new_blocks.reset();
let chain_info = io.chain().chain_info();
for p in self.peers.values_mut() {
if p.block_set != Some(BlockSet::OldBlocks) {
p.reset_asking();
if p.difficulty.is_none() {
// assume peer has up to date difficulty
p.difficulty = Some(chain_info.pending_total_difficulty);
}
}
}
let warp_sync = self.warp_sync;
self.set_state(state.unwrap_or_else(|| Self::get_init_state(warp_sync, io.chain())));
// Reactivate peers only if some progress has been made
// since the last sync round or if starting fresh.
self.active_peers = self.peers.keys().cloned().collect();
}
/// Add a request for later processing
pub fn add_delayed_request(&mut self, peer: PeerId, packet_id: u8, data: &[u8]) {
// Ignore the request if there is already a request with the same id in the queue
if !self.delayed_requests_ids.contains(&(peer, packet_id)) {
self.delayed_requests_ids.insert((peer, packet_id));
self.delayed_requests.push((peer, packet_id, data.to_vec()));
debug!(target: "sync", "Delayed request with packet id {} from peer {} added", packet_id, peer);
}
}
/// Drain and return all delayed requests
pub fn retrieve_delayed_requests(&mut self) -> Vec<(PeerId, u8, Vec<u8>)> {
self.delayed_requests_ids.clear();
self.delayed_requests.drain(..).collect()
}
/// Restart sync
pub fn reset_and_continue(&mut self, io: &mut dyn SyncIo) {
trace!(target: "sync", "Restarting");
if self.state == SyncState::SnapshotData {
debug!(target:"snapshot_sync", "Aborting snapshot restore");
io.snapshot_service().abort_restore();
}
self.snapshot.clear();
// Passing `None` here means we'll end up in either `WaitingPeers` or `Idle` depending on
// the warp sync settings.
self.reset(io, None);
self.continue_sync(io);
}
/// Remove peer from active peer set. Peer will be reactivated on the next sync
/// round.
fn deactivate_peer(&mut self, _io: &mut dyn SyncIo, peer_id: PeerId) {
debug!(target: "sync", "Deactivating peer {}", peer_id);
self.active_peers.remove(&peer_id);
}
/// Decide if we should start downloading a snapshot and from whom. Called once per second.
fn maybe_start_snapshot_sync(&mut self, io: &mut dyn SyncIo) {
if !self.warp_sync.is_enabled() || io.snapshot_service().supported_versions().is_none() {
return;
}
use SyncState::*;
if self.state != WaitingPeers && self.state != Blocks && self.state != Waiting {
return;
}
// Make sure the snapshot block is sufficiently far ahead of our best block
// and that it is higher than the fork detection block
let our_best_block = io.chain().chain_info().best_block_number;
let fork_block = self.fork_block.map_or(0, |(n, _)| n);
let expected_warp_block = match self.warp_sync {
WarpSync::OnlyAndAfter(warp_block) => {
if our_best_block >= warp_block {
trace!(target: "snapshot_sync",
"Our best block (#{}) is already beyond the warp barrier block (#{})",
our_best_block, warp_block);
return;
}
warp_block
},
_ => 0,
};
// Collect snapshot info from peers and check if we can use their snapshots to sync.
let (best_snapshot_block, best_hash, max_peers, snapshot_peers) = {
let mut snapshots = self.peers.iter()
.filter(|&(_, p)|
// filter out expired peers and peers from whom we do not have fork confirmation.
p.is_allowed() &&
p.snapshot_number.map_or(false, |sn|
// Snapshot must be sufficiently better than what we have that it's useful to
// sync with it: more than 30k blocks beyond our best block
our_best_block < sn && (sn - our_best_block) > SNAPSHOT_RESTORE_THRESHOLD &&
// Snapshot must have been taken after the fork block (if any is configured)
sn > fork_block &&
// Snapshot must be greater or equal to the warp barrier, if any
sn >= expected_warp_block
)
)
.filter_map(|(p, peer)| {
peer.snapshot_hash.map(|hash| (p, hash))
.filter(|(_, hash)| !self.snapshot.is_known_bad(&hash) )
.and_then(|(p, hash)| peer.snapshot_number.map(|n| (*p, n, hash) ) )
})
.collect::<Vec<(PeerId, BlockNumber, H256)>>();
// Sort collection of peers by highest block number.
snapshots.sort_by(|&(_, ref b1, _), &(_, ref b2, _)| b2.cmp(b1) );
let mut snapshot_peers = HashMap::new();
let mut max_peers: usize = 0;
let mut best_hash = None;
let mut best_snapshot_block = None;
// Of the available snapshots, find the one seeded by the most peers. On a tie, the
// snapshot closest to the tip will be used (unfortunately this is the common case).
for (p, snapshot_block, hash) in snapshots {
let peers = snapshot_peers.entry(hash).or_insert_with(Vec::new);
peers.push(p);
if peers.len() > max_peers {
trace!(target: "snapshot_sync", "{} is the new best snapshotting peer, has snapshot at block #{}/{}", p, snapshot_block, hash);
max_peers = peers.len();
best_hash = Some(hash);
best_snapshot_block = Some(snapshot_block);
}
}
(best_snapshot_block, best_hash, max_peers, snapshot_peers)
};
// If we've waited long enough (10sec), a single peer will have to be enough for the snapshot sync to start.
let timeout = (self.state == WaitingPeers) &&
self.sync_start_time.map_or(false, |t| t.elapsed() > WAIT_PEERS_TIMEOUT);
if let (Some(block), Some(hash), Some(peers)) = (
best_snapshot_block,
best_hash,
best_hash.and_then(|h| snapshot_peers.get(&h))
) {
trace!(target: "snapshot_sync", "We can sync a snapshot at #{:?}/{:?} from {} peer(s): {:?}",
best_snapshot_block, best_hash, max_peers, snapshot_peers.values());
if max_peers >= SNAPSHOT_MIN_PEERS {
debug!(target: "snapshot_sync", "Starting confirmed snapshot sync for a snapshot at #{}/{:?} with peer {:?}", block, hash, peers);
self.start_snapshot_sync(io, peers);
} else if timeout {
debug!(target: "snapshot_sync", "Starting unconfirmed snapshot sync for a snapshot at #{}/{:?} with peer {:?}", block, hash, peers);
self.start_snapshot_sync(io, peers);
} else {
trace!(target: "snapshot_sync", "Waiting a little more to let more snapshot peers connect.")
}
} else if timeout {
if !self.warp_sync.is_warp_only() {
debug!(target: "snapshot_sync", "Not syncing snapshots (or none found), proceeding with normal sync.");
self.set_state(SyncState::Idle);
self.continue_sync(io);
} else {
warn!(target: "snapshot_sync", "No snapshots currently available at #{}. Try using a smaller value for --warp-barrier", expected_warp_block);
}
}
}
/// Start a snapshot with all peers that we are not currently asking something else from. If
/// we're already snapshotting with a peer, set sync state to `SnapshotData` and continue
/// fetching the snapshot. Note that we only ever sync snapshots from one peer so here we send
/// out the request for a manifest to all the peers that have it and start syncing the snapshot