From a97bb79b62cdc7904315d0d0d6714eec1326ef3f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 4 Feb 2023 18:24:36 -0500 Subject: [PATCH 01/21] feat: burnchain view now tracks the current reward cycle's consensus hash --- src/burnchains/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/burnchains/mod.rs b/src/burnchains/mod.rs index d8be22859e..2b757c6cf4 100644 --- a/src/burnchains/mod.rs +++ b/src/burnchains/mod.rs @@ -521,6 +521,7 @@ pub struct BurnchainView { pub burn_stable_block_height: u64, // latest stable block height (e.g. chain tip minus 7) pub burn_stable_block_hash: BurnchainHeaderHash, // latest stable burn block hash pub last_burn_block_hashes: HashMap, // map all block heights from burn_block_height back to the oldest one we'll take for considering the peer a neighbor + pub rc_consensus_hash: ConsensusHash, // consensus hash of the current reward cycle's start block } /// The burnchain block's encoded state transition: From 7ad241b74b893466767bac4f512a888749713837 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 4 Feb 2023 18:25:02 -0500 Subject: [PATCH 02/21] feat: load the reward cycle consensus hash from the sortition DB as the consensus hash of the first sortition in that reward cycle --- src/chainstate/burn/db/sortdb.rs | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/src/chainstate/burn/db/sortdb.rs b/src/chainstate/burn/db/sortdb.rs index 10a36d67fb..65035c8003 100644 --- a/src/chainstate/burn/db/sortdb.rs +++ b/src/chainstate/burn/db/sortdb.rs @@ -3707,7 +3707,7 @@ impl SortitionDB { /// Get a burn blockchain snapshot, given a burnchain configuration struct. /// Used mainly by the network code to determine what the chain tip currently looks like. 
pub fn get_burnchain_view( - conn: &DBConn, + conn: &SortitionDBConn, burnchain: &Burnchain, chain_tip: &BlockSnapshot, ) -> Result { @@ -3779,12 +3779,26 @@ impl SortitionDB { .unwrap_or(&burnchain.first_block_hash) .clone(); + let rc = burnchain + .block_height_to_reward_cycle(chain_tip.block_height) + .expect("FATAL: block height does not have a reward cycle"); + + let rc_height = burnchain.reward_cycle_to_block_height(rc); + let rc_consensus_hash = SortitionDB::get_ancestor_snapshot( + conn, + cmp::min(chain_tip.block_height, rc_height), + &chain_tip.sortition_id, + )? + .map(|sn| sn.consensus_hash) + .ok_or(db_error::NotFoundError)?; + test_debug!( - "Chain view: {},{}-{},{}", + "Chain view: {},{}-{},{},{}", chain_tip.block_height, chain_tip.burn_header_hash, stable_block_height, - &burn_stable_block_hash + &burn_stable_block_hash, + &rc_consensus_hash, ); Ok(BurnchainView { burn_block_height: chain_tip.block_height, @@ -3792,6 +3806,7 @@ impl SortitionDB { burn_stable_block_height: stable_block_height, burn_stable_block_hash: burn_stable_block_hash, last_burn_block_hashes: last_burn_block_hashes, + rc_consensus_hash, }) } } From 2faae13b918c78098c901d054f346144d3b5c592 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 4 Feb 2023 18:25:26 -0500 Subject: [PATCH 03/21] feat: have peers report in the `services` field that they support the stackerdb protocol, and have remote peers reply to handshakes with a list of stackerdbs that they support (which is then tracked in the conversation) --- src/net/chat.rs | 496 +++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 490 insertions(+), 6 deletions(-) diff --git a/src/net/chat.rs b/src/net/chat.rs index 30d97d60e1..e0ef27dd5e 100644 --- a/src/net/chat.rs +++ b/src/net/chat.rs @@ -45,6 +45,7 @@ use crate::net::db::PeerDB; use crate::net::db::*; use crate::net::neighbors::MAX_NEIGHBOR_BLOCK_DELAY; use crate::net::relay::*; +use crate::net::ContractId; use crate::net::Error as net_error; use 
crate::net::GetBlocksInv; use crate::net::GetPoxInv; @@ -139,6 +140,7 @@ pub struct NeighborStats { pub block_push_rx_counts: VecDeque<(u64, u64)>, // (timestamp, num bytes) pub microblocks_push_rx_counts: VecDeque<(u64, u64)>, // (timestamp, num bytes) pub transaction_push_rx_counts: VecDeque<(u64, u64)>, // (timestamp, num bytes) + pub stackerdb_push_rx_counts: VecDeque<(u64, u64)>, // (timestamp, num bytes) pub relayed_messages: HashMap, } @@ -162,6 +164,7 @@ impl NeighborStats { block_push_rx_counts: VecDeque::new(), microblocks_push_rx_counts: VecDeque::new(), transaction_push_rx_counts: VecDeque::new(), + stackerdb_push_rx_counts: VecDeque::new(), relayed_messages: HashMap::new(), } } @@ -201,6 +204,14 @@ impl NeighborStats { } } + pub fn add_stackerdb_push(&mut self, message_size: u64) -> () { + self.stackerdb_push_rx_counts + .push_back((get_epoch_time_secs(), message_size)); + while self.stackerdb_push_rx_counts.len() > NUM_BLOCK_POINTS { + self.stackerdb_push_rx_counts.pop_front(); + } + } + pub fn add_relayer(&mut self, addr: &NeighborAddress, num_bytes: u64) -> () { if let Some(stats) = self.relayed_messages.get_mut(addr) { stats.num_messages += 1; @@ -280,6 +291,11 @@ impl NeighborStats { NeighborStats::get_bandwidth(&self.transaction_push_rx_counts, BLOCK_POINT_LIFETIME) } + /// Get a peer's total stackerdb-push bandwidth usage + pub fn get_stackerdb_push_bandwidth(&self) -> f64 { + NeighborStats::get_bandwidth(&self.stackerdb_push_rx_counts, BLOCK_POINT_LIFETIME) + } + /// Determine how many of a particular message this peer has received pub fn get_message_recv_count(&self, msg_id: StacksMessageID) -> u64 { *(self.msg_rx_counts.get(&msg_id).unwrap_or(&0)) @@ -318,6 +334,9 @@ pub struct ConversationP2P { pub stats: NeighborStats, + // which stacker DBs this peer replicates + pub db_smart_contracts: Vec, + // outbound replies pub reply_handles: VecDeque, @@ -522,6 +541,8 @@ impl ConversationP2P { stats: NeighborStats::new(outbound), reply_handles: 
VecDeque::new(), + db_smart_contracts: vec![], + epochs: epochs, } } @@ -635,6 +656,21 @@ impl ConversationP2P { (peer_services & expected_bits) == expected_bits } + /// Does this remote neighbor support stacker DBs? It will if it has the STACKERDB bit set + pub fn supports_stackerdb(peer_services: u16) -> bool { + (peer_services & (ServiceFlags::STACKERDB as u16)) != 0 + } + + /// Does this remote neighbor support a particular StackerDB? + pub fn replicates_stackerdb(&self, db: &ContractId) -> bool { + for cid in self.db_smart_contracts.iter() { + if cid == db { + return true; + } + } + false + } + /// Determine whether or not a given (height, burn_header_hash) pair _disagrees_ with our /// burnchain view. If it does, return true. If it doesn't (including if the given pair is /// simply absent from the chain_view), then return False. @@ -1072,6 +1108,20 @@ impl ConversationP2P { Ok(updated) } + /// Update connection state from stacker DB handshake data. + /// Just synchronizes the announced smart contracts for which this node replicates data. + pub fn update_from_stacker_db_handshake_data( + &mut self, + stacker_db_data: &StackerDBHandshakeData, + ) { + self.db_smart_contracts = stacker_db_data.smart_contracts.clone(); + } + + /// Forget about this peer's stacker DB replication state + pub fn clear_stacker_db_handshake_data(&mut self) { + self.db_smart_contracts.clear(); + } + /// Handle an inbound NAT-punch request -- just tell the peer what we think their IP/port are. /// No authentication from the peer is necessary. 
fn handle_natpunch_request(&self, chain_view: &BurnchainView, nonce: u32) -> StacksMessage { @@ -1182,11 +1232,28 @@ impl ConversationP2P { } let accept_data = HandshakeAcceptData::new(local_peer, self.heartbeat); + let stacks_message = if ConversationP2P::supports_stackerdb(local_peer.services) + && ConversationP2P::supports_stackerdb(self.peer_services) + { + StacksMessageType::StackerDBHandshakeAccept( + accept_data, + StackerDBHandshakeData { + rc_consensus_hash: chain_view.rc_consensus_hash.clone(), + // placeholder sbtc address for now + smart_contracts: vec![ + ContractId::parse("SP000000000000000000002Q6VF78.sbtc").unwrap() + ], + }, + ) + } else { + StacksMessageType::HandshakeAccept(accept_data) + }; + let accept = StacksMessage::from_chain_view( self.version, self.network_id, chain_view, - StacksMessageType::HandshakeAccept(accept_data), + stacks_message, ); // update stats @@ -1203,8 +1270,10 @@ impl ConversationP2P { /// Called from the p2p network thread. fn handle_handshake_accept( &mut self, + burnchain_view: &BurnchainView, preamble: &Preamble, handshake_accept: &HandshakeAcceptData, + stackerdb_accept: Option<&StackerDBHandshakeData>, ) -> Result<(), net_error> { self.update_from_handshake_data(preamble, &handshake_accept.handshake)?; self.peer_heartbeat = @@ -1218,6 +1287,28 @@ impl ConversationP2P { handshake_accept.heartbeat_interval }; + if let Some(stackerdb_accept) = stackerdb_accept { + test_debug!( + "{} =?= {}", + &stackerdb_accept.rc_consensus_hash, + &burnchain_view.rc_consensus_hash + ); + if stackerdb_accept.rc_consensus_hash == burnchain_view.rc_consensus_hash { + // remote peer is in the same reward cycle as us. + self.update_from_stacker_db_handshake_data(stackerdb_accept); + } else { + // remote peer's burnchain view has diverged, so assume no longer replicating (we + // can't talk to it anyway). 
This can happen once per reward cycle for a few + // minutes as nodes begin the next reward cycle, but it's harmless -- at worst, it + // just means that no stacker DB replication happens between this peer and + // localhost during this time. + self.clear_stacker_db_handshake_data(); + } + } else { + // no longer replicating + self.clear_stacker_db_handshake_data(); + } + self.stats.last_handshake_time = get_epoch_time_secs(); debug!( @@ -1792,6 +1883,42 @@ impl ConversationP2P { Ok(None) } + /// Validate a pushed stackerdb chunk + /// Update bandwidth accounting, but forward the stackerdb chunk along. + /// Possibly return a reply handle for a NACK if we throttle the remote sender + fn validate_stackerdb_push( + &mut self, + local_peer: &LocalPeer, + chain_view: &BurnchainView, + preamble: &Preamble, + relayers: Vec, + ) -> Result, net_error> { + assert!(preamble.payload_len > 1); // don't count 1-byte type prefix + + if !self.process_relayers(local_peer, preamble, &relayers) { + debug!( + "Drop pushed stackerdb chunk -- invalid relayers {:?}", + &relayers + ); + self.stats.msgs_err += 1; + return Err(net_error::InvalidMessage); + } + + self.stats + .add_stackerdb_push((preamble.payload_len as u64) - 1); + + if self.connection.options.max_stackerdb_push_bandwidth > 0 + && self.stats.get_stackerdb_push_bandwidth() + > (self.connection.options.max_stackerdb_push_bandwidth as f64) + { + debug!("Neighbor {:?} exceeded max stackerdb-push bandwidth of {} bytes/sec (currently at {})", &self.to_neighbor_key(), self.connection.options.max_stackerdb_push_bandwidth, self.stats.get_stackerdb_push_bandwidth()); + return self + .reply_nack(local_peer, chain_view, preamble, NackErrorCodes::Throttled) + .and_then(|handle| Ok(Some(handle))); + } + Ok(None) + } + /// Handle an inbound authenticated p2p data-plane message. 
/// Return the message if not handled fn handle_data_message( @@ -1880,6 +2007,22 @@ impl ConversationP2P { } } } + StacksMessageType::StackerDBChunk(_) => { + // not handled here, but do some accounting -- we can't receive too many + // stackerdb chunks per second + match self.validate_stackerdb_push( + local_peer, + chain_view, + &msg.preamble, + msg.relayers.clone(), + )? { + Some(handle) => Ok(handle), + None => { + // will forward upstream + return Ok(Some(msg)); + } + } + } _ => { // all else will forward upstream return Ok(Some(msg)); @@ -2052,7 +2195,12 @@ impl ConversationP2P { } StacksMessageType::HandshakeAccept(ref data) => { test_debug!("{:?}: Got HandshakeAccept", &self); - self.handle_handshake_accept(&msg.preamble, data) + self.handle_handshake_accept(burnchain_view, &msg.preamble, data, None) + .and_then(|_| Ok(None)) + } + StacksMessageType::StackerDBHandshakeAccept(ref data, ref db_data) => { + test_debug!("{:?}: Got StackerDBHandshakeAccept", &self); + self.handle_handshake_accept(burnchain_view, &msg.preamble, data, Some(db_data)) .and_then(|_| Ok(None)) } StacksMessageType::Ping(_) => { @@ -2124,7 +2272,7 @@ impl ConversationP2P { StacksMessageType::HandshakeAccept(ref data) => { if solicited { test_debug!("{:?}: Got unauthenticated HandshakeAccept", &self); - self.handle_handshake_accept(&msg.preamble, data) + self.handle_handshake_accept(burnchain_view, &msg.preamble, data, None) .and_then(|_| Ok(None)) } else { test_debug!("{:?}: Unsolicited unauthenticated HandshakeAccept", &self); @@ -2134,6 +2282,22 @@ impl ConversationP2P { Ok(None) } } + StacksMessageType::StackerDBHandshakeAccept(ref data, ref db_data) => { + if solicited { + test_debug!("{:?}: Got unauthenticated StackerDBHandshakeAccept", &self); + self.handle_handshake_accept(burnchain_view, &msg.preamble, data, Some(db_data)) + .and_then(|_| Ok(None)) + } else { + test_debug!( + "{:?}: Unsolicited unauthenticated StackerDBHandshakeAccept", + &self + ); + + // don't update stats or 
state, and don't pass back + consume = true; + Ok(None) + } + } StacksMessageType::HandshakeReject => { test_debug!("{:?}: Got unauthenticated HandshakeReject", &self); @@ -2426,6 +2590,11 @@ mod test { use super::*; + const DEFAULT_SERVICES: u16 = (ServiceFlags::RELAY as u16) | (ServiceFlags::RPC as u16); + const STACKERDB_SERVICES: u16 = (ServiceFlags::RELAY as u16) + | (ServiceFlags::RPC as u16) + | (ServiceFlags::STACKERDB as u16); + fn make_test_chain_dbs( testname: &str, burnchain: &Burnchain, @@ -2434,6 +2603,7 @@ mod test { data_url: UrlString, asn4_entries: &Vec, initial_neighbors: &Vec, + services: u16, ) -> (PeerDB, SortitionDB, PoxId, StacksChainState) { let test_path = format!("/tmp/blockstack-test-databases-{}", testname); match fs::metadata(&test_path) { @@ -2449,7 +2619,7 @@ mod test { let peerdb_path = format!("{}/peers.sqlite", &test_path); let chainstate_path = format!("{}/chainstate", &test_path); - let peerdb = PeerDB::connect( + let mut peerdb = PeerDB::connect( &peerdb_path, true, network_id, @@ -2474,6 +2644,10 @@ mod test { ) .unwrap(); + let mut tx = peerdb.tx_begin().unwrap(); + PeerDB::set_local_services(&mut tx, services).unwrap(); + tx.commit().unwrap(); + let first_burnchain_block_height = burnchain.first_block_height; let first_burnchain_block_hash = burnchain.first_block_hash; @@ -2640,7 +2814,6 @@ mod test { } #[test] - #[ignore] fn convo_handshake_accept() { with_timeout(100, || { let conn_opts = ConnectionOptions::default(); @@ -2656,6 +2829,7 @@ mod test { burn_stable_block_height: 12341, burn_stable_block_hash: BurnchainHeaderHash([0x22; 32]), last_burn_block_hashes: HashMap::new(), + rc_consensus_hash: ConsensusHash([0x33; 20]), }; chain_view.make_test_data(); @@ -2667,6 +2841,7 @@ mod test { "http://peer1.com".into(), &vec![], &vec![], + DEFAULT_SERVICES, ); let (mut peerdb_2, mut sortdb_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( "convo_handshake_accept_2", @@ -2676,6 +2851,7 @@ mod test { 
"http://peer2.com".into(), &vec![], &vec![], + DEFAULT_SERVICES, ); db_setup(&mut peerdb_1, &mut sortdb_1, &socketaddr_1, &chain_view); @@ -2808,6 +2984,287 @@ mod test { }) } + /// Inner function for testing various kinds of stackerdb/non-stackerdb peer interactions + fn inner_convo_handshake_accept_stackerdb( + peer_1_services: u16, + peer_1_rc_consensus_hash: ConsensusHash, + peer_2_services: u16, + peer_2_rc_consensus_hash: ConsensusHash, + ) { + with_timeout(100, move || { + let conn_opts = ConnectionOptions::default(); + + let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + let socketaddr_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081); + + let burnchain = testing_burnchain_config(); + + let mut chain_view_1 = BurnchainView { + burn_block_height: 12348, + burn_block_hash: BurnchainHeaderHash([0x11; 32]), + burn_stable_block_height: 12341, + burn_stable_block_hash: BurnchainHeaderHash([0x22; 32]), + last_burn_block_hashes: HashMap::new(), + rc_consensus_hash: peer_1_rc_consensus_hash.clone(), + }; + chain_view_1.make_test_data(); + + let mut chain_view_2 = chain_view_1.clone(); + chain_view_2.rc_consensus_hash = peer_2_rc_consensus_hash.clone(); + + let (mut peerdb_1, mut sortdb_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( + &format!( + "convo_handshake_accept_1-{}-{}-{}-{}", + peer_1_services, + peer_2_services, + &peer_1_rc_consensus_hash, + &peer_2_rc_consensus_hash + ), + &burnchain, + 0x9abcdef0, + 12350, + "http://peer1.com".into(), + &vec![], + &vec![], + peer_1_services, + ); + let (mut peerdb_2, mut sortdb_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( + &format!( + "convo_handshake_accept_2-{}-{}-{}-{}", + peer_1_services, + peer_2_services, + &peer_2_rc_consensus_hash, + &peer_2_rc_consensus_hash + ), + &burnchain, + 0x9abcdef0, + 12351, + "http://peer2.com".into(), + &vec![], + &vec![], + peer_2_services, + ); + + db_setup(&mut peerdb_1, &mut sortdb_1, &socketaddr_1, &chain_view_1); 
+ db_setup(&mut peerdb_2, &mut sortdb_2, &socketaddr_2, &chain_view_2); + + let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); + let local_peer_2 = PeerDB::get_local_peer(&peerdb_2.conn()).unwrap(); + + let mut convo_1 = ConversationP2P::new( + 123, + 456, + &burnchain, + &socketaddr_2, + &conn_opts, + true, + 0, + StacksEpoch::unit_test_pre_2_05(0), + ); + let mut convo_2 = ConversationP2P::new( + 123, + 456, + &burnchain, + &socketaddr_1, + &conn_opts, + true, + 0, + StacksEpoch::unit_test_pre_2_05(0), + ); + + // no peer public keys known yet + assert!(convo_1.connection.get_public_key().is_none()); + assert!(convo_2.connection.get_public_key().is_none()); + + // convo_1 sends a handshake to convo_2 + let handshake_data_1 = HandshakeData::from_local_peer(&local_peer_1); + let handshake_1 = convo_1 + .sign_message( + &chain_view_1, + &local_peer_1.private_key, + StacksMessageType::Handshake(handshake_data_1.clone()), + ) + .unwrap(); + let mut rh_1 = convo_1.send_signed_request(handshake_1, 1000000).unwrap(); + + // convo_2 receives it and processes it, and since no one is waiting for it, will forward + // it along to the chat caller (us) + test_debug!("send handshake"); + convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2); + let unhandled_2 = convo_2 + .chat( + &local_peer_2, + &mut peerdb_2, + &sortdb_2, + &pox_id_2, + &mut chainstate_2, + &mut BlockHeaderCache::new(), + &chain_view_2, + ) + .unwrap(); + + // convo_1 has a handshakeaccept + test_debug!("send handshake-accept"); + convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1); + let unhandled_1 = convo_1 + .chat( + &local_peer_1, + &mut peerdb_1, + &sortdb_1, + &pox_id_1, + &mut chainstate_1, + &mut BlockHeaderCache::new(), + &chain_view_1, + ) + .unwrap(); + + let reply_1 = rh_1.recv(0).unwrap(); + + assert_eq!(unhandled_1.len(), 0); + assert_eq!(unhandled_2.len(), 1); + + // convo 2 returns the handshake from convo 1 + match unhandled_2[0].payload { + 
StacksMessageType::Handshake(ref data) => { + assert_eq!(handshake_data_1, *data); + } + _ => { + assert!(false); + } + }; + + if (peer_1_services & (ServiceFlags::STACKERDB as u16) != 0) + && (peer_2_services & (ServiceFlags::STACKERDB as u16) != 0) + { + // received a valid StackerDBHandshakeAccept from peer 2? + match reply_1.payload { + StacksMessageType::StackerDBHandshakeAccept(ref data, ref db_data) => { + assert_eq!(data.handshake.addrbytes, local_peer_2.addrbytes); + assert_eq!(data.handshake.port, local_peer_2.port); + assert_eq!(data.handshake.services, local_peer_2.services); + assert_eq!( + data.handshake.node_public_key, + StacksPublicKeyBuffer::from_public_key( + &Secp256k1PublicKey::from_private(&local_peer_2.private_key) + ) + ); + assert_eq!( + data.handshake.expire_block_height, + local_peer_2.private_key_expire + ); + assert_eq!(data.handshake.data_url, "http://peer2.com".into()); + assert_eq!(data.heartbeat_interval, conn_opts.heartbeat); + + // remote peer always replies with its supported smart contracts + assert_eq!( + db_data.smart_contracts, + vec![ContractId::parse("SP000000000000000000002Q6VF78.sbtc").unwrap()] + ); + + if peer_1_rc_consensus_hash == peer_2_rc_consensus_hash { + assert_eq!(db_data.rc_consensus_hash, chain_view_1.rc_consensus_hash); + + // peers learn each others' smart contract DBs + eprintln!( + "{:?}, {:?}", + &convo_1.db_smart_contracts, &convo_2.db_smart_contracts + ); + assert_eq!(convo_1.db_smart_contracts.len(), 1); + } else { + assert_eq!(db_data.rc_consensus_hash, chain_view_2.rc_consensus_hash); + + // peers ignore each others' smart contract DBs + eprintln!( + "{:?}, {:?}", + &convo_1.db_smart_contracts, &convo_2.db_smart_contracts + ); + assert_eq!(convo_1.db_smart_contracts.len(), 0); + } + } + _ => { + assert!(false); + } + }; + } else { + // received a valid HandshakeAccept from peer 2? 
+ match reply_1.payload { + StacksMessageType::HandshakeAccept(ref data) => { + assert_eq!(data.handshake.addrbytes, local_peer_2.addrbytes); + assert_eq!(data.handshake.port, local_peer_2.port); + assert_eq!(data.handshake.services, local_peer_2.services); + assert_eq!( + data.handshake.node_public_key, + StacksPublicKeyBuffer::from_public_key( + &Secp256k1PublicKey::from_private(&local_peer_2.private_key) + ) + ); + assert_eq!( + data.handshake.expire_block_height, + local_peer_2.private_key_expire + ); + assert_eq!(data.handshake.data_url, "http://peer2.com".into()); + assert_eq!(data.heartbeat_interval, conn_opts.heartbeat); + } + _ => { + assert!(false); + } + } + } + + // convo_2 got updated with convo_1's peer info, but no heartbeat info + assert_eq!(convo_2.peer_heartbeat, 3600); + assert_eq!( + convo_2.connection.get_public_key().unwrap(), + Secp256k1PublicKey::from_private(&local_peer_1.private_key) + ); + assert_eq!(convo_2.data_url, "http://peer1.com".into()); + + // convo_1 got updated with convo_2's peer info, as well as heartbeat + assert_eq!(convo_1.peer_heartbeat, conn_opts.heartbeat); + assert_eq!( + convo_1.connection.get_public_key().unwrap(), + Secp256k1PublicKey::from_private(&local_peer_2.private_key) + ); + assert_eq!(convo_1.data_url, "http://peer2.com".into()); + + assert_eq!(convo_1.peer_services, peer_2_services); + assert_eq!(convo_2.peer_services, peer_1_services); + }) + } + + #[test] + /// Two stackerdb peers handshake + fn convo_handshake_accept_stackerdb() { + inner_convo_handshake_accept_stackerdb( + STACKERDB_SERVICES, + ConsensusHash([0x33; 20]), + STACKERDB_SERVICES, + ConsensusHash([0x33; 20]), + ); + } + + #[test] + /// A stackerdb peer handshakes with a legacy peer + fn convo_handshake_accept_stackerdb_legacy() { + inner_convo_handshake_accept_stackerdb( + STACKERDB_SERVICES, + ConsensusHash([0x44; 20]), + DEFAULT_SERVICES, + ConsensusHash([0x44; 20]), + ); + } + + #[test] + /// Two stackerdb peers handshake, but with 
different reward cycle consensus hashes + fn convo_handshake_accept_stackerdb_bad_consensus_hash() { + inner_convo_handshake_accept_stackerdb( + STACKERDB_SERVICES, + ConsensusHash([0x33; 20]), + STACKERDB_SERVICES, + ConsensusHash([0x44; 20]), + ); + } + #[test] fn convo_handshake_reject() { let conn_opts = ConnectionOptions::default(); @@ -2827,6 +3284,7 @@ mod test { burn_stable_block_height: 12341, burn_stable_block_hash: BurnchainHeaderHash([0x22; 32]), last_burn_block_hashes: HashMap::new(), + rc_consensus_hash: ConsensusHash([0x33; 20]), }; chain_view.make_test_data(); @@ -2838,6 +3296,7 @@ mod test { "http://peer1.com".into(), &vec![], &vec![], + DEFAULT_SERVICES, ); let (mut peerdb_2, mut sortdb_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( "convo_handshake_reject_2", @@ -2847,6 +3306,7 @@ mod test { "http://peer2.com".into(), &vec![], &vec![], + DEFAULT_SERVICES, ); db_setup(&mut peerdb_1, &mut sortdb_1, &socketaddr_1, &chain_view); @@ -2958,6 +3418,7 @@ mod test { burn_stable_block_height: 12341, burn_stable_block_hash: BurnchainHeaderHash([0x22; 32]), last_burn_block_hashes: HashMap::new(), + rc_consensus_hash: ConsensusHash([0x33; 20]), }; chain_view.make_test_data(); @@ -2974,6 +3435,7 @@ mod test { "http://peer1.com".into(), &vec![], &vec![], + DEFAULT_SERVICES, ); let (mut peerdb_2, mut sortdb_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( "convo_handshake_badsignature_2", @@ -2983,6 +3445,7 @@ mod test { "http://peer2.com".into(), &vec![], &vec![], + DEFAULT_SERVICES, ); db_setup(&mut peerdb_1, &mut sortdb_1, &socketaddr_1, &chain_view); @@ -3092,6 +3555,7 @@ mod test { burn_stable_block_height: 12341, burn_stable_block_hash: BurnchainHeaderHash([0x22; 32]), last_burn_block_hashes: HashMap::new(), + rc_consensus_hash: ConsensusHash([0x33; 20]), }; chain_view.make_test_data(); @@ -3108,6 +3572,7 @@ mod test { "http://peer1.com".into(), &vec![], &vec![], + DEFAULT_SERVICES, ); let (mut peerdb_2, mut sortdb_2, pox_id_2, mut 
chainstate_2) = make_test_chain_dbs( "convo_handshake_self_2", @@ -3117,6 +3582,7 @@ mod test { "http://peer2.com".into(), &vec![], &vec![], + DEFAULT_SERVICES, ); db_setup(&mut peerdb_1, &mut sortdb_1, &socketaddr_1, &chain_view); @@ -3227,6 +3693,7 @@ mod test { burn_stable_block_height: 12341, burn_stable_block_hash: BurnchainHeaderHash([0x22; 32]), last_burn_block_hashes: HashMap::new(), + rc_consensus_hash: ConsensusHash([0x33; 20]), }; chain_view.make_test_data(); @@ -3243,6 +3710,7 @@ mod test { "http://peer1.com".into(), &vec![], &vec![], + DEFAULT_SERVICES, ); let (mut peerdb_2, mut sortdb_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( "convo_ping_2", @@ -3252,6 +3720,7 @@ mod test { "http://peer2.com".into(), &vec![], &vec![], + DEFAULT_SERVICES, ); db_setup(&mut peerdb_1, &mut sortdb_1, &socketaddr_1, &chain_view); @@ -3394,6 +3863,7 @@ mod test { burn_stable_block_height: 12341, burn_stable_block_hash: BurnchainHeaderHash([0x22; 32]), last_burn_block_hashes: HashMap::new(), + rc_consensus_hash: ConsensusHash([0x33; 20]), }; chain_view.make_test_data(); @@ -3410,6 +3880,7 @@ mod test { "http://peer1.com".into(), &vec![], &vec![], + DEFAULT_SERVICES, ); let (mut peerdb_2, mut sortdb_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( "convo_handshake_ping_loop_2", @@ -3419,6 +3890,7 @@ mod test { "http://peer2.com".into(), &vec![], &vec![], + DEFAULT_SERVICES, ); db_setup(&mut peerdb_1, &mut sortdb_1, &socketaddr_1, &chain_view); @@ -3539,7 +4011,8 @@ mod test { // received a valid HandshakeAccept from peer 2 match reply_handshake_1.payload { - StacksMessageType::HandshakeAccept(ref data) => { + StacksMessageType::HandshakeAccept(ref data) + | StacksMessageType::StackerDBHandshakeAccept(ref data, ..) 
=> { assert_eq!(data.handshake.addrbytes, local_peer_2.addrbytes); assert_eq!(data.handshake.port, local_peer_2.port); assert_eq!(data.handshake.services, local_peer_2.services); @@ -3611,6 +4084,7 @@ mod test { burn_stable_block_height: 12341, burn_stable_block_hash: BurnchainHeaderHash([0x22; 32]), last_burn_block_hashes: HashMap::new(), + rc_consensus_hash: ConsensusHash([0x33; 20]), }; chain_view.make_test_data(); @@ -3627,6 +4101,7 @@ mod test { "http://peer1.com".into(), &vec![], &vec![], + DEFAULT_SERVICES, ); let (mut peerdb_2, mut sortdb_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( "convo_nack_unsolicited_2", @@ -3636,6 +4111,7 @@ mod test { "http://peer2.com".into(), &vec![], &vec![], + DEFAULT_SERVICES, ); db_setup(&mut peerdb_1, &mut sortdb_1, &socketaddr_1, &chain_view); @@ -3754,6 +4230,7 @@ mod test { burn_stable_block_height: 12331 - 7, // burnchain.reward_cycle_to_block_height(burnchain.block_height_to_reward_cycle(12341 - 8).unwrap() - 1), burn_stable_block_hash: BurnchainHeaderHash([0x22; 32]), last_burn_block_hashes: HashMap::new(), + rc_consensus_hash: ConsensusHash([0x33; 20]), }; chain_view.make_test_data(); @@ -3765,6 +4242,7 @@ mod test { "http://peer1.com".into(), &vec![], &vec![], + DEFAULT_SERVICES, ); let (mut peerdb_2, mut sortdb_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( "convo_handshake_getblocksinv_2", @@ -3774,6 +4252,7 @@ mod test { "http://peer2.com".into(), &vec![], &vec![], + DEFAULT_SERVICES, ); db_setup(&mut peerdb_1, &mut sortdb_1, &socketaddr_1, &chain_view); @@ -4053,6 +4532,7 @@ mod test { burn_stable_block_height: 12341, burn_stable_block_hash: BurnchainHeaderHash([0x22; 32]), last_burn_block_hashes: HashMap::new(), + rc_consensus_hash: ConsensusHash([0x33; 20]), }; chain_view.make_test_data(); @@ -4069,6 +4549,7 @@ mod test { "http://peer1.com".into(), &vec![], &vec![], + DEFAULT_SERVICES, ); let (mut peerdb_2, mut sortdb_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( "convo_natpunch_2", @@ 
-4078,6 +4559,7 @@ mod test { "http://peer2.com".into(), &vec![], &vec![], + DEFAULT_SERVICES, ); db_setup(&mut peerdb_1, &mut sortdb_1, &socketaddr_1, &chain_view); @@ -4187,6 +4669,7 @@ mod test { burn_stable_block_height: 12341, burn_stable_block_hash: BurnchainHeaderHash([0x22; 32]), last_burn_block_hashes: HashMap::new(), + rc_consensus_hash: ConsensusHash([0x33; 20]), }; chain_view.make_test_data(); @@ -4458,6 +4941,7 @@ mod test { burn_stable_block_height: 12341, burn_stable_block_hash: BurnchainHeaderHash([0x22; 32]), last_burn_block_hashes: HashMap::new(), + rc_consensus_hash: ConsensusHash([0x33; 20]), }; chain_view.make_test_data(); From 53c729c88bd92a239a067dfbc99a4dac0da544c9 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 4 Feb 2023 18:26:09 -0500 Subject: [PATCH 04/21] chore: add codecs for all of the stackerdb messages, as well as tests --- src/net/codec.rs | 380 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 380 insertions(+) diff --git a/src/net/codec.rs b/src/net/codec.rs index 9f23876644..3b28962865 100644 --- a/src/net/codec.rs +++ b/src/net/codec.rs @@ -58,6 +58,9 @@ use crate::types::chainstate::BlockHeaderHash; use crate::types::chainstate::BurnchainHeaderHash; use crate::types::StacksPublicKeyBuffer; +use clarity::vm::types::{QualifiedContractIdentifier, StandardPrincipalData}; +use clarity::vm::ContractName; + impl Preamble { /// Make an empty preamble with the given version and fork-set identifier, and payload length. 
pub fn new( @@ -749,6 +752,131 @@ impl StacksMessageCodec for MemPoolSyncData { } } +impl StacksMessageCodec for ContractId { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { + write_next(fd, &self.0.issuer.0)?; + write_next(fd, &self.0.issuer.1)?; + write_next(fd, &self.0.name)?; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let version: u8 = read_next(fd)?; + let bytes: [u8; 20] = read_next(fd)?; + let name: ContractName = read_next(fd)?; + let qn = QualifiedContractIdentifier::new(StandardPrincipalData(version, bytes), name); + Ok(ContractId(qn)) + } +} + +impl StacksMessageCodec for StackerDBHandshakeData { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { + if self.smart_contracts.len() > 256 { + return Err(codec_error::ArrayTooLong); + } + // force no more than 256 names in the protocol + let len_u8: u8 = self.smart_contracts.len().try_into().expect("Unreachable"); + write_next(fd, &self.rc_consensus_hash)?; + write_next(fd, &len_u8)?; + for cid in self.smart_contracts.iter() { + write_next(fd, cid)?; + } + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let rc_consensus_hash = read_next(fd)?; + let len_u8: u8 = read_next(fd)?; + let mut smart_contracts = Vec::with_capacity(len_u8 as usize); + for _ in 0..len_u8 { + let cid: ContractId = read_next(fd)?; + smart_contracts.push(cid); + } + Ok(StackerDBHandshakeData { + rc_consensus_hash, + smart_contracts, + }) + } +} + +impl StacksMessageCodec for StackerDBGetChunkInvData { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { + write_next(fd, &self.contract_id)?; + write_next(fd, &self.rc_consensus_hash)?; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let contract_id: ContractId = read_next(fd)?; + let rc_consensus_hash: ConsensusHash = read_next(fd)?; + Ok(StackerDBGetChunkInvData { + contract_id, + rc_consensus_hash, + }) + } +} + +impl StacksMessageCodec for StackerDBChunkInvData 
{ + fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { + if self.chunk_versions.len() > (stackerdb::STACKERDB_INV_MAX as usize) { + return Err(codec_error::ArrayTooLong); + } + write_next(fd, &self.chunk_versions)?; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let chunk_versions: Vec = read_next_at_most(fd, stackerdb::STACKERDB_INV_MAX.into())?; + Ok(StackerDBChunkInvData { chunk_versions }) + } +} + +impl StacksMessageCodec for StackerDBGetChunkData { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { + write_next(fd, &self.contract_id)?; + write_next(fd, &self.rc_consensus_hash)?; + write_next(fd, &self.chunk_id)?; + write_next(fd, &self.chunk_version)?; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let contract_id: ContractId = read_next(fd)?; + let rc_consensus_hash: ConsensusHash = read_next(fd)?; + let chunk_id: u32 = read_next(fd)?; + let chunk_version: u32 = read_next(fd)?; + Ok(StackerDBGetChunkData { + contract_id, + rc_consensus_hash, + chunk_id, + chunk_version, + }) + } +} + +impl StacksMessageCodec for StackerDBChunkData { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { + write_next(fd, &self.chunk_id)?; + write_next(fd, &self.chunk_version)?; + write_next(fd, &self.sig)?; + write_next(fd, &self.data)?; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let chunk_id: u32 = read_next(fd)?; + let chunk_version: u32 = read_next(fd)?; + let sig: MessageSignature = read_next(fd)?; + let data: Vec = read_next(fd)?; + Ok(StackerDBChunkData { + chunk_id, + chunk_version, + sig, + data, + }) + } +} + impl StacksMessageCodec for RelayData { fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { write_next(fd, &self.peer)?; @@ -787,6 +915,15 @@ impl StacksMessageType { StacksMessageType::Pong(ref _m) => StacksMessageID::Pong, StacksMessageType::NatPunchRequest(ref _m) => StacksMessageID::NatPunchRequest, 
StacksMessageType::NatPunchReply(ref _m) => StacksMessageID::NatPunchReply, + StacksMessageType::StackerDBHandshakeAccept(ref _h, ref _m) => { + StacksMessageID::StackerDBHandshakeAccept + } + StacksMessageType::StackerDBGetChunkInv(ref _m) => { + StacksMessageID::StackerDBGetChunkInv + } + StacksMessageType::StackerDBChunkInv(ref _m) => StacksMessageID::StackerDBChunkInv, + StacksMessageType::StackerDBGetChunk(ref _m) => StacksMessageID::StackerDBGetChunk, + StacksMessageType::StackerDBChunk(ref _m) => StacksMessageID::StackerDBChunk, } } @@ -811,6 +948,13 @@ impl StacksMessageType { StacksMessageType::Pong(ref _m) => "Pong", StacksMessageType::NatPunchRequest(ref _m) => "NatPunchRequest", StacksMessageType::NatPunchReply(ref _m) => "NatPunchReply", + StacksMessageType::StackerDBHandshakeAccept(ref _h, ref _m) => { + "StackerDBHandshakeAccept" + } + StacksMessageType::StackerDBGetChunkInv(ref _m) => "StackerDBGetChunkInv", + StacksMessageType::StackerDBChunkInv(ref _m) => "StackerDBChunkInv", + StacksMessageType::StackerDBGetChunk(ref _m) => "StackerDBGetChunk", + StacksMessageType::StackerDBChunk(ref _m) => "StackerDBChunk", } } @@ -869,6 +1013,38 @@ impl StacksMessageType { StacksMessageType::NatPunchReply(ref m) => { format!("NatPunchReply({},{}:{})", m.nonce, &m.addrbytes, m.port) } + StacksMessageType::StackerDBHandshakeAccept(ref h, ref m) => { + format!( + "StackerDBHandshakeAccept({},{},{:?})", + &to_hex(&h.handshake.node_public_key.to_bytes()), + &m.rc_consensus_hash, + &m.smart_contracts + ) + } + StacksMessageType::StackerDBGetChunkInv(ref m) => { + format!( + "StackerDBGetChunkInv({}.{})", + &m.contract_id, &m.rc_consensus_hash + ) + } + StacksMessageType::StackerDBChunkInv(ref m) => { + format!("StackerDBChunkInv({:?})", &m.chunk_versions) + } + StacksMessageType::StackerDBGetChunk(ref m) => { + format!( + "StackerDBGetChunk({},{},{},{})", + &m.contract_id, &m.rc_consensus_hash, m.chunk_id, m.chunk_version + ) + } + 
StacksMessageType::StackerDBChunk(ref m) => { + format!( + "StackerDBChunk({},{},{},sz={})", + m.chunk_id, + m.chunk_version, + &m.sig, + m.data.len() + ) + } } } } @@ -902,6 +1078,19 @@ impl StacksMessageCodec for StacksMessageID { x if x == StacksMessageID::Pong as u8 => StacksMessageID::Pong, x if x == StacksMessageID::NatPunchRequest as u8 => StacksMessageID::NatPunchRequest, x if x == StacksMessageID::NatPunchReply as u8 => StacksMessageID::NatPunchReply, + x if x == StacksMessageID::StackerDBHandshakeAccept as u8 => { + StacksMessageID::StackerDBHandshakeAccept + } + x if x == StacksMessageID::StackerDBGetChunkInv as u8 => { + StacksMessageID::StackerDBGetChunkInv + } + x if x == StacksMessageID::StackerDBChunkInv as u8 => { + StacksMessageID::StackerDBChunkInv + } + x if x == StacksMessageID::StackerDBGetChunk as u8 => { + StacksMessageID::StackerDBGetChunk + } + x if x == StacksMessageID::StackerDBChunk as u8 => StacksMessageID::StackerDBChunk, _ => { return Err(codec_error::DeserializeError( "Unknown message ID".to_string(), @@ -935,6 +1124,14 @@ impl StacksMessageCodec for StacksMessageType { StacksMessageType::Pong(ref m) => write_next(fd, m)?, StacksMessageType::NatPunchRequest(ref nonce) => write_next(fd, nonce)?, StacksMessageType::NatPunchReply(ref m) => write_next(fd, m)?, + StacksMessageType::StackerDBHandshakeAccept(ref h, ref m) => { + write_next(fd, h)?; + write_next(fd, m)? 
+ } + StacksMessageType::StackerDBGetChunkInv(ref m) => write_next(fd, m)?, + StacksMessageType::StackerDBChunkInv(ref m) => write_next(fd, m)?, + StacksMessageType::StackerDBGetChunk(ref m) => write_next(fd, m)?, + StacksMessageType::StackerDBChunk(ref m) => write_next(fd, m)?, } Ok(()) } @@ -1012,6 +1209,27 @@ impl StacksMessageCodec for StacksMessageType { let m: NatPunchData = read_next(fd)?; StacksMessageType::NatPunchReply(m) } + StacksMessageID::StackerDBHandshakeAccept => { + let h: HandshakeAcceptData = read_next(fd)?; + let m: StackerDBHandshakeData = read_next(fd)?; + StacksMessageType::StackerDBHandshakeAccept(h, m) + } + StacksMessageID::StackerDBGetChunkInv => { + let m: StackerDBGetChunkInvData = read_next(fd)?; + StacksMessageType::StackerDBGetChunkInv(m) + } + StacksMessageID::StackerDBChunkInv => { + let m: StackerDBChunkInvData = read_next(fd)?; + StacksMessageType::StackerDBChunkInv(m) + } + StacksMessageID::StackerDBGetChunk => { + let m: StackerDBGetChunkData = read_next(fd)?; + StacksMessageType::StackerDBGetChunk(m) + } + StacksMessageID::StackerDBChunk => { + let m: StackerDBChunkData = read_next(fd)?; + StacksMessageType::StackerDBChunk(m) + } StacksMessageID::Reserved => { return Err(codec_error::DeserializeError( "Unsupported message ID 'reserved'".to_string(), @@ -1933,6 +2151,122 @@ pub mod test { check_codec_and_corruption::(&data, &bytes); } + #[test] + fn codec_StackerDBHandshakeAccept() { + let data = StackerDBHandshakeData { + rc_consensus_hash: ConsensusHash([0x01; 20]), + smart_contracts: vec![ + ContractId::parse("SP8QPP8TVXYAXS1VFSERG978A6WKBF59NSYJQEMN.foo").unwrap(), + ContractId::parse("SP28D54YKFCMRKXBR6BR0E4BPN57S62RSM4XEVPRP.bar").unwrap(), + ], + }; + let bytes = vec![ + // rc consensus hash + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, // len(smart_contracts) + 0x02, // SP8QPP8TVXYAXS1VFSERG978A6WKBF59NSYJQEMN + 0x16, 0x11, 0x7b, 0x59, 0x1a, 
0xdf, 0x7c, 0xae, 0xe4, 0x3b, 0x7e, 0x5d, 0x88, 0x24, + 0xe8, 0x51, 0xb9, 0x35, 0xbc, 0xa9, 0xae, // len(foo) + 0x03, // foo + 0x66, 0x6f, 0x6f, // SP28D54YKFCMRKXBR6BR0E4BPN57S62RSM4XEVPRP + 0x16, 0x90, 0xd2, 0x93, 0xd3, 0x7b, 0x29, 0x89, 0xf5, 0x78, 0x32, 0xf0, 0x07, 0x11, + 0x76, 0xa9, 0x4f, 0x93, 0x0b, 0x19, 0xa1, // len(bar) + 0x03, // bar + 0x62, 0x61, 0x72, + ]; + + check_codec_and_corruption::(&data, &bytes); + } + + #[test] + fn codec_StackerDBGetChunkInvData() { + let data = StackerDBGetChunkInvData { + contract_id: ContractId::parse("SP8QPP8TVXYAXS1VFSERG978A6WKBF59NSYJQEMN.foo").unwrap(), + rc_consensus_hash: ConsensusHash([0x01; 20]), + }; + + let bytes = vec![ + // SP8QPP8TVXYAXS1VFSERG978A6WKBF59NSYJQEMN + 0x16, 0x11, 0x7b, 0x59, 0x1a, 0xdf, 0x7c, 0xae, 0xe4, 0x3b, 0x7e, 0x5d, 0x88, 0x24, + 0xe8, 0x51, 0xb9, 0x35, 0xbc, 0xa9, 0xae, // len(foo) + 0x03, // foo + 0x66, 0x6f, 0x6f, // rc consensus hash + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + ]; + + check_codec_and_corruption::(&data, &bytes); + } + + #[test] + fn codec_StackerDBChunkInvData() { + let data = StackerDBChunkInvData { + chunk_versions: vec![0, 1, 2, 3], + }; + + let bytes = vec![ + // len(chunk_versions) + 0x00, 0x00, 0x00, 0x04, // 0u32 + 0x00, 0x00, 0x00, 0x00, // 1u32 + 0x00, 0x00, 0x00, 0x01, // 2u32 + 0x00, 0x00, 0x00, 0x02, // 3u32 + 0x00, 0x00, 0x00, 0x03, + ]; + + check_codec_and_corruption::(&data, &bytes); + } + + #[test] + fn codec_StackerDBGetChunkData() { + let data = StackerDBGetChunkData { + contract_id: ContractId::parse("SP8QPP8TVXYAXS1VFSERG978A6WKBF59NSYJQEMN.foo").unwrap(), + rc_consensus_hash: ConsensusHash([0x01; 20]), + chunk_id: 2, + chunk_version: 3, + }; + + let bytes = vec![ + // SP8QPP8TVXYAXS1VFSERG978A6WKBF59NSYJQEMN + 0x16, 0x11, 0x7b, 0x59, 0x1a, 0xdf, 0x7c, 0xae, 0xe4, 0x3b, 0x7e, 0x5d, 0x88, 0x24, + 0xe8, 0x51, 0xb9, 0x35, 0xbc, 0xa9, 0xae, // len(foo) + 0x03, // foo + 
0x66, 0x6f, 0x6f, // rc consensus hash + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, // chunk id + 0x00, 0x00, 0x00, 0x02, // chunk version + 0x00, 0x00, 0x00, 0x03, + ]; + + check_codec_and_corruption::(&data, &bytes); + } + + #[test] + fn codec_StackerDBChunkData() { + let data = StackerDBChunkData { + chunk_id: 2, + chunk_version: 3, + sig: MessageSignature::from_raw(&vec![0x44; 65]), + data: vec![ + 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff, + ], + }; + + let bytes = vec![ + // chunk id + 0x00, 0x00, 0x00, 0x02, // chunk version + 0x00, 0x00, 0x00, 0x03, // signature + 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, + 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, + 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, + 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, + 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, // length + 0x00, 0x00, 0x00, 0x0b, // data + 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff, + ]; + + check_codec_and_corruption::(&data, &bytes); + } + #[test] fn codec_StacksMessage() { let payloads: Vec = vec![ @@ -2049,6 +2383,52 @@ pub mod test { port: 12345, nonce: 0x12345678, }), + StacksMessageType::StackerDBHandshakeAccept( + HandshakeAcceptData { + heartbeat_interval: 0x01020304, + handshake: HandshakeData { + addrbytes: PeerAddress([ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, + 0x0c, 0x0d, 0x0e, 0x0f, + ]), + port: 12345, + services: 0x0001, + node_public_key: StacksPublicKeyBuffer::from_bytes( + &hex_bytes( + "034e316be04870cef1795fba64d581cf64bad0c894b01a068fb9edf85321dcd9bb", + ) + .unwrap(), + ) + .unwrap(), + expire_block_height: 0x0102030405060708, + data_url: UrlString::try_from("https://the-new-interwebs.com:4008/the-data") + .unwrap(), + 
}, + }, + StackerDBHandshakeData { + rc_consensus_hash: ConsensusHash([0x01; 20]), + smart_contracts: vec![ContractId::parse("SP8QPP8TVXYAXS1VFSERG978A6WKBF59NSYJQEMN.foo").unwrap(), ContractId::parse("SP28D54YKFCMRKXBR6BR0E4BPN57S62RSM4XEVPRP.bar").unwrap()] + } + ), + StacksMessageType::StackerDBGetChunkInv(StackerDBGetChunkInvData { + contract_id: ContractId::parse("SP8QPP8TVXYAXS1VFSERG978A6WKBF59NSYJQEMN.foo").unwrap(), + rc_consensus_hash: ConsensusHash([0x01; 20]), + }), + StacksMessageType::StackerDBChunkInv(StackerDBChunkInvData { + chunk_versions: vec![0, 1, 2, 3], + }), + StacksMessageType::StackerDBGetChunk(StackerDBGetChunkData { + contract_id: ContractId::parse("SP8QPP8TVXYAXS1VFSERG978A6WKBF59NSYJQEMN.foo").unwrap(), + rc_consensus_hash: ConsensusHash([0x01; 20]), + chunk_id: 2, + chunk_version: 3 + }), + StacksMessageType::StackerDBChunk(StackerDBChunkData { + chunk_id: 2, + chunk_version: 3, + sig: MessageSignature::from_raw(&vec![0x44; 65]), + data: vec![0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff] + }), ]; let mut maximal_relayers: Vec = vec![]; From f3332fdc591f8aa9bc24694b888869203520e6f7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 4 Feb 2023 18:26:27 -0500 Subject: [PATCH 05/21] feat: add bandwidth tracking for stackerdb message pushes (to be filled in later) --- src/net/connection.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/net/connection.rs b/src/net/connection.rs index 8d97377832..a7e426ca5c 100644 --- a/src/net/connection.rs +++ b/src/net/connection.rs @@ -361,6 +361,7 @@ pub struct ConnectionOptions { pub max_block_push_bandwidth: u64, pub max_microblocks_push_bandwidth: u64, pub max_transaction_push_bandwidth: u64, + pub max_stackerdb_push_bandwidth: u64, pub max_sockets: usize, pub public_ip_address: Option<(PeerAddress, u16)>, pub public_ip_request_timeout: u64, @@ -450,6 +451,7 @@ impl std::default::Default for ConnectionOptions { max_block_push_bandwidth: 0, // infinite upload bandwidth 
allowed max_microblocks_push_bandwidth: 0, // infinite upload bandwidth allowed max_transaction_push_bandwidth: 0, // infinite upload bandwidth allowed + max_stackerdb_push_bandwidth: 0, // infinite upload bandwidth allowed max_sockets: 800, // maximum number of client sockets we'll ever register public_ip_address: None, // resolve it at runtime by default public_ip_request_timeout: 60, // how often we can attempt to look up our public IP address From 230d7d7b9c4b22e942b25cbfb778a523b53b775b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 4 Feb 2023 18:26:51 -0500 Subject: [PATCH 06/21] feat: stackerdb message structs --- src/net/mod.rs | 123 +++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 119 insertions(+), 4 deletions(-) diff --git a/src/net/mod.rs b/src/net/mod.rs index f3d9a07b95..ebeb9d96e2 100644 --- a/src/net/mod.rs +++ b/src/net/mod.rs @@ -66,6 +66,8 @@ use crate::util_lib::boot::boot_code_tx_auth; use crate::util_lib::db::DBConn; use crate::util_lib::db::Error as db_error; use crate::util_lib::strings::UrlString; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StandardPrincipalData; use clarity::vm::types::TraitIdentifier; use clarity::vm::{ analysis::contract_interface_builder::ContractInterface, types::PrincipalData, ClarityName, @@ -135,6 +137,9 @@ pub mod prune; pub mod relay; pub mod rpc; pub mod server; +pub mod stackerdb; + +use crate::net::stackerdb::StackerDBSyncResult; #[derive(Debug)] pub enum Error { @@ -856,6 +861,7 @@ pub struct HandshakeData { pub enum ServiceFlags { RELAY = 0x01, RPC = 0x02, + STACKERDB = 0x04, } #[derive(Debug, Clone, PartialEq)] @@ -904,6 +910,92 @@ pub enum MemPoolSyncData { TxTags([u8; 32], Vec), } +/// Make QualifiedContractIdentifier usable to the networking code +#[derive(Debug, Clone, PartialEq)] +pub struct ContractId(QualifiedContractIdentifier); +impl ContractId { + pub fn new(addr: StacksAddress, name: ContractName) -> ContractId { + let id_addr = 
StandardPrincipalData(addr.version, addr.bytes.0); + ContractId(QualifiedContractIdentifier::new(id_addr, name)) + } + + pub fn address(&self) -> StacksAddress { + StacksAddress { + version: self.0.issuer.0, + bytes: Hash160(self.0.issuer.1.clone()), + } + } + + pub fn name(&self) -> ContractName { + self.0.name.clone() + } + + pub fn parse(txt: &str) -> Option { + QualifiedContractIdentifier::parse(txt) + .ok() + .map(|qc| ContractId(qc)) + } +} + +impl fmt::Display for ContractId { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", &self.0) + } +} + +/// Inform the remote peer of (a page of) the list of stacker DB contracts this node supports +#[derive(Debug, Clone, PartialEq)] +pub struct StackerDBHandshakeData { + /// current reward cycle ID + pub rc_consensus_hash: ConsensusHash, + /// list of smart contracts that we index. + /// there can be as many as 256 entries. + pub smart_contracts: Vec, +} + +/// Request for a chunk inventory +#[derive(Debug, Clone, PartialEq)] +pub struct StackerDBGetChunkInvData { + /// smart contract being used to determine chunk quantity and order + pub contract_id: ContractId, + /// consensus hash of the sortition that started this reward cycle + pub rc_consensus_hash: ConsensusHash, +} + +/// Inventory bitvector for chunks this node contains +#[derive(Debug, Clone, PartialEq)] +pub struct StackerDBChunkInvData { + /// version vector of chunks available. + /// The max-length is a protocol constant. + pub chunk_versions: Vec, +} + +/// Request for a stacker DB chunk. +#[derive(Debug, Clone, PartialEq)] +pub struct StackerDBGetChunkData { + /// smart contract being used to determine chunk quantity and order + pub contract_id: ContractId, + /// consensus hash of the sortition that started this reward cycle + pub rc_consensus_hash: ConsensusHash, + /// chunk ID (i.e. 
the ith bit) + pub chunk_id: u32, + /// last-seen chunk version + pub chunk_version: u32, +} + +/// Stacker DB chunk +#[derive(Debug, Clone, PartialEq)] +pub struct StackerDBChunkData { + /// chunk ID (i.e. the ith bit) + pub chunk_id: u32, + /// chunk version (a lamport clock) + pub chunk_version: u32, + /// signature from the stacker over (reward cycle consensus hash, chunk id, chunk version, chunk sha512/256) + pub sig: MessageSignature, + /// the chunk data + pub data: Vec, +} + #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct RelayData { pub peer: NeighborAddress, @@ -932,6 +1024,12 @@ pub enum StacksMessageType { Pong(PongData), NatPunchRequest(u32), NatPunchReply(NatPunchData), + // stacker DB + StackerDBHandshakeAccept(HandshakeAcceptData, StackerDBHandshakeData), + StackerDBGetChunkInv(StackerDBGetChunkInvData), + StackerDBChunkInv(StackerDBChunkInvData), + StackerDBGetChunk(StackerDBGetChunkData), + StackerDBChunk(StackerDBChunkData), } /// Peer address variants @@ -1684,6 +1782,12 @@ pub enum StacksMessageID { Pong = 16, NatPunchRequest = 17, NatPunchReply = 18, + // stackerdb + StackerDBHandshakeAccept = 19, + StackerDBGetChunkInv = 21, + StackerDBChunkInv = 22, + StackerDBGetChunk = 23, + StackerDBChunk = 24, // reserved Reserved = 255, } @@ -1948,6 +2052,7 @@ pub struct NetworkResult { pub uploaded_microblocks: Vec, // microblocks sent to us by the http server pub attachments: Vec<(AttachmentInstance, Attachment)>, pub synced_transactions: Vec, // transactions we downloaded via a mempool sync + pub stacker_db_sync_results: Vec, // chunks for stacker DBs we downloaded pub num_state_machine_passes: u64, pub num_inv_sync_passes: u64, pub num_download_passes: u64, @@ -1974,6 +2079,7 @@ impl NetworkResult { uploaded_microblocks: vec![], attachments: vec![], synced_transactions: vec![], + stacker_db_sync_results: vec![], num_state_machine_passes: num_state_machine_passes, num_inv_sync_passes: num_inv_sync_passes, num_download_passes: 
num_download_passes, @@ -2083,6 +2189,10 @@ impl NetworkResult { } } } + + pub fn consume_stacker_db_sync_results(&mut self, mut msgs: Vec) { + self.stacker_db_sync_results.append(&mut msgs); + } } pub trait Requestable: std::fmt::Display { @@ -2855,7 +2965,7 @@ pub mod test { let local_peer = PeerDB::get_local_peer(peerdb.conn()).unwrap(); let burnchain_view = { let chaintip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); - SortitionDB::get_burnchain_view(&sortdb.conn(), &config.burnchain, &chaintip) + SortitionDB::get_burnchain_view(&sortdb.index_conn(), &config.burnchain, &chaintip) .unwrap() }; let mut peer_network = PeerNetwork::new( @@ -2893,7 +3003,7 @@ pub mod test { let chaintip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); SortitionDB::get_burnchain_view( - &sortdb.conn(), + &sortdb.index_conn(), &self.config.burnchain, &chaintip, ) @@ -3860,8 +3970,13 @@ pub mod test { pub fn get_burnchain_view(&mut self) -> Result { let sortdb = self.sortdb.take().unwrap(); let view_res = { - let chaintip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); - SortitionDB::get_burnchain_view(&sortdb.conn(), &self.config.burnchain, &chaintip) + let chaintip = + SortitionDB::get_canonical_burn_chain_tip(&sortdb.index_conn()).unwrap(); + SortitionDB::get_burnchain_view( + &sortdb.index_conn(), + &self.config.burnchain, + &chaintip, + ) }; self.sortdb = Some(sortdb); view_res From 5193ef80151817262cb0387741d8881bb79f3d3c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 4 Feb 2023 18:27:09 -0500 Subject: [PATCH 07/21] chore: codec for [u8; 20] --- stacks-common/src/codec/mod.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/stacks-common/src/codec/mod.rs b/stacks-common/src/codec/mod.rs index b0eca1e255..6145e60b22 100644 --- a/stacks-common/src/codec/mod.rs +++ b/stacks-common/src/codec/mod.rs @@ -95,6 +95,18 @@ impl_stacks_message_codec_for_int!(u32; [0; 4]); 
impl_stacks_message_codec_for_int!(u64; [0; 8]); impl_stacks_message_codec_for_int!(i64; [0; 8]); +impl StacksMessageCodec for [u8; 20] { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), Error> { + fd.write_all(self).map_err(Error::WriteError) + } + + fn consensus_deserialize(fd: &mut R) -> Result<[u8; 20], Error> { + let mut buf = [0u8; 20]; + fd.read_exact(&mut buf).map_err(Error::ReadError)?; + Ok(buf) + } +} + impl StacksMessageCodec for [u8; 32] { fn consensus_serialize(&self, fd: &mut W) -> Result<(), Error> { fd.write_all(self).map_err(Error::WriteError) From 04b78ff023435eead634151a3d724bf6064ae47e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 4 Feb 2023 18:27:17 -0500 Subject: [PATCH 08/21] chore: API sync --- src/net/inv.rs | 1 + src/net/p2p.rs | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/net/inv.rs b/src/net/inv.rs index f11b101eb7..a21bb2eaa8 100644 --- a/src/net/inv.rs +++ b/src/net/inv.rs @@ -3867,6 +3867,7 @@ mod test { burn_stable_block_height: 12340, burn_stable_block_hash: BurnchainHeaderHash([0x22; 32]), last_burn_block_hashes: HashMap::new(), + rc_consensus_hash: ConsensusHash([0x33; 20]), }; burnchain_view.make_test_data(); diff --git a/src/net/p2p.rs b/src/net/p2p.rs index 62070ea5a8..87c72f4422 100644 --- a/src/net/p2p.rs +++ b/src/net/p2p.rs @@ -4952,7 +4952,7 @@ impl PeerNetwork { &self.local_peer, sn.block_height ); let new_chain_view = - SortitionDB::get_burnchain_view(&sortdb.conn(), &self.burnchain, &sn)?; + SortitionDB::get_burnchain_view(&sortdb.index_conn(), &self.burnchain, &sn)?; let new_chain_view_stable_consensus_hash = { let ic = sortdb.index_conn(); @@ -5528,6 +5528,7 @@ mod test { burn_stable_block_height: 12339, burn_stable_block_hash: BurnchainHeaderHash([0x22; 32]), last_burn_block_hashes: HashMap::new(), + rc_consensus_hash: ConsensusHash([0x33; 20]), }; burnchain_view.make_test_data(); From f9ef8e823f2deb9878b8bd715be109376b456c82 Mon Sep 17 00:00:00 2001 From: Jude Nelson 
Date: Sat, 4 Feb 2023 18:27:33 -0500 Subject: [PATCH 09/21] chore: stub files for stacker db implementation --- src/net/stackerdb/mod.rs | 37 ++++++++++++++++++++++++++++++++++ src/net/stackerdb/tests/mod.rs | 15 ++++++++++++++ 2 files changed, 52 insertions(+) create mode 100644 src/net/stackerdb/mod.rs create mode 100644 src/net/stackerdb/tests/mod.rs diff --git a/src/net/stackerdb/mod.rs b/src/net/stackerdb/mod.rs new file mode 100644 index 0000000000..2dbe30a40c --- /dev/null +++ b/src/net/stackerdb/mod.rs @@ -0,0 +1,37 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +#[cfg(test)] +pub mod tests; + +use std::collections::HashSet; + +use crate::net::ContractId; +use crate::net::NeighborKey; +use crate::net::StackerDBChunkData; + +/// maximum chunk inventory size +pub const STACKERDB_INV_MAX: u32 = 4096; + +/// Final result of synchronizing state with a remote set of DB replicas +pub struct StackerDBSyncResult { + /// which contract this is a replica for + pub contract_id: ContractId, + /// list of data to store + pub chunks_to_store: Vec, + /// dead neighbors we can disconnect from + pub dead: HashSet, +} diff --git a/src/net/stackerdb/tests/mod.rs b/src/net/stackerdb/tests/mod.rs new file mode 100644 index 0000000000..4b91359b01 --- /dev/null +++ b/src/net/stackerdb/tests/mod.rs @@ -0,0 +1,15 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
From 31d4db0153ca7153b01fe64610be77d50f2b71af Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 4 Feb 2023 19:14:28 -0500 Subject: [PATCH 10/21] chore: API sync --- testnet/stacks-node/src/neon_node.rs | 3 ++- testnet/stacks-node/src/node.rs | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index dce07cc854..9c53430ffa 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -4011,7 +4011,8 @@ impl StacksNode { let view = { let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()) .expect("Failed to get sortition tip"); - SortitionDB::get_burnchain_view(&sortdb.conn(), &burnchain, &sortition_tip).unwrap() + SortitionDB::get_burnchain_view(&sortdb.index_conn(), &burnchain, &sortition_tip) + .unwrap() }; let peerdb = Self::setup_peer_db(config, &burnchain); diff --git a/testnet/stacks-node/src/node.rs b/testnet/stacks-node/src/node.rs index 78f5838309..c89ead6586 100644 --- a/testnet/stacks-node/src/node.rs +++ b/testnet/stacks-node/src/node.rs @@ -483,7 +483,8 @@ impl Node { let view = { let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()) .expect("Failed to get sortition tip"); - SortitionDB::get_burnchain_view(&sortdb.conn(), &burnchain, &sortition_tip).unwrap() + SortitionDB::get_burnchain_view(&sortdb.index_conn(), &burnchain, &sortition_tip) + .unwrap() }; // create a new peerdb From c2d3ebd41d1ddaceda1bab001cfdbd656e18a47d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 6 Feb 2023 18:24:23 -0500 Subject: [PATCH 11/21] chore: documentation on the design of the stacker DB system --- src/net/stackerdb/mod.rs | 96 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 96 insertions(+) diff --git a/src/net/stackerdb/mod.rs b/src/net/stackerdb/mod.rs index 2dbe30a40c..2e5e5317ff 100644 --- a/src/net/stackerdb/mod.rs +++ b/src/net/stackerdb/mod.rs @@ -14,6 +14,102 @@ // You 
should have received a copy of the GNU General Public License // along with this program. If not, see . +/// # The StackerDB System +/// +/// A StackerDB is a best-effort replicated database controlled by a smart contract, which Stacks +/// node operators can opt-in to hosting. Unlike a smart contract's data space, a StackerDB's +/// data is not consensus-critical -- nodes do not need to read its state to validate the +/// blockchain. Instead, developers use StackerDBs to host and replicate auxiliary smart contract +/// data for the purposes of some (off-chain) application in a best-effort manner. In doing so, +/// Stacks-powered applications are able to leverage the Stacks peer-to-peer node network to host +/// and disseminate their data without incurring the cost and performance penalties of bundling it +/// within a transaction. +/// +/// ## Data Model +/// +/// Data within a StackerDB is eventually-consistent. In the absence of writes and network +/// partitions, all replicas will receive the latest data in a finite number of protocol rounds, +/// with high probability. Given that network partitions in the peer-to-peer network are assumed +/// to be temporary, we assume that all StackerDB instances will receive the latest state in finite time. +/// Beyond this, it makes no guarantees about how quickly a write will materialize on a given replica. +/// +/// The StackerDB schema is chunk-oriented. Each StackerDB contains a fixed number of fixed-size +/// chunks. A `write` to a StackerDB is the act of replacing one chunk's data with new data, and a +/// `read` on a StackerDB is the act of loading one chunk from the node's local replica. Reading +/// and writing a single chunk on one node is atomic. StackerDB replication proceeds in a +/// store-and-forward manner -- newly-discovered chunks are stored to the node's local replica and +/// broadcast to a subset of neighbors who also replicate the given StackerDB. 
+/// +/// Each chunk has an associated Lamport clock, and an associated public key hash used to +/// authenticate writes. The Lamport clock is used to identify the latest version of a chunk -- +/// a node will replace an existing but stale copy of a chunk with a newly-downloaded chunk if its +/// Lamport clock has a strictly higher value. The chunk's metadata -- its ID, Lamport clock, and +/// data hash -- must be signed by the chunk's public key hash's associated private key in order to +/// be stored. The chunks themselves are ordered byte sequences with no mandatory internal +/// structure. +/// +/// StackerDB state is ephemeral. Its contents are dropped at the start of every reward cycle. +/// Endpoints must re-replicate data to the StackerDB if they wish to keep it online. In doing so, +/// the set of StackerDBs is self-administrating -- a node will only store state for active +/// StackerDBs. +/// +/// ## Control Plane +/// +/// The smart contract to which a StackerDB is bound controls how many chunks the DB has, who can +/// write to which chunks (identified by public key hash), how big a chunk is, and how often a +/// chunk can be written to (in wall-clock time). This smart contract is queried once per reward cycle +/// in order to configure the database. The act of configuring or re-configuring the database +/// is also the act of dropping and reinstantiating it. +/// +/// Applications that employ StackerDBs would deploy one or more smart contracts that list out +/// which users can store data to the StackerDB replica, and how much space they get. +/// +/// ## Replication Protocol +/// +/// StackerDB replication proceeds in a three-part protocol: discovery, inventory query, and +/// chunk exchange. The discovery protocol leverages the Stacks node's neighbor-walk algorithm to +/// discover which StackerDBs other nodes claim to replicate. 
On receipt of a `Handshake` message, +/// a StackerDB-aware node replies with a `StackerDBHandshakeAccept` message which encodes both the +/// contents of a `HandshakeAccept` message as well as a list of local StackerDBs (identified by +/// their smart contracts' addresses). Upon receipt of a `StackerDBHandshakeAccept`, the node +/// stores the list of smart contracts in its `PeerDB` as part of the network frontier state. In +/// doing so, nodes eventually learn of all of the StackerDBs replicated by all other nodes. To +/// bound the size of this state, the protocol mandates that a node can only replicate up to 256 +/// StackerDBs. +/// +/// When a node begins to replicate a StackerDB, it first queries the `PeerDB` for the set of nodes +/// that claim to have copies. This set, called the "DB neighbors", is distinct from the set +/// of neighbors the node uses to replicate blocks and transactions. It then connects +/// to these nodes with a `Handshake` / `StackerDBHandshakeAccept` exchange (if the neighbor walk +/// has not done so already), and proceeds to query each DB's chunk inventories. +/// +/// The chunk inventory is simply a vector of all of the remote peers' chunks' versions. +/// Once the node has received all chunk inventories from its neighbors, it schedules them for +/// download by prioritizing them by newest-first, and then by rarest-first, in order to ensure +/// that the latest, least-replicated data is downloaded first. +/// +/// Once the node has computed its download schedule, it queries its DB neighbors for chunks with +/// the given versions. Upon receipt of a chunk, the node verifies the signature on the chunk's +/// metadata, verifies that the chunk data hashes to the metadata's indicated data hash, and stores +/// the chunk. It will then select neighbors to which to broadcast this chunk, inferring from the +/// download schedule which DB neighbors have yet to process this particular version of the chunk. 
+/// +/// ## Comparison to other Stacks storage +/// +/// StackerDBs differ from AtlasDBs in that data chunks are not authenticated by the blockchain, +/// but instead are authenticated by public key hashes made available from a smart contract. As +/// such, a node can begin replicating a StackerDB whenever its operator wants -- it does not need +/// to re-synchronize blockchain state to get the list of chunk hashes. Furthermore, StackerDB +/// state can be written to as fast as the smart contract permits -- there is no need to wait for a +/// corresponding transaction to confirm. +/// +/// StackerDBs differ from Gaia in that Stacks nodes are the principal means of storing data. Any +/// reachable Stacks node can fulfill requests for chunks. It is up to the StackerDB maintainer to +/// convince node operators to replicate StackerDBs on their behalf. In addition, StackerDB state +/// is ephemeral -- its longevity in the system depends on application endpoints re-replicating the +/// state periodically (whereas Gaia stores data for as long as the back-end storage provider's SLA +/// indicates). + #[cfg(test)] pub mod tests; From 3a64b106f9c3c8706c9153d03298afeabee16cb8 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 16 Feb 2023 18:24:49 -0500 Subject: [PATCH 12/21] chore: rustdoc burnchain --- src/burnchains/mod.rs | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/src/burnchains/mod.rs b/src/burnchains/mod.rs index 2b757c6cf4..d2f7aa99fb 100644 --- a/src/burnchains/mod.rs +++ b/src/burnchains/mod.rs @@ -516,12 +516,18 @@ impl PoxConstants { /// Structure for encoding our view of the network #[derive(Debug, PartialEq, Clone)] pub struct BurnchainView { - pub burn_block_height: u64, // last-seen block height (at chain tip) - pub burn_block_hash: BurnchainHeaderHash, // last-seen burn block hash - pub burn_stable_block_height: u64, // latest stable block height (e.g. 
chain tip minus 7) - pub burn_stable_block_hash: BurnchainHeaderHash, // latest stable burn block hash - pub last_burn_block_hashes: HashMap, // map all block heights from burn_block_height back to the oldest one we'll take for considering the peer a neighbor - pub rc_consensus_hash: ConsensusHash, // consensus hash of the current reward cycle's start block + /// last-seen block height (at chain tip) + pub burn_block_height: u64, + /// last-seen burn block hash + pub burn_block_hash: BurnchainHeaderHash, + /// latest stable block height (e.g. chain tip minus 7) + pub burn_stable_block_height: u64, + /// latest stable burn block hash + pub burn_stable_block_hash: BurnchainHeaderHash, + /// map all block heights from burn_block_height back to the oldest one we'll take for considering the peer a neighbor + pub last_burn_block_hashes: HashMap, + /// consensus hash of the current reward cycle's start block + pub rc_consensus_hash: ConsensusHash, } /// The burnchain block's encoded state transition: From 01f1e909aac0b48e128471d1a07edf14ee3c0e62 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 16 Feb 2023 18:25:01 -0500 Subject: [PATCH 13/21] chore: rustdoc chat --- src/net/chat.rs | 97 +++++++++++++++++++++++++++++++++++-------------- 1 file changed, 69 insertions(+), 28 deletions(-) diff --git a/src/net/chat.rs b/src/net/chat.rs index e0ef27dd5e..0345bbc9c8 100644 --- a/src/net/chat.rs +++ b/src/net/chat.rs @@ -137,10 +137,14 @@ pub struct NeighborStats { pub msgs_err: u64, pub healthpoints: VecDeque, pub msg_rx_counts: HashMap, - pub block_push_rx_counts: VecDeque<(u64, u64)>, // (timestamp, num bytes) - pub microblocks_push_rx_counts: VecDeque<(u64, u64)>, // (timestamp, num bytes) - pub transaction_push_rx_counts: VecDeque<(u64, u64)>, // (timestamp, num bytes) - pub stackerdb_push_rx_counts: VecDeque<(u64, u64)>, // (timestamp, num bytes) + /// (timestamp, num bytes) + pub block_push_rx_counts: VecDeque<(u64, u64)>, + /// (timestamp, num bytes) + pub 
microblocks_push_rx_counts: VecDeque<(u64, u64)>, + /// (timestamp, num bytes) + pub transaction_push_rx_counts: VecDeque<(u64, u64)>, + /// (timestamp, num bytes) + pub stackerdb_push_rx_counts: VecDeque<(u64, u64)>, pub relayed_messages: HashMap, } @@ -169,6 +173,9 @@ impl NeighborStats { } } + /// Add a neighbor health point for this peer. + /// This updates the recent list of instances where this peer either successfully replied to a + /// message, or failed to do so (indicated by `success`). pub fn add_healthpoint(&mut self, success: bool) -> () { let hp = NeighborHealthPoint { success: success, @@ -180,6 +187,9 @@ impl NeighborStats { } } + /// Record that we recently received a block of the given size. + /// Keeps track of the last `NUM_BLOCK_POINTS` such events, so we can estimate the current + /// bandwidth consumed by block pushes. pub fn add_block_push(&mut self, message_size: u64) -> () { self.block_push_rx_counts .push_back((get_epoch_time_secs(), message_size)); @@ -188,6 +198,9 @@ impl NeighborStats { } } + /// Record that we recently received a microblock of the given size. + /// Keeps track of the last `NUM_BLOCK_POINTS` such events, so we can estimate the current + /// bandwidth consumed by microblock pushes. pub fn add_microblocks_push(&mut self, message_size: u64) -> () { self.microblocks_push_rx_counts .push_back((get_epoch_time_secs(), message_size)); @@ -196,6 +209,9 @@ impl NeighborStats { } } + /// Record that we recently received a transaction of the given size. + /// Keeps track of the last `NUM_BLOCK_POINTS` such events, so we can estimate the current + /// bandwidth consumed by transaction pushes. pub fn add_transaction_push(&mut self, message_size: u64) -> () { self.transaction_push_rx_counts .push_back((get_epoch_time_secs(), message_size)); @@ -204,6 +220,9 @@ impl NeighborStats { } } + /// Record that we recently received a stackerdb chunk push of the given size. 
+ /// Keeps track of the last `NUM_BLOCK_POINTS` such events, so we can estimate the current + /// bandwidth consumed by stackerdb chunk pushes. pub fn add_stackerdb_push(&mut self, message_size: u64) -> () { self.stackerdb_push_rx_counts .push_back((get_epoch_time_secs(), message_size)); @@ -304,43 +323,65 @@ impl NeighborStats { /// P2P ongoing conversation with another Stacks peer pub struct ConversationP2P { + /// Instantiation timestamp in seconds since the epoch pub instantiated: u64, + /// network ID of the local end of this conversation pub network_id: u32, + /// peer version of the local end of this conversation pub version: u32, + /// Underlying inbox/outbox for protocol communication pub connection: ConnectionP2P, + /// opaque ID of the socket pub conn_id: usize, - pub burnchain: Burnchain, // copy of our burnchain config - pub heartbeat: u32, // how often do we send heartbeats? + /// copy of our burnchain config + pub burnchain: Burnchain, + /// how often do we send heartbeats? + pub heartbeat: u32, + /// network ID of the remote end of this conversation pub peer_network_id: u32, + /// peer version of the remote end of this conversation pub peer_version: u32, + /// reported services of the remote end of this conversation pub peer_services: u16, - pub peer_addrbytes: PeerAddress, // from socketaddr - pub peer_port: u16, // from socketaddr - pub handshake_addrbytes: PeerAddress, // from handshake - pub handshake_port: u16, // from handshake - pub peer_heartbeat: u32, // how often do we need to ping the remote peer? - pub peer_expire_block_height: u64, // when does the peer's key expire? - - pub data_url: UrlString, // where does this peer's data live? Set to a 0-length string if not known. 
- - // highest burnchain block height and burnchain block hash this peer has seen + /// from socketaddr + pub peer_addrbytes: PeerAddress, + /// from socketaddr + pub peer_port: u16, + /// from handshake + pub handshake_addrbytes: PeerAddress, + /// from handshake + pub handshake_port: u16, + /// how often do we need to ping the remote peer? + pub peer_heartbeat: u32, + /// when does the peer's key expire? + pub peer_expire_block_height: u64, + + /// where does this peer's data live? Set to a 0-length string if not known. + pub data_url: UrlString, + + /// what this peer believes is the height of the burnchain pub burnchain_tip_height: u64, + /// what this peer believes is the hash of the burnchain tip pub burnchain_tip_burn_header_hash: BurnchainHeaderHash, + /// what this peer believes is the stable height of the burnchain (i.e. after a chain-specific + /// number of confirmations) pub burnchain_stable_tip_height: u64, + /// the hash of the burnchain block at height `burnchain_stable_tip_height` pub burnchain_stable_tip_burn_header_hash: BurnchainHeaderHash, + /// Statistics about this peer from this conversation pub stats: NeighborStats, - // which stacker DBs this peer replicates + /// which stacker DBs this peer replicates pub db_smart_contracts: Vec, - // outbound replies + /// outbound replies pub reply_handles: VecDeque, - // system epochs + /// system epochs epochs: Vec, } @@ -649,14 +690,14 @@ impl ConversationP2P { self.burnchain_stable_tip_burn_header_hash.clone() } - /// Does this remote neighbor support the mempool query interface? It will if it has both + /// Does the given services bitfield support the mempool query interface? It will if it has both /// RELAY and RPC bits set. pub fn supports_mempool_query(peer_services: u16) -> bool { let expected_bits = (ServiceFlags::RELAY as u16) | (ServiceFlags::RPC as u16); (peer_services & expected_bits) == expected_bits } - /// Does this remote neighbor support stacker DBs? 
It will if it has the STACKERDB bit set + /// Does the given services bitfield support stacker DBs? It will if it has the STACKERDB bit set pub fn supports_stackerdb(peer_services: u16) -> bool { (peer_services & (ServiceFlags::STACKERDB as u16)) != 0 } @@ -1753,7 +1794,7 @@ impl ConversationP2P { relayers: &Vec, ) -> bool { if !ConversationP2P::check_relayer_cycles(relayers) { - debug!( + warn!( "Invalid relayers -- message from {:?} contains a cycle", self.to_neighbor_key() ); @@ -1761,7 +1802,7 @@ impl ConversationP2P { } if !ConversationP2P::check_relayers_remote(local_peer, relayers) { - debug!( + warn!( "Invalid relayers -- message originates from us ({})", local_peer.to_neighbor_addr() ); @@ -1788,7 +1829,7 @@ impl ConversationP2P { assert!(preamble.payload_len > 5); // don't count 1-byte type prefix + 4 byte vector length if !self.process_relayers(local_peer, preamble, &relayers) { - debug!("Drop pushed blocks -- invalid relayers {:?}", &relayers); + warn!("Drop pushed blocks -- invalid relayers {:?}", &relayers); self.stats.msgs_err += 1; return Err(net_error::InvalidMessage); } @@ -1825,7 +1866,7 @@ impl ConversationP2P { assert!(preamble.payload_len > 5); // don't count 1-byte type prefix + 4 byte vector length if !self.process_relayers(local_peer, preamble, &relayers) { - debug!( + warn!( "Drop pushed microblocks -- invalid relayers {:?}", &relayers ); @@ -1860,7 +1901,7 @@ impl ConversationP2P { assert!(preamble.payload_len > 1); // don't count 1-byte type prefix if !self.process_relayers(local_peer, preamble, &relayers) { - debug!( + warn!( "Drop pushed transaction -- invalid relayers {:?}", &relayers ); @@ -1883,7 +1924,7 @@ impl ConversationP2P { Ok(None) } - /// Validate a pushed stackerdb chunk + /// Validate a pushed stackerdb chunk. /// Update bandwidth accounting, but forward the stackerdb chunk along. 
/// Possibly return a reply handle for a NACK if we throttle the remote sender fn validate_stackerdb_push( @@ -1896,7 +1937,7 @@ impl ConversationP2P { assert!(preamble.payload_len > 1); // don't count 1-byte type prefix if !self.process_relayers(local_peer, preamble, &relayers) { - debug!( + warn!( "Drop pushed stackerdb chunk -- invalid relayers {:?}", &relayers ); From ab3b929a4611ca8b4aa16c541aef0c3c12eb584e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 16 Feb 2023 18:25:13 -0500 Subject: [PATCH 14/21] chore: use helpers to serialize/deserialize qualified contract identifiers --- src/net/codec.rs | 53 ++++++++++++++++++++++++++++++------------------ 1 file changed, 33 insertions(+), 20 deletions(-) diff --git a/src/net/codec.rs b/src/net/codec.rs index 3b28962865..0ae344793c 100644 --- a/src/net/codec.rs +++ b/src/net/codec.rs @@ -752,21 +752,34 @@ impl StacksMessageCodec for MemPoolSyncData { } } -impl StacksMessageCodec for ContractId { - fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { - write_next(fd, &self.0.issuer.0)?; - write_next(fd, &self.0.issuer.1)?; - write_next(fd, &self.0.name)?; - Ok(()) - } +/// We can't implement StacksMessageCodec directly for T: QualifiedContractIdentifierExtension, so +/// we have to resort to these crude methods. 
+fn contract_id_consensus_serialize( + fd: &mut W, + cid: &T, +) -> Result<(), codec_error> { + let addr = cid.address(); + let name = cid.name(); + write_next(fd, &addr.version)?; + write_next(fd, &addr.bytes.0)?; + write_next(fd, &name)?; + Ok(()) +} - fn consensus_deserialize(fd: &mut R) -> Result { - let version: u8 = read_next(fd)?; - let bytes: [u8; 20] = read_next(fd)?; - let name: ContractName = read_next(fd)?; - let qn = QualifiedContractIdentifier::new(StandardPrincipalData(version, bytes), name); - Ok(ContractId(qn)) - } +fn contract_id_consensus_deserialize( + fd: &mut R, +) -> Result { + let version: u8 = read_next(fd)?; + let bytes: [u8; 20] = read_next(fd)?; + let name: ContractName = read_next(fd)?; + let qn = T::new( + StacksAddress { + version, + bytes: Hash160(bytes), + }, + name, + ); + Ok(qn) } impl StacksMessageCodec for StackerDBHandshakeData { @@ -779,7 +792,7 @@ impl StacksMessageCodec for StackerDBHandshakeData { write_next(fd, &self.rc_consensus_hash)?; write_next(fd, &len_u8)?; for cid in self.smart_contracts.iter() { - write_next(fd, cid)?; + contract_id_consensus_serialize(fd, cid)?; } Ok(()) } @@ -789,7 +802,7 @@ impl StacksMessageCodec for StackerDBHandshakeData { let len_u8: u8 = read_next(fd)?; let mut smart_contracts = Vec::with_capacity(len_u8 as usize); for _ in 0..len_u8 { - let cid: ContractId = read_next(fd)?; + let cid: ContractId = contract_id_consensus_deserialize(fd)?; smart_contracts.push(cid); } Ok(StackerDBHandshakeData { @@ -801,13 +814,13 @@ impl StacksMessageCodec for StackerDBHandshakeData { impl StacksMessageCodec for StackerDBGetChunkInvData { fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { - write_next(fd, &self.contract_id)?; + contract_id_consensus_serialize(fd, &self.contract_id)?; write_next(fd, &self.rc_consensus_hash)?; Ok(()) } fn consensus_deserialize(fd: &mut R) -> Result { - let contract_id: ContractId = read_next(fd)?; + let contract_id: ContractId = 
contract_id_consensus_deserialize(fd)?; let rc_consensus_hash: ConsensusHash = read_next(fd)?; Ok(StackerDBGetChunkInvData { contract_id, @@ -833,7 +846,7 @@ impl StacksMessageCodec for StackerDBChunkInvData { impl StacksMessageCodec for StackerDBGetChunkData { fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { - write_next(fd, &self.contract_id)?; + contract_id_consensus_serialize(fd, &self.contract_id)?; write_next(fd, &self.rc_consensus_hash)?; write_next(fd, &self.chunk_id)?; write_next(fd, &self.chunk_version)?; @@ -841,7 +854,7 @@ impl StacksMessageCodec for StackerDBGetChunkData { } fn consensus_deserialize(fd: &mut R) -> Result { - let contract_id: ContractId = read_next(fd)?; + let contract_id: ContractId = contract_id_consensus_deserialize(fd)?; let rc_consensus_hash: ConsensusHash = read_next(fd)?; let chunk_id: u32 = read_next(fd)?; let chunk_version: u32 = read_next(fd)?; From a91d37b6bde82d7be7ad4301b53e724826d7a816 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 16 Feb 2023 18:25:28 -0500 Subject: [PATCH 15/21] fix: use an extension trait for qualified contract identifiers, and alias QualifiedContractIdentifier to ContractId --- src/net/mod.rs | 42 ++++++++++++++++++++++-------------------- 1 file changed, 22 insertions(+), 20 deletions(-) diff --git a/src/net/mod.rs b/src/net/mod.rs index ebeb9d96e2..0c43cee5df 100644 --- a/src/net/mod.rs +++ b/src/net/mod.rs @@ -911,37 +911,39 @@ pub enum MemPoolSyncData { } /// Make QualifiedContractIdentifier usable to the networking code -#[derive(Debug, Clone, PartialEq)] -pub struct ContractId(QualifiedContractIdentifier); -impl ContractId { - pub fn new(addr: StacksAddress, name: ContractName) -> ContractId { +pub trait QualifiedContractIdentifierExtension { + fn new(addr: StacksAddress, name: ContractName) -> Self; + fn address(&self) -> StacksAddress; + fn name(&self) -> ContractName; + fn parse(txt: &str) -> Option + where + Self: Sized; +} + +impl 
QualifiedContractIdentifierExtension for QualifiedContractIdentifier { + fn new(addr: StacksAddress, name: ContractName) -> QualifiedContractIdentifier { let id_addr = StandardPrincipalData(addr.version, addr.bytes.0); - ContractId(QualifiedContractIdentifier::new(id_addr, name)) + QualifiedContractIdentifier::new(id_addr, name) } - pub fn address(&self) -> StacksAddress { + fn address(&self) -> StacksAddress { StacksAddress { - version: self.0.issuer.0, - bytes: Hash160(self.0.issuer.1.clone()), + version: self.issuer.0, + bytes: Hash160(self.issuer.1.clone()), } } - pub fn name(&self) -> ContractName { - self.0.name.clone() + fn name(&self) -> ContractName { + self.name.clone() } - pub fn parse(txt: &str) -> Option { - QualifiedContractIdentifier::parse(txt) - .ok() - .map(|qc| ContractId(qc)) + fn parse(txt: &str) -> Option { + QualifiedContractIdentifier::parse(txt).ok() } } -impl fmt::Display for ContractId { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", &self.0) - } -} +/// short-hand type alias +pub type ContractId = QualifiedContractIdentifier; /// Inform the remote peer of (a page of) the list of stacker DB contracts this node supports #[derive(Debug, Clone, PartialEq)] @@ -983,7 +985,7 @@ pub struct StackerDBGetChunkData { pub chunk_version: u32, } -/// Stacker DB chunk +/// Stacker DB chunk reply to a StackerDBGetChunkData #[derive(Debug, Clone, PartialEq)] pub struct StackerDBChunkData { /// chunk ID (i.e. 
the ith bit) From 548ed95c3ebcfa3f518aa33e8e61de1aa37c1052 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 16 Feb 2023 18:25:46 -0500 Subject: [PATCH 16/21] chore: comment where handshake handling happens --- src/net/stackerdb/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/net/stackerdb/mod.rs b/src/net/stackerdb/mod.rs index 2e5e5317ff..dbf7c0165c 100644 --- a/src/net/stackerdb/mod.rs +++ b/src/net/stackerdb/mod.rs @@ -75,7 +75,7 @@ /// stores the list of smart contracts in its `PeerDB` as part of the network frontier state. In /// doing so, nodes eventually learn of all of the StackerDBs replicated by all other nodes. To /// bound the size of this state, the protocol mandates that a node can only replicate up to 256 -/// StackerDBs. +/// StackerDBs. The handshake-handling code happens in src::net::handle_handshake(). /// /// When a node begins to replicate a StackerDB, it first queries the `PeerDB` for the set of nodes /// that claim to have copies. This set, called the "DB neighbors", is ddistinct from the set From 360906aa9ec400385cdafca08b0179a50482d42a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 20 Jul 2023 16:56:42 +0000 Subject: [PATCH 17/21] Update src/net/stackerdb/mod.rs Co-authored-by: Brice Dobry --- src/net/stackerdb/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/net/stackerdb/mod.rs b/src/net/stackerdb/mod.rs index dbf7c0165c..0b2ce7775f 100644 --- a/src/net/stackerdb/mod.rs +++ b/src/net/stackerdb/mod.rs @@ -22,7 +22,7 @@ /// blockchain. Instead, developers use StackerDBs to host and replicate auxiliary smart contract /// data for the purposes of some (off-chain) application in a best-effort manner. 
In doing so, /// Stacks-powered applications are able to leverage the Stacks peer-to-peer node network to host -/// and dissiminate their data without incuring the cost and performance penalties of bundling it +/// and disseminate their data without incurring the cost and performance penalties of bundling it /// within a transaction. /// /// ## Data Model From 21cf846e307a7ccf0dfbbc5d5eca46bcfd4bc132 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 20 Jul 2023 16:56:49 +0000 Subject: [PATCH 18/21] Update src/net/stackerdb/mod.rs Co-authored-by: Brice Dobry --- src/net/stackerdb/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/net/stackerdb/mod.rs b/src/net/stackerdb/mod.rs index 0b2ce7775f..8662428a4b 100644 --- a/src/net/stackerdb/mod.rs +++ b/src/net/stackerdb/mod.rs @@ -58,7 +58,7 @@ /// The smart contract to which a StackerDB is bound controls how many chunks the DB has, who can /// write to which chunks (identified by public key hash), how big a chunk is, and how often a /// chunk can be written to (in wall-clock time). This smart contract is queried once per reward cycle -/// in order to configure the database. The act of configuring the re-configuring the database +/// in order to configure the database. The act of re-configuring the database /// is also the act of dropping and reinstantiating it. /// /// Applications that employ StackerDBs would deploy one or more smart contracts that list out From 9a34a0e0299fcdad6743f1b4a33da720ed2359a6 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 20 Jul 2023 16:56:59 +0000 Subject: [PATCH 19/21] Update src/net/stackerdb/mod.rs Co-authored-by: Brice Dobry --- src/net/stackerdb/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/net/stackerdb/mod.rs b/src/net/stackerdb/mod.rs index 8662428a4b..e966a177fb 100644 --- a/src/net/stackerdb/mod.rs +++ b/src/net/stackerdb/mod.rs @@ -78,7 +78,7 @@ /// StackerDBs. 
The handshake-handling code happens in src::net::handle_handshake(). /// /// When a node begins to replicate a StackerDB, it first queries the `PeerDB` for the set of nodes -/// that claim to have copies. This set, called the "DB neighbors", is ddistinct from the set +/// that claim to have copies. This set, called the "DB neighbors", is distinct from the set /// of neighbors the node uses to replicate blocks and transactions. It then connects /// to these nodes with a `Handshake` / `StackerDBHandshakeAccept` exchange (if the neighbor walk /// has not done so already), and proceeds to query each DB's chunk inventories. From f7b376d7d5fc0df2083a9abd13fd1b728c268a69 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 24 Jul 2023 16:34:26 -0400 Subject: [PATCH 20/21] chore: add more test coverage to chat.rs --- src/net/chat.rs | 3027 +++++++++++++++++++++++++++++++++++------------ 1 file changed, 2244 insertions(+), 783 deletions(-) diff --git a/src/net/chat.rs b/src/net/chat.rs index de6234b914..da51ecd853 100644 --- a/src/net/chat.rs +++ b/src/net/chat.rs @@ -89,10 +89,10 @@ impl Default for NeighborHealthPoint { pub const NUM_HEALTH_POINTS: usize = 32; pub const HEALTH_POINT_LIFETIME: u64 = 12 * 3600; // 12 hours -/// The max number of data points to gather for block/microblock/transaction push messages from a neighbor -pub const NUM_BLOCK_POINTS: usize = 32; +/// The max number of data points to gather for block/microblock/transaction/stackerdb push messages from a neighbor +pub const NUM_BANDWIDTH_POINTS: usize = 32; /// The number of seconds a block data point is valid for the purpose of computing stats -pub const BLOCK_POINT_LIFETIME: u64 = 600; +pub const BANDWIDTH_POINT_LIFETIME: u64 = 600; pub const MAX_PEER_HEARTBEAT_INTERVAL: usize = 3600 * 6; // 6 hours @@ -190,45 +190,45 @@ impl NeighborStats { } /// Record that we recently received a block of the given size. 
- /// Keeps track of the last `NUM_BLOCK_POINTS` such events, so we can estimate the current + /// Keeps track of the last `NUM_BANDWIDTH_POINTS` such events, so we can estimate the current /// bandwidth consumed by block pushes. pub fn add_block_push(&mut self, message_size: u64) -> () { self.block_push_rx_counts .push_back((get_epoch_time_secs(), message_size)); - while self.block_push_rx_counts.len() > NUM_BLOCK_POINTS { + while self.block_push_rx_counts.len() > NUM_BANDWIDTH_POINTS { self.block_push_rx_counts.pop_front(); } } /// Record that we recently received a microblock of the given size. - /// Keeps track of the last `NUM_BLOCK_POINTS` such events, so we can estimate the current + /// Keeps track of the last `NUM_BANDWIDTH_POINTS` such events, so we can estimate the current /// bandwidth consumed by microblock pushes. pub fn add_microblocks_push(&mut self, message_size: u64) -> () { self.microblocks_push_rx_counts .push_back((get_epoch_time_secs(), message_size)); - while self.microblocks_push_rx_counts.len() > NUM_BLOCK_POINTS { + while self.microblocks_push_rx_counts.len() > NUM_BANDWIDTH_POINTS { self.microblocks_push_rx_counts.pop_front(); } } /// Record that we recently received a transaction of the given size. - /// Keeps track of the last `NUM_BLOCK_POINTS` such events, so we can estimate the current + /// Keeps track of the last `NUM_BANDWIDTH_POINTS` such events, so we can estimate the current /// bandwidth consumed by transaction pushes. pub fn add_transaction_push(&mut self, message_size: u64) -> () { self.transaction_push_rx_counts .push_back((get_epoch_time_secs(), message_size)); - while self.transaction_push_rx_counts.len() > NUM_BLOCK_POINTS { + while self.transaction_push_rx_counts.len() > NUM_BANDWIDTH_POINTS { self.transaction_push_rx_counts.pop_front(); } } /// Record that we recently received a stackerdb chunk push of the given size. 
- /// Keeps track of the last `NUM_BLOCK_POINTS` such events, so we can estimate the current + /// Keeps track of the last `NUM_BANDWIDTH_POINTS` such events, so we can estimate the current /// bandwidth consumed by stackerdb chunk pushes. pub fn add_stackerdb_push(&mut self, message_size: u64) -> () { self.stackerdb_push_rx_counts .push_back((get_epoch_time_secs(), message_size)); - while self.stackerdb_push_rx_counts.len() > NUM_BLOCK_POINTS { + while self.stackerdb_push_rx_counts.len() > NUM_BANDWIDTH_POINTS { self.stackerdb_push_rx_counts.pop_front(); } } @@ -299,22 +299,22 @@ impl NeighborStats { /// Get a peer's total block-push bandwidth usage. pub fn get_block_push_bandwidth(&self) -> f64 { - NeighborStats::get_bandwidth(&self.block_push_rx_counts, BLOCK_POINT_LIFETIME) + NeighborStats::get_bandwidth(&self.block_push_rx_counts, BANDWIDTH_POINT_LIFETIME) } /// Get a peer's total microblock-push bandwidth usage. pub fn get_microblocks_push_bandwidth(&self) -> f64 { - NeighborStats::get_bandwidth(&self.microblocks_push_rx_counts, BLOCK_POINT_LIFETIME) + NeighborStats::get_bandwidth(&self.microblocks_push_rx_counts, BANDWIDTH_POINT_LIFETIME) } /// Get a peer's total transaction-push bandwidth usage pub fn get_transaction_push_bandwidth(&self) -> f64 { - NeighborStats::get_bandwidth(&self.transaction_push_rx_counts, BLOCK_POINT_LIFETIME) + NeighborStats::get_bandwidth(&self.transaction_push_rx_counts, BANDWIDTH_POINT_LIFETIME) } /// Get a peer's total stackerdb-push bandwidth usage pub fn get_stackerdb_push_bandwidth(&self) -> f64 { - NeighborStats::get_bandwidth(&self.stackerdb_push_rx_counts, BLOCK_POINT_LIFETIME) + NeighborStats::get_bandwidth(&self.stackerdb_push_rx_counts, BANDWIDTH_POINT_LIFETIME) } /// Determine how many of a particular message this peer has received @@ -612,21 +612,6 @@ impl ConversationP2P { } } - pub fn best_effort_neighbor_key(&self) -> NeighborKey { - if self.handshake_port > 0 && self.peer_version > 0 { - // got a handshake response 
already - self.to_handshake_neighbor_key() - } else { - // assume we know nothing from this neighbor - NeighborKey { - peer_version: self.version, - network_id: self.network_id, - addrbytes: self.peer_addrbytes.clone(), - port: self.peer_port, - } - } - } - pub fn to_neighbor_address(&self) -> NeighborAddress { let pubkh = if let Some(ref pubk) = self.ref_public_key() { Hash160::from_node_public_key(pubk) @@ -2631,6 +2616,7 @@ mod test { use clarity::vm::costs::ExecutionCost; use stacks_common::util::pipe::*; use stacks_common::util::secp256k1::*; + use stacks_common::util::sleep_ms; use stacks_common::util::uint::*; use crate::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, SortitionId}; @@ -3217,6 +3203,9 @@ mod test { &convo_1.db_smart_contracts, &convo_2.db_smart_contracts ); assert_eq!(convo_1.db_smart_contracts.len(), 1); + assert!(convo_1.replicates_stackerdb( + &ContractId::parse("SP000000000000000000002Q6VF78.sbtc").unwrap() + )); } else { assert_eq!(db_data.rc_consensus_hash, chain_view_2.rc_consensus_hash); @@ -3226,6 +3215,9 @@ mod test { &convo_1.db_smart_contracts, &convo_2.db_smart_contracts ); assert_eq!(convo_1.db_smart_contracts.len(), 0); + assert!(!convo_1.replicates_stackerdb( + &ContractId::parse("SP000000000000000000002Q6VF78.sbtc").unwrap() + )); } } _ => { @@ -3584,7 +3576,7 @@ mod test { } #[test] - fn convo_handshake_self() { + fn convo_handshake_badpeeraddress() { let conn_opts = ConnectionOptions::default(); let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); let socketaddr_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081); @@ -3612,7 +3604,7 @@ mod test { .unwrap(); let (mut peerdb_1, mut sortdb_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( - "convo_handshake_self_1", + "convo_handshake_badpeeraddress_1", &burnchain, 0x9abcdef0, 12350, @@ -3622,7 +3614,7 @@ mod test { DEFAULT_SERVICES, ); let (mut peerdb_2, mut sortdb_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( - 
"convo_handshake_self_2", + "convo_handshake_badpeeraddress_2", &burnchain, 0x9abcdef0, 12351, @@ -3663,51 +3655,70 @@ mod test { assert!(convo_1.connection.get_public_key().is_none()); assert!(convo_2.connection.get_public_key().is_none()); - // convo_1 sends a handshake to itself (not allowed) - let handshake_data_1 = HandshakeData::from_local_peer(&local_peer_2); + // teach each convo about each other's public keys + convo_1 + .connection + .set_public_key(Some(Secp256k1PublicKey::from_private( + &local_peer_2.private_key, + ))); + convo_2 + .connection + .set_public_key(Some(Secp256k1PublicKey::from_private( + &local_peer_1.private_key, + ))); + + assert!(convo_1.connection.get_public_key().is_some()); + assert!(convo_2.connection.get_public_key().is_some()); + + convo_1.stats.outbound = false; + convo_2.stats.outbound = true; + + convo_2.peer_port = 8080; + + // convo_1 sends a handshake from a different address than reported + let mut handshake_data_1 = HandshakeData::from_local_peer(&local_peer_1); + handshake_data_1.port = 8082; let handshake_1 = convo_1 .sign_message( &chain_view, - &local_peer_2.private_key, + &local_peer_1.private_key, StacksMessageType::Handshake(handshake_data_1.clone()), ) .unwrap(); + let mut rh_1 = convo_1.send_signed_request(handshake_1, 1000000).unwrap(); - // convo_2 receives it and processes it automatically (consuming it), and give back a handshake reject + // convo_2 receives it and processes it, and rejects it convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2); let unhandled_2 = convo_2 .chat( &local_peer_2, - &mut peerdb_1, - &sortdb_1, - &pox_id_1, - &mut chainstate_1, + &mut peerdb_2, + &sortdb_2, + &pox_id_2, + &mut chainstate_2, &mut BlockHeaderCache::new(), &chain_view, ) .unwrap(); - // convo_1 gets a handshake reject and consumes it + // convo_1 gets a handshake-reject and consumes it convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1); let unhandled_1 = convo_1 .chat( &local_peer_1, - &mut 
peerdb_2, - &sortdb_2, - &pox_id_2, - &mut chainstate_2, + &mut peerdb_1, + &sortdb_1, + &pox_id_1, + &mut chainstate_1, &mut BlockHeaderCache::new(), &chain_view, ) .unwrap(); - // get back handshake reject + // the waiting reply aborts on disconnect let reply_1 = rh_1.recv(0).unwrap(); - assert_eq!(unhandled_1.len(), 0); - assert_eq!(unhandled_2.len(), 0); - // received a valid HandshakeReject from peer 2 match reply_1.payload { StacksMessageType::HandshakeReject => {} @@ -3716,13 +3727,12 @@ mod test { } }; - // neither peer updated their info on one another - assert!(convo_1.connection.get_public_key().is_none()); - assert!(convo_2.connection.get_public_key().is_none()); + assert_eq!(unhandled_1.len(), 0); + assert_eq!(unhandled_2.len(), 0); } #[test] - fn convo_ping() { + fn convo_handshake_update_key() { let conn_opts = ConnectionOptions::default(); let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); let socketaddr_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081); @@ -3744,13 +3754,8 @@ mod test { }; chain_view.make_test_data(); - let first_burn_hash = BurnchainHeaderHash::from_hex( - "0000000000000000000000000000000000000000000000000000000000000000", - ) - .unwrap(); - let (mut peerdb_1, mut sortdb_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( - "convo_ping_1", + "convo_handshake_update_key_1", &burnchain, 0x9abcdef0, 12350, @@ -3760,7 +3765,7 @@ mod test { DEFAULT_SERVICES, ); let (mut peerdb_2, mut sortdb_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( - "convo_ping_2", + "convo_handshake_update_key_2", &burnchain, 0x9abcdef0, 12351, @@ -3773,7 +3778,7 @@ mod test { db_setup(&mut peerdb_1, &mut sortdb_1, &socketaddr_1, &chain_view); db_setup(&mut peerdb_2, &mut sortdb_2, &socketaddr_2, &chain_view); - let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); + let mut local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); let local_peer_2 = 
PeerDB::get_local_peer(&peerdb_2.conn()).unwrap(); let mut convo_1 = ConversationP2P::new( @@ -3797,6 +3802,10 @@ mod test { StacksEpoch::unit_test_pre_2_05(0), ); + // no peer public keys known yet + assert!(convo_1.connection.get_public_key().is_none()); + assert!(convo_2.connection.get_public_key().is_none()); + // convo_1 sends a handshake to convo_2 let handshake_data_1 = HandshakeData::from_local_peer(&local_peer_1); let handshake_1 = convo_1 @@ -3806,32 +3815,81 @@ mod test { StacksMessageType::Handshake(handshake_data_1.clone()), ) .unwrap(); - let mut rh_handshake_1 = convo_1 - .send_signed_request(handshake_1.clone(), 1000000) - .unwrap(); - // convo_1 sends a ping to convo_2 - let ping_data_1 = PingData::new(); - let ping_1 = convo_1 - .sign_message( + let mut rh_1 = convo_1.send_signed_request(handshake_1, 1000000).unwrap(); + + // convo_2 receives it + convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2); + let unhandled_2 = convo_2 + .chat( + &local_peer_2, + &mut peerdb_2, + &sortdb_2, + &pox_id_2, + &mut chainstate_2, + &mut BlockHeaderCache::new(), &chain_view, - &local_peer_1.private_key, - StacksMessageType::Ping(ping_data_1.clone()), ) .unwrap(); - let mut rh_ping_1 = convo_1 - .send_signed_request(ping_1.clone(), 1000000) + + // convo_1 has a handshakaccept + convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1); + let unhandled_1 = convo_1 + .chat( + &local_peer_1, + &mut peerdb_1, + &sortdb_1, + &pox_id_1, + &mut chainstate_1, + &mut BlockHeaderCache::new(), + &chain_view, + ) .unwrap(); - // convo_2 receives the handshake and ping and processes both, and since no one is waiting for the handshake, will forward - // it along to the chat caller (us) - test_debug!("send handshake {:?}", &handshake_1); - test_debug!("send ping {:?}", &ping_1); - convo_send_recv( - &mut convo_1, - vec![&mut rh_handshake_1, &mut rh_ping_1], - &mut convo_2, + let reply_1 = rh_1.recv(0).unwrap(); + + assert_eq!(unhandled_1.len(), 0); + 
assert_eq!(unhandled_2.len(), 1); + + // received a valid HandshakeAccept from peer 2 + match reply_1.payload { + StacksMessageType::HandshakeAccept(..) => {} + _ => { + assert!(false); + } + }; + + // peers learned each other's keys + assert_eq!( + convo_1.connection.get_public_key().as_ref().unwrap(), + &Secp256k1PublicKey::from_private(&local_peer_2.private_key) + ); + assert_eq!( + convo_2.connection.get_public_key().as_ref().unwrap(), + &Secp256k1PublicKey::from_private(&local_peer_1.private_key) ); + + let old_peer_1_privkey = local_peer_1.private_key.clone(); + let old_peer_1_pubkey = Secp256k1PublicKey::from_private(&old_peer_1_privkey); + + // peer 1 updates their private key + local_peer_1.private_key = Secp256k1PrivateKey::new(); + + // peer 1 re-handshakes + // convo_1 sends a handshake to convo_2 + let handshake_data_1 = HandshakeData::from_local_peer(&local_peer_1); + let handshake_1 = convo_1 + .sign_message( + &chain_view, + &old_peer_1_privkey, + StacksMessageType::Handshake(handshake_data_1.clone()), + ) + .unwrap(); + + let mut rh_1 = convo_1.send_signed_request(handshake_1, 1000000).unwrap(); + + // convo_2 receives it + convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2); let unhandled_2 = convo_2 .chat( &local_peer_2, @@ -3844,14 +3902,8 @@ mod test { ) .unwrap(); - // convo_1 has a handshakeaccept - test_debug!("reply handshake-accept"); - test_debug!("send pong"); - convo_send_recv( - &mut convo_2, - vec![&mut rh_handshake_1, &mut rh_ping_1], - &mut convo_1, - ); + // convo_1 has a handshakaccept + convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1); let unhandled_1 = convo_1 .chat( &local_peer_1, @@ -3864,35 +3916,27 @@ mod test { ) .unwrap(); - let reply_handshake_1 = rh_handshake_1.recv(0).unwrap(); - let reply_ping_1 = rh_ping_1.recv(0).unwrap(); + let reply_1 = rh_1.recv(0).unwrap(); assert_eq!(unhandled_1.len(), 0); - assert_eq!(unhandled_2.len(), 1); // only the handshake is given back. 
the ping is consumed + assert_eq!(unhandled_2.len(), 1); - // convo 2 returns the handshake from convo 1 - match unhandled_2[0].payload { - StacksMessageType::Handshake(ref data) => { - assert_eq!(handshake_data_1, *data); - } - _ => { - assert!(false); - } - }; + // new keys were learned + assert_eq!( + convo_1.connection.get_public_key().as_ref().unwrap(), + &Secp256k1PublicKey::from_private(&local_peer_2.private_key) + ); + assert_eq!( + convo_2.connection.get_public_key().as_ref().unwrap(), + &Secp256k1PublicKey::from_private(&local_peer_1.private_key) + ); - // convo 2 replied to convo 1 with a matching pong - match reply_ping_1.payload { - StacksMessageType::Pong(ref data) => { - assert_eq!(data.nonce, ping_data_1.nonce); - } - _ => { - assert!(false); - } - } + assert!(convo_1.connection.get_public_key().as_ref().unwrap() != &old_peer_1_pubkey); + assert!(convo_2.connection.get_public_key().as_ref().unwrap() != &old_peer_1_pubkey); } #[test] - fn convo_handshake_ping_loop() { + fn convo_handshake_self() { let conn_opts = ConnectionOptions::default(); let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); let socketaddr_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081); @@ -3920,7 +3964,7 @@ mod test { .unwrap(); let (mut peerdb_1, mut sortdb_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( - "convo_handshake_ping_loop_1", + "convo_handshake_self_1", &burnchain, 0x9abcdef0, 12350, @@ -3930,7 +3974,7 @@ mod test { DEFAULT_SERVICES, ); let (mut peerdb_2, mut sortdb_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( - "convo_handshake_ping_loop_2", + "convo_handshake_self_2", &burnchain, 0x9abcdef0, 12351, @@ -3963,157 +4007,74 @@ mod test { &socketaddr_1, &conn_opts, true, - 1, + 0, StacksEpoch::unit_test_pre_2_05(0), ); - for i in 0..5 { - // do handshake/ping over and over, with different keys. - // tests re-keying. 
+ // no peer public keys known yet + assert!(convo_1.connection.get_public_key().is_none()); + assert!(convo_2.connection.get_public_key().is_none()); - // convo_1 sends a handshake to convo_2 - let handshake_data_1 = HandshakeData::from_local_peer(&local_peer_1); - let handshake_1 = convo_1 - .sign_message( - &chain_view, - &local_peer_1.private_key, - StacksMessageType::Handshake(handshake_data_1.clone()), - ) - .unwrap(); - let mut rh_handshake_1 = convo_1.send_signed_request(handshake_1, 1000000).unwrap(); + // convo_1 sends a handshake to itself (not allowed) + let handshake_data_1 = HandshakeData::from_local_peer(&local_peer_2); + let handshake_1 = convo_1 + .sign_message( + &chain_view, + &local_peer_2.private_key, + StacksMessageType::Handshake(handshake_data_1.clone()), + ) + .unwrap(); + let mut rh_1 = convo_1.send_signed_request(handshake_1, 1000000).unwrap(); - // convo_1 sends a ping to convo_2 - let ping_data_1 = PingData::new(); - let ping_1 = convo_1 - .sign_message( - &chain_view, - &local_peer_1.private_key, - StacksMessageType::Ping(ping_data_1.clone()), - ) - .unwrap(); - let mut rh_ping_1 = convo_1.send_signed_request(ping_1, 1000000).unwrap(); + // convo_2 receives it and processes it automatically (consuming it), and give back a handshake reject + convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2); + let unhandled_2 = convo_2 + .chat( + &local_peer_2, + &mut peerdb_1, + &sortdb_1, + &pox_id_1, + &mut chainstate_1, + &mut BlockHeaderCache::new(), + &chain_view, + ) + .unwrap(); - // convo_2 receives the handshake and ping and processes both, and since no one is waiting for the handshake, will forward - // it along to the chat caller (us) - convo_send_recv( - &mut convo_1, - vec![&mut rh_handshake_1, &mut rh_ping_1], - &mut convo_2, - ); - let unhandled_2 = convo_2 - .chat( - &local_peer_2, - &mut peerdb_2, - &sortdb_2, - &pox_id_2, - &mut chainstate_2, - &mut BlockHeaderCache::new(), - &chain_view, - ) - .unwrap(); + // convo_1 gets 
a handshake reject and consumes it + convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1); + let unhandled_1 = convo_1 + .chat( + &local_peer_1, + &mut peerdb_2, + &sortdb_2, + &pox_id_2, + &mut chainstate_2, + &mut BlockHeaderCache::new(), + &chain_view, + ) + .unwrap(); - // convo_1 has a handshakeaccept - convo_send_recv( - &mut convo_2, - vec![&mut rh_handshake_1, &mut rh_ping_1], - &mut convo_1, - ); - let unhandled_1 = convo_1 - .chat( - &local_peer_1, - &mut peerdb_1, - &sortdb_1, - &pox_id_1, - &mut chainstate_1, - &mut BlockHeaderCache::new(), - &chain_view, - ) - .unwrap(); - - let reply_handshake_1 = rh_handshake_1.recv(0).unwrap(); - let reply_ping_1 = rh_ping_1.recv(0).unwrap(); - - assert_eq!(unhandled_1.len(), 0); - assert_eq!(unhandled_2.len(), 1); // only the handshake is given back. the ping is consumed + // get back handshake reject + let reply_1 = rh_1.recv(0).unwrap(); - // convo 2 returns the handshake from convo 1 - match unhandled_2[0].payload { - StacksMessageType::Handshake(ref data) => { - assert_eq!(handshake_data_1, *data); - } - _ => { - assert!(false); - } - }; + assert_eq!(unhandled_1.len(), 0); + assert_eq!(unhandled_2.len(), 0); - // convo 2 replied to convo 1 with a matching pong - match reply_ping_1.payload { - StacksMessageType::Pong(ref data) => { - assert_eq!(data.nonce, ping_data_1.nonce); - } - _ => { - assert!(false); - } + // received a valid HandshakeReject from peer 2 + match reply_1.payload { + StacksMessageType::HandshakeReject => {} + _ => { + assert!(false); } + }; - // received a valid HandshakeAccept from peer 2 - match reply_handshake_1.payload { - StacksMessageType::HandshakeAccept(ref data) - | StacksMessageType::StackerDBHandshakeAccept(ref data, ..) 
=> { - assert_eq!(data.handshake.addrbytes, local_peer_2.addrbytes); - assert_eq!(data.handshake.port, local_peer_2.port); - assert_eq!(data.handshake.services, local_peer_2.services); - assert_eq!( - data.handshake.node_public_key, - StacksPublicKeyBuffer::from_public_key(&Secp256k1PublicKey::from_private( - &local_peer_2.private_key - )) - ); - assert_eq!( - data.handshake.expire_block_height, - local_peer_2.private_key_expire - ); - assert_eq!(data.heartbeat_interval, conn_opts.heartbeat); - } - _ => { - assert!(false); - } - }; - - // convo_2 got updated with convo_1's peer info, and default heartbeat filled in - assert_eq!(convo_2.peer_heartbeat, 3600); - assert_eq!( - convo_2 - .connection - .get_public_key() - .unwrap() - .to_bytes_compressed(), - Secp256k1PublicKey::from_private(&local_peer_1.private_key).to_bytes_compressed() - ); - - // convo_1 got updated with convo_2's peer info, as well as heartbeat - assert_eq!(convo_1.peer_heartbeat, conn_opts.heartbeat); - assert_eq!( - convo_1 - .connection - .get_public_key() - .unwrap() - .to_bytes_compressed(), - Secp256k1PublicKey::from_private(&local_peer_2.private_key).to_bytes_compressed() - ); - - // regenerate keys and expiries in peer 1 - let new_privkey = Secp256k1PrivateKey::new(); - { - let mut tx = peerdb_1.tx_begin().unwrap(); - PeerDB::set_local_private_key(&mut tx, &new_privkey, (12350 + i) as u64).unwrap(); - tx.commit().unwrap(); - } - } + // neither peer updated their info on one another + assert!(convo_1.connection.get_public_key().is_none()); + assert!(convo_2.connection.get_public_key().is_none()); } #[test] - fn convo_nack_unsolicited() { + fn convo_ping() { let conn_opts = ConnectionOptions::default(); let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); let socketaddr_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081); @@ -4141,7 +4102,7 @@ mod test { .unwrap(); let (mut peerdb_1, mut sortdb_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( - 
"convo_nack_unsolicited_1", + "convo_ping_1", &burnchain, 0x9abcdef0, 12350, @@ -4151,7 +4112,7 @@ mod test { DEFAULT_SERVICES, ); let (mut peerdb_2, mut sortdb_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( - "convo_nack_unsolicited_2", + "convo_ping_2", &burnchain, 0x9abcdef0, 12351, @@ -4188,9 +4149,18 @@ mod test { StacksEpoch::unit_test_pre_2_05(0), ); - // no peer public keys known yet - assert!(convo_1.connection.get_public_key().is_none()); - assert!(convo_2.connection.get_public_key().is_none()); + // convo_1 sends a handshake to convo_2 + let handshake_data_1 = HandshakeData::from_local_peer(&local_peer_1); + let handshake_1 = convo_1 + .sign_message( + &chain_view, + &local_peer_1.private_key, + StacksMessageType::Handshake(handshake_data_1.clone()), + ) + .unwrap(); + let mut rh_handshake_1 = convo_1 + .send_signed_request(handshake_1.clone(), 1000000) + .unwrap(); // convo_1 sends a ping to convo_2 let ping_data_1 = PingData::new(); @@ -4201,10 +4171,19 @@ mod test { StacksMessageType::Ping(ping_data_1.clone()), ) .unwrap(); - let mut rh_ping_1 = convo_1.send_signed_request(ping_1, 1000000).unwrap(); + let mut rh_ping_1 = convo_1 + .send_signed_request(ping_1.clone(), 1000000) + .unwrap(); - // convo_2 will reply with a nack since peer_1 hasn't authenticated yet - convo_send_recv(&mut convo_1, vec![&mut rh_ping_1], &mut convo_2); + // convo_2 receives the handshake and ping and processes both, and since no one is waiting for the handshake, will forward + // it along to the chat caller (us) + test_debug!("send handshake {:?}", &handshake_1); + test_debug!("send ping {:?}", &ping_1); + convo_send_recv( + &mut convo_1, + vec![&mut rh_handshake_1, &mut rh_ping_1], + &mut convo_2, + ); let unhandled_2 = convo_2 .chat( &local_peer_2, @@ -4217,8 +4196,14 @@ mod test { ) .unwrap(); - // convo_1 has a nack - convo_send_recv(&mut convo_2, vec![&mut rh_ping_1], &mut convo_1); + // convo_1 has a handshakeaccept + test_debug!("reply handshake-accept"); + 
test_debug!("send pong"); + convo_send_recv( + &mut convo_2, + vec![&mut rh_handshake_1, &mut rh_ping_1], + &mut convo_1, + ); let unhandled_1 = convo_1 .chat( &local_peer_1, @@ -4231,107 +4216,112 @@ mod test { ) .unwrap(); - let reply_1 = rh_ping_1.recv(0).unwrap(); + let reply_handshake_1 = rh_handshake_1.recv(0).unwrap(); + let reply_ping_1 = rh_ping_1.recv(0).unwrap(); - // convo_2 gives back nothing assert_eq!(unhandled_1.len(), 0); - assert_eq!(unhandled_2.len(), 0); + assert_eq!(unhandled_2.len(), 1); // only the handshake is given back. the ping is consumed - // convo_1 got a NACK - match reply_1.payload { - StacksMessageType::Nack(ref data) => { - assert_eq!(data.error_code, NackErrorCodes::HandshakeRequired); + // convo 2 returns the handshake from convo 1 + match unhandled_2[0].payload { + StacksMessageType::Handshake(ref data) => { + assert_eq!(handshake_data_1, *data); } _ => { assert!(false); } }; - // convo_2 did NOT get updated with convo_1's peer info - assert_eq!(convo_2.peer_heartbeat, 0); - assert!(convo_2.connection.get_public_key().is_none()); - - // convo_1 did NOT get updated - assert_eq!(convo_1.peer_heartbeat, 0); - assert!(convo_2.connection.get_public_key().is_none()); + // convo 2 replied to convo 1 with a matching pong + match reply_ping_1.payload { + StacksMessageType::Pong(ref data) => { + assert_eq!(data.nonce, ping_data_1.nonce); + } + _ => { + assert!(false); + } + } } #[test] - fn convo_handshake_getblocksinv() { - with_timeout(100, || { - let conn_opts = ConnectionOptions::default(); - - let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); - let socketaddr_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081); - - let first_burn_hash = BurnchainHeaderHash::from_hex( - "0000000000000000000000000000000000000000000000000000000000000000", - ) - .unwrap(); - - let burnchain = testing_burnchain_config(); + fn convo_handshake_ping_loop() { + let conn_opts = ConnectionOptions::default(); + let 
socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + let socketaddr_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081); - let mut chain_view = BurnchainView { - burn_block_height: 12331, // burnchain.reward_cycle_to_block_height(burnchain.block_height_to_reward_cycle(12348 - 8).unwrap()), - burn_block_hash: BurnchainHeaderHash([0x11; 32]), - burn_stable_block_height: 12331 - 7, // burnchain.reward_cycle_to_block_height(burnchain.block_height_to_reward_cycle(12341 - 8).unwrap() - 1), - burn_stable_block_hash: BurnchainHeaderHash([0x22; 32]), - last_burn_block_hashes: HashMap::new(), - rc_consensus_hash: ConsensusHash([0x33; 20]), - }; - chain_view.make_test_data(); + let first_burn_hash = BurnchainHeaderHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(); - let (mut peerdb_1, mut sortdb_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( - "convo_handshake_getblocksinv_1", - &burnchain, - 0x9abcdef0, - 12350, - "http://peer1.com".into(), - &vec![], - &vec![], - DEFAULT_SERVICES, - ); - let (mut peerdb_2, mut sortdb_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( - "convo_handshake_getblocksinv_2", - &burnchain, - 0x9abcdef0, - 12351, - "http://peer2.com".into(), - &vec![], - &vec![], - DEFAULT_SERVICES, - ); + let burnchain = testing_burnchain_config(); - db_setup(&mut peerdb_1, &mut sortdb_1, &socketaddr_1, &chain_view); - db_setup(&mut peerdb_2, &mut sortdb_2, &socketaddr_2, &chain_view); + let mut chain_view = BurnchainView { + burn_block_height: 12348, + burn_block_hash: BurnchainHeaderHash([0x11; 32]), + burn_stable_block_height: 12341, + burn_stable_block_hash: BurnchainHeaderHash([0x22; 32]), + last_burn_block_hashes: HashMap::new(), + rc_consensus_hash: ConsensusHash([0x33; 20]), + }; + chain_view.make_test_data(); - let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); - let local_peer_2 = PeerDB::get_local_peer(&peerdb_2.conn()).unwrap(); + 
let first_burn_hash = BurnchainHeaderHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(); - let mut convo_1 = ConversationP2P::new( - 123, - 456, - &burnchain, - &socketaddr_2, - &conn_opts, - true, - 0, - StacksEpoch::unit_test_pre_2_05(0), - ); - let mut convo_2 = ConversationP2P::new( - 123, - 456, - &burnchain, - &socketaddr_1, - &conn_opts, - true, - 0, - StacksEpoch::unit_test_pre_2_05(0), - ); + let (mut peerdb_1, mut sortdb_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( + "convo_handshake_ping_loop_1", + &burnchain, + 0x9abcdef0, + 12350, + "http://peer1.com".into(), + &vec![], + &vec![], + DEFAULT_SERVICES, + ); + let (mut peerdb_2, mut sortdb_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( + "convo_handshake_ping_loop_2", + &burnchain, + 0x9abcdef0, + 12351, + "http://peer2.com".into(), + &vec![], + &vec![], + DEFAULT_SERVICES, + ); - // no peer public keys known yet - assert!(convo_1.connection.get_public_key().is_none()); - assert!(convo_2.connection.get_public_key().is_none()); + db_setup(&mut peerdb_1, &mut sortdb_1, &socketaddr_1, &chain_view); + db_setup(&mut peerdb_2, &mut sortdb_2, &socketaddr_2, &chain_view); + + let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); + let local_peer_2 = PeerDB::get_local_peer(&peerdb_2.conn()).unwrap(); + + let mut convo_1 = ConversationP2P::new( + 123, + 456, + &burnchain, + &socketaddr_2, + &conn_opts, + true, + 0, + StacksEpoch::unit_test_pre_2_05(0), + ); + let mut convo_2 = ConversationP2P::new( + 123, + 456, + &burnchain, + &socketaddr_1, + &conn_opts, + true, + 1, + StacksEpoch::unit_test_pre_2_05(0), + ); + + for i in 0..5 { + // do handshake/ping over and over, with different keys. + // tests re-keying. 
// convo_1 sends a handshake to convo_2 let handshake_data_1 = HandshakeData::from_local_peer(&local_peer_1); @@ -4342,12 +4332,26 @@ mod test { StacksMessageType::Handshake(handshake_data_1.clone()), ) .unwrap(); - let mut rh_1 = convo_1.send_signed_request(handshake_1, 1000000).unwrap(); + let mut rh_handshake_1 = convo_1.send_signed_request(handshake_1, 1000000).unwrap(); - // convo_2 receives it and processes it, and since no one is waiting for it, will forward + // convo_1 sends a ping to convo_2 + let ping_data_1 = PingData::new(); + let ping_1 = convo_1 + .sign_message( + &chain_view, + &local_peer_1.private_key, + StacksMessageType::Ping(ping_data_1.clone()), + ) + .unwrap(); + let mut rh_ping_1 = convo_1.send_signed_request(ping_1, 1000000).unwrap(); + + // convo_2 receives the handshake and ping and processes both, and since no one is waiting for the handshake, will forward // it along to the chat caller (us) - test_debug!("send handshake"); - convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2); + convo_send_recv( + &mut convo_1, + vec![&mut rh_handshake_1, &mut rh_ping_1], + &mut convo_2, + ); let unhandled_2 = convo_2 .chat( &local_peer_2, @@ -4361,8 +4365,11 @@ mod test { .unwrap(); // convo_1 has a handshakeaccept - test_debug!("send handshake-accept"); - convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1); + convo_send_recv( + &mut convo_2, + vec![&mut rh_handshake_1, &mut rh_ping_1], + &mut convo_1, + ); let unhandled_1 = convo_1 .chat( &local_peer_1, @@ -4375,10 +4382,11 @@ mod test { ) .unwrap(); - let reply_1 = rh_1.recv(0).unwrap(); + let reply_handshake_1 = rh_handshake_1.recv(0).unwrap(); + let reply_ping_1 = rh_ping_1.recv(0).unwrap(); assert_eq!(unhandled_1.len(), 0); - assert_eq!(unhandled_2.len(), 1); + assert_eq!(unhandled_2.len(), 1); // only the handshake is given back. 
the ping is consumed // convo 2 returns the handshake from convo 1 match unhandled_2[0].payload { @@ -4390,9 +4398,20 @@ mod test { } }; + // convo 2 replied to convo 1 with a matching pong + match reply_ping_1.payload { + StacksMessageType::Pong(ref data) => { + assert_eq!(data.nonce, ping_data_1.nonce); + } + _ => { + assert!(false); + } + } + // received a valid HandshakeAccept from peer 2 - match reply_1.payload { - StacksMessageType::HandshakeAccept(ref data) => { + match reply_handshake_1.payload { + StacksMessageType::HandshakeAccept(ref data) + | StacksMessageType::StackerDBHandshakeAccept(ref data, ..) => { assert_eq!(data.handshake.addrbytes, local_peer_2.addrbytes); assert_eq!(data.handshake.port, local_peer_2.port); assert_eq!(data.handshake.services, local_peer_2.services); @@ -4406,7 +4425,6 @@ mod test { data.handshake.expire_block_height, local_peer_2.private_key_expire ); - assert_eq!(data.handshake.data_url, "http://peer2.com".into()); assert_eq!(data.heartbeat_interval, conn_opts.heartbeat); } _ => { @@ -4414,195 +4432,81 @@ mod test { } }; - // convo_1 sends a getblocksinv to convo_2 for all the blocks in the last reward cycle - let convo_1_chaintip = - SortitionDB::get_canonical_burn_chain_tip(sortdb_1.conn()).unwrap(); - let convo_1_ancestor = { - let ic = sortdb_1.index_conn(); - SortitionDB::get_ancestor_snapshot( - &ic, - convo_1_chaintip.block_height - 10 - 1, - &convo_1_chaintip.sortition_id, - ) - .unwrap() - .unwrap() - }; - - let getblocksdata_1 = GetBlocksInv { - consensus_hash: convo_1_ancestor.consensus_hash, - num_blocks: 10 as u16, - }; - let getblocksdata_1_msg = convo_1 - .sign_message( - &chain_view, - &local_peer_1.private_key, - StacksMessageType::GetBlocksInv(getblocksdata_1.clone()), - ) - .unwrap(); - let mut rh_1 = convo_1 - .send_signed_request(getblocksdata_1_msg, 10000000) - .unwrap(); + // convo_2 got updated with convo_1's peer info, and default heartbeat filled in + assert_eq!(convo_2.peer_heartbeat, 3600); + 
assert_eq!( + convo_2 + .connection + .get_public_key() + .unwrap() + .to_bytes_compressed(), + Secp256k1PublicKey::from_private(&local_peer_1.private_key).to_bytes_compressed() + ); - // convo_2 receives it, and handles it - test_debug!("send getblocksinv"); - convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2); - let unhandled_2 = convo_2 - .chat( - &local_peer_2, - &mut peerdb_2, - &sortdb_2, - &pox_id_2, - &mut chainstate_2, - &mut BlockHeaderCache::new(), - &chain_view, - ) - .unwrap(); + // convo_1 got updated with convo_2's peer info, as well as heartbeat + assert_eq!(convo_1.peer_heartbeat, conn_opts.heartbeat); + assert_eq!( + convo_1 + .connection + .get_public_key() + .unwrap() + .to_bytes_compressed(), + Secp256k1PublicKey::from_private(&local_peer_2.private_key).to_bytes_compressed() + ); - // convo_1 gets back a blocksinv message - test_debug!("send blocksinv"); - convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1); - let unhandled_1 = convo_1 - .chat( - &local_peer_1, - &mut peerdb_1, - &sortdb_1, - &pox_id_1, - &mut chainstate_1, - &mut BlockHeaderCache::new(), - &chain_view, - ) - .unwrap(); + // regenerate keys and expiries in peer 1 + let new_privkey = Secp256k1PrivateKey::new(); + { + let mut tx = peerdb_1.tx_begin().unwrap(); + PeerDB::set_local_private_key(&mut tx, &new_privkey, (12350 + i) as u64).unwrap(); + tx.commit().unwrap(); + } + } + } - let reply_1 = rh_1.recv(0).unwrap(); + #[test] + fn convo_nack_unsolicited() { + let conn_opts = ConnectionOptions::default(); + let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + let socketaddr_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081); - // no unhandled messages forwarded - assert_eq!(unhandled_1, vec![]); - assert_eq!(unhandled_2, vec![]); + let first_burn_hash = BurnchainHeaderHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(); - // convo 2 returned a block-inv for all 
blocks - match reply_1.payload { - StacksMessageType::BlocksInv(ref data) => { - assert_eq!(data.bitlen, 10); - test_debug!("data: {:?}", data); + let burnchain = testing_burnchain_config(); - // all burn blocks had sortitions, but we have no Stacks blocks :( - for i in 0..data.bitlen { - assert!(!data.has_ith_block(i)); - } - } - x => { - error!("received invalid payload: {:?}", &x); - assert!(false); - } - } + let mut chain_view = BurnchainView { + burn_block_height: 12348, + burn_block_hash: BurnchainHeaderHash([0x11; 32]), + burn_stable_block_height: 12341, + burn_stable_block_hash: BurnchainHeaderHash([0x22; 32]), + last_burn_block_hashes: HashMap::new(), + rc_consensus_hash: ConsensusHash([0x33; 20]), + }; + chain_view.make_test_data(); - // request for a non-existent consensus hash - let getblocksdata_diverged_1 = GetBlocksInv { - consensus_hash: ConsensusHash([0xff; 20]), - num_blocks: GETPOXINV_MAX_BITLEN as u16, - }; - let getblocksdata_diverged_1_msg = convo_1 - .sign_message( - &chain_view, - &local_peer_1.private_key, - StacksMessageType::GetBlocksInv(getblocksdata_diverged_1.clone()), - ) - .unwrap(); - let mut rh_1 = convo_1 - .send_signed_request(getblocksdata_diverged_1_msg, 10000000) - .unwrap(); - - // convo_2 receives it, and handles it - test_debug!("send getblocksinv (diverged)"); - convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2); - let unhandled_2 = convo_2 - .chat( - &local_peer_2, - &mut peerdb_2, - &sortdb_2, - &pox_id_2, - &mut chainstate_2, - &mut BlockHeaderCache::new(), - &chain_view, - ) - .unwrap(); - - // convo_1 gets back a nack message - test_debug!("send nack (diverged)"); - convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1); - let unhandled_1 = convo_1 - .chat( - &local_peer_1, - &mut peerdb_1, - &sortdb_1, - &pox_id_1, - &mut chainstate_1, - &mut BlockHeaderCache::new(), - &chain_view, - ) - .unwrap(); - - let reply_1 = rh_1.recv(0).unwrap(); - - // no unhandled messages forwarded - 
assert_eq!(unhandled_1, vec![]); - assert_eq!(unhandled_2, vec![]); - - // convo 2 returned a nack with the appropriate error message - match reply_1.payload { - StacksMessageType::Nack(ref data) => { - assert_eq!(data.error_code, NackErrorCodes::NoSuchBurnchainBlock); - } - _ => { - assert!(false); - } - } - }) - } - - #[test] - fn convo_natpunch() { - let conn_opts = ConnectionOptions::default(); - let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081); - let socketaddr_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); - - let first_burn_hash = BurnchainHeaderHash::from_hex( - "0000000000000000000000000000000000000000000000000000000000000000", - ) - .unwrap(); - - let burnchain = testing_burnchain_config(); - - let mut chain_view = BurnchainView { - burn_block_height: 12348, - burn_block_hash: BurnchainHeaderHash([0x11; 32]), - burn_stable_block_height: 12341, - burn_stable_block_hash: BurnchainHeaderHash([0x22; 32]), - last_burn_block_hashes: HashMap::new(), - rc_consensus_hash: ConsensusHash([0x33; 20]), - }; - chain_view.make_test_data(); - - let first_burn_hash = BurnchainHeaderHash::from_hex( - "0000000000000000000000000000000000000000000000000000000000000000", - ) - .unwrap(); + let first_burn_hash = BurnchainHeaderHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(); let (mut peerdb_1, mut sortdb_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( - "convo_natpunch_1", + "convo_nack_unsolicited_1", &burnchain, 0x9abcdef0, - 12352, + 12350, "http://peer1.com".into(), &vec![], &vec![], DEFAULT_SERVICES, ); let (mut peerdb_2, mut sortdb_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( - "convo_natpunch_2", + "convo_nack_unsolicited_2", &burnchain, 0x9abcdef0, - 12353, + 12351, "http://peer2.com".into(), &vec![], &vec![], @@ -4636,21 +4540,23 @@ mod test { StacksEpoch::unit_test_pre_2_05(0), ); - // convo_1 sends natpunch request to convo_2 - let natpunch_1 = 
convo_1 + // no peer public keys known yet + assert!(convo_1.connection.get_public_key().is_none()); + assert!(convo_2.connection.get_public_key().is_none()); + + // convo_1 sends a ping to convo_2 + let ping_data_1 = PingData::new(); + let ping_1 = convo_1 .sign_message( &chain_view, &local_peer_1.private_key, - StacksMessageType::NatPunchRequest(0x12345678), + StacksMessageType::Ping(ping_data_1.clone()), ) .unwrap(); - let mut rh_natpunch_1 = convo_1 - .send_signed_request(natpunch_1.clone(), 1000000) - .unwrap(); + let mut rh_ping_1 = convo_1.send_signed_request(ping_1, 1000000).unwrap(); - // convo_2 receives the natpunch request and processes it - test_debug!("send natpunch {:?}", &natpunch_1); - convo_send_recv(&mut convo_1, vec![&mut rh_natpunch_1], &mut convo_2); + // convo_2 will reply with a nack since peer_1 hasn't authenticated yet + convo_send_recv(&mut convo_1, vec![&mut rh_ping_1], &mut convo_2); let unhandled_2 = convo_2 .chat( &local_peer_2, @@ -4663,9 +4569,8 @@ mod test { ) .unwrap(); - // convo_1 gets back a natpunch reply - test_debug!("reply natpunch-reply"); - convo_send_recv(&mut convo_2, vec![&mut rh_natpunch_1], &mut convo_1); + // convo_1 has a nack + convo_send_recv(&mut convo_2, vec![&mut rh_ping_1], &mut convo_1); let unhandled_1 = convo_1 .chat( &local_peer_1, @@ -4678,27 +4583,33 @@ mod test { ) .unwrap(); - let natpunch_reply_1 = rh_natpunch_1.recv(0).unwrap(); + let reply_1 = rh_ping_1.recv(0).unwrap(); - // handled and consumed + // convo_2 gives back nothing assert_eq!(unhandled_1.len(), 0); assert_eq!(unhandled_2.len(), 0); - // convo_2 replies the natpunch data for convo_1 -- i.e. 
what convo_2 thinks convo_1's IP - // address is - match natpunch_reply_1.payload { - StacksMessageType::NatPunchReply(ref data) => { - assert_eq!(data.addrbytes, PeerAddress::from_socketaddr(&socketaddr_1)); - assert_eq!(data.nonce, 0x12345678); + // convo_1 got a NACK + match reply_1.payload { + StacksMessageType::Nack(ref data) => { + assert_eq!(data.error_code, NackErrorCodes::HandshakeRequired); } _ => { assert!(false); } - } + }; + + // convo_2 did NOT get updated with convo_1's peer info + assert_eq!(convo_2.peer_heartbeat, 0); + assert!(convo_2.connection.get_public_key().is_none()); + + // convo_1 did NOT get updated + assert_eq!(convo_1.peer_heartbeat, 0); + assert!(convo_2.connection.get_public_key().is_none()); } #[test] - fn convo_is_preamble_valid() { + fn convo_ignore_unsolicited_handshake() { let conn_opts = ConnectionOptions::default(); let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); let socketaddr_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081); @@ -4720,125 +4631,203 @@ mod test { }; chain_view.make_test_data(); - let mut peerdb_1 = PeerDB::connect_memory( + let first_burn_hash = BurnchainHeaderHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(); + + let (mut peerdb_1, mut sortdb_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( + "convo_ignore_unsolicited_handshake_1", + &burnchain, 0x9abcdef0, - 0, 12350, "http://peer1.com".into(), &vec![], &vec![], - ) - .unwrap(); - - let mut sortdb_1 = SortitionDB::connect_test(12300, &first_burn_hash).unwrap(); + DEFAULT_SERVICES, + ); + let (mut peerdb_2, mut sortdb_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( + "convo_ignore_unsolicited_handshake_2", + &burnchain, + 0x9abcdef0, + 12351, + "http://peer2.com".into(), + &vec![], + &vec![], + DEFAULT_SERVICES, + ); db_setup(&mut peerdb_1, &mut sortdb_1, &socketaddr_1, &chain_view); + db_setup(&mut peerdb_2, &mut sortdb_2, &socketaddr_2, 
&chain_view); let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); + let local_peer_2 = PeerDB::get_local_peer(&peerdb_2.conn()).unwrap(); - // network ID check - { - let mut convo_bad = ConversationP2P::new( - 123, - 456, - &burnchain, - &socketaddr_2, - &conn_opts, - true, - 0, - StacksEpoch::unit_test_pre_2_05(0), - ); + let mut convo_1 = ConversationP2P::new( + 123, + 456, + &burnchain, + &socketaddr_2, + &conn_opts, + true, + 0, + StacksEpoch::unit_test_pre_2_05(0), + ); + let mut convo_2 = ConversationP2P::new( + 123, + 456, + &burnchain, + &socketaddr_1, + &conn_opts, + true, + 0, + StacksEpoch::unit_test_pre_2_05(0), + ); - let ping_data = PingData::new(); - convo_bad.network_id += 1; - let ping_bad = convo_bad + // no peer public keys known yet + assert!(convo_1.connection.get_public_key().is_none()); + assert!(convo_2.connection.get_public_key().is_none()); + + // convo_1 sends unauthenticated control-plane messages to convo_2 + let unauthed_messages = { + let accept_data_1 = HandshakeAcceptData::new(&local_peer_1, 1000000000); + let accept_1 = convo_1 .sign_message( &chain_view, &local_peer_1.private_key, - StacksMessageType::Ping(ping_data.clone()), + StacksMessageType::HandshakeAccept(accept_data_1.clone()), ) .unwrap(); - convo_bad.network_id -= 1; - - assert_eq!( - convo_bad.is_preamble_valid(&ping_bad, &chain_view), - Err(net_error::InvalidMessage) - ); - } - // stable block height check - { - let mut convo_bad = ConversationP2P::new( - 123, - 456, - &burnchain, - &socketaddr_2, - &conn_opts, - true, - 0, - StacksEpoch::unit_test_pre_2_05(0), + let stackerdb_accept_data_1 = StacksMessageType::StackerDBHandshakeAccept( + accept_data_1.clone(), + StackerDBHandshakeData { + rc_consensus_hash: chain_view.rc_consensus_hash.clone(), + // placeholder sbtc address for now + smart_contracts: vec![ + ContractId::parse("SP000000000000000000002Q6VF78.sbtc").unwrap() + ], + }, ); - let ping_data = PingData::new(); - - let mut chain_view_bad = 
chain_view.clone(); - chain_view_bad.burn_stable_block_height -= 1; - - let ping_bad = convo_bad + let stackerdb_accept_1 = convo_1 .sign_message( - &chain_view_bad, + &chain_view, &local_peer_1.private_key, - StacksMessageType::Ping(ping_data.clone()), + stackerdb_accept_data_1, ) .unwrap(); - assert_eq!( - convo_bad.is_preamble_valid(&ping_bad, &chain_view), - Err(net_error::InvalidMessage) - ); - } - - // unstable burn header hash mismatch - { - let mut convo_bad = ConversationP2P::new( - 123, - 456, - &burnchain, - &socketaddr_2, - &conn_opts, - true, - 0, - StacksEpoch::unit_test_pre_2_05(0), - ); + vec![accept_1, stackerdb_accept_1] + }; - let ping_data = PingData::new(); + for unauthed_msg in unauthed_messages.into_iter() { + let mut rh_1 = convo_1.send_signed_request(unauthed_msg, 1000000).unwrap(); - let mut chain_view_bad = chain_view.clone(); - let old = chain_view_bad.burn_block_hash.clone(); - chain_view_bad.burn_block_hash = BurnchainHeaderHash([0x33; 32]); - chain_view_bad.last_burn_block_hashes.insert( - chain_view_bad.burn_block_height, - chain_view_bad.burn_block_hash.clone(), - ); + convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2); + let unhandled_2 = convo_2 + .chat( + &local_peer_2, + &mut peerdb_2, + &sortdb_2, + &pox_id_2, + &mut chainstate_2, + &mut BlockHeaderCache::new(), + &chain_view, + ) + .unwrap(); - let ping_bad = convo_bad - .sign_message( - &chain_view_bad, - &local_peer_1.private_key, - StacksMessageType::Ping(ping_data.clone()), + convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1); + let unhandled_1 = convo_1 + .chat( + &local_peer_1, + &mut peerdb_1, + &sortdb_1, + &pox_id_1, + &mut chainstate_1, + &mut BlockHeaderCache::new(), + &chain_view, ) .unwrap(); - // considered valid as long as the stable burn header hash is valid - assert_eq!( - convo_bad.is_preamble_valid(&ping_bad, &chain_view), - Ok(true) - ); + // connection should break off since nodes ignore unsolicited messages + match 
rh_1.recv(1).unwrap_err() { + net_error::ConnectionBroken => {} + e => { + panic!( + "Unexpected error from consuming unsolicited message: {:?}", + &e + ); + } + } + + // convo_2 gives back nothing + assert_eq!(unhandled_1.len(), 0); + assert_eq!(unhandled_2.len(), 0); + + // convo_2 did NOT get updated with convo_1's peer info + assert_eq!(convo_2.peer_heartbeat, 0); + assert!(convo_2.connection.get_public_key().is_none()); + + // convo_1 did NOT get updated + assert_eq!(convo_1.peer_heartbeat, 0); + assert!(convo_2.connection.get_public_key().is_none()); } + } - // stable burn header hash mismatch - { - let mut convo_bad = ConversationP2P::new( + #[test] + fn convo_handshake_getblocksinv() { + with_timeout(100, || { + let conn_opts = ConnectionOptions::default(); + + let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + let socketaddr_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081); + + let first_burn_hash = BurnchainHeaderHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(); + + let burnchain = testing_burnchain_config(); + + let mut chain_view = BurnchainView { + burn_block_height: 12331, // burnchain.reward_cycle_to_block_height(burnchain.block_height_to_reward_cycle(12348 - 8).unwrap()), + burn_block_hash: BurnchainHeaderHash([0x11; 32]), + burn_stable_block_height: 12331 - 7, // burnchain.reward_cycle_to_block_height(burnchain.block_height_to_reward_cycle(12341 - 8).unwrap() - 1), + burn_stable_block_hash: BurnchainHeaderHash([0x22; 32]), + last_burn_block_hashes: HashMap::new(), + rc_consensus_hash: ConsensusHash([0x33; 20]), + }; + chain_view.make_test_data(); + + let (mut peerdb_1, mut sortdb_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( + "convo_handshake_getblocksinv_1", + &burnchain, + 0x9abcdef0, + 12350, + "http://peer1.com".into(), + &vec![], + &vec![], + DEFAULT_SERVICES, + ); + let (mut peerdb_2, mut sortdb_2, pox_id_2, mut chainstate_2) = 
make_test_chain_dbs( + "convo_handshake_getblocksinv_2", + &burnchain, + 0x9abcdef0, + 12351, + "http://peer2.com".into(), + &vec![], + &vec![], + DEFAULT_SERVICES, + ); + + db_setup(&mut peerdb_1, &mut sortdb_1, &socketaddr_1, &chain_view); + db_setup(&mut peerdb_2, &mut sortdb_2, &socketaddr_2, &chain_view); + + let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); + let local_peer_2 = PeerDB::get_local_peer(&peerdb_2.conn()).unwrap(); + + let mut convo_1 = ConversationP2P::new( 123, 456, &burnchain, @@ -4848,132 +4837,1446 @@ mod test { 0, StacksEpoch::unit_test_pre_2_05(0), ); - - let ping_data = PingData::new(); - - let mut chain_view_bad = chain_view.clone(); - let old = chain_view_bad.burn_stable_block_hash.clone(); - chain_view_bad.burn_stable_block_hash = BurnchainHeaderHash([0x11; 32]); - chain_view_bad.last_burn_block_hashes.insert( - chain_view_bad.burn_stable_block_height, - chain_view_bad.burn_stable_block_hash.clone(), + let mut convo_2 = ConversationP2P::new( + 123, + 456, + &burnchain, + &socketaddr_1, + &conn_opts, + true, + 0, + StacksEpoch::unit_test_pre_2_05(0), ); - let ping_bad = convo_bad + // no peer public keys known yet + assert!(convo_1.connection.get_public_key().is_none()); + assert!(convo_2.connection.get_public_key().is_none()); + + // convo_1 sends a handshake to convo_2 + let handshake_data_1 = HandshakeData::from_local_peer(&local_peer_1); + let handshake_1 = convo_1 .sign_message( - &chain_view_bad, + &chain_view, &local_peer_1.private_key, - StacksMessageType::Ping(ping_data.clone()), + StacksMessageType::Handshake(handshake_data_1.clone()), ) .unwrap(); + let mut rh_1 = convo_1.send_signed_request(handshake_1, 1000000).unwrap(); - assert_eq!( - convo_bad.is_preamble_valid(&ping_bad, &chain_view), - Err(net_error::InvalidMessage) - ); - } + // convo_2 receives it and processes it, and since no one is waiting for it, will forward + // it along to the chat caller (us) + test_debug!("send handshake"); + 
convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2); + let unhandled_2 = convo_2 + .chat( + &local_peer_2, + &mut peerdb_2, + &sortdb_2, + &pox_id_2, + &mut chainstate_2, + &mut BlockHeaderCache::new(), + &chain_view, + ) + .unwrap(); - // stale peer version max-epoch - { - // convo thinks its epoch 2.05 - let epochs = StacksEpoch::unit_test_2_05(chain_view.burn_block_height - 4); - let cur_epoch_idx = - StacksEpoch::find_epoch(&epochs, chain_view.burn_block_height).unwrap(); - let cur_epoch = epochs[cur_epoch_idx].clone(); - assert_eq!(cur_epoch.epoch_id, StacksEpochId::Epoch2_05); + // convo_1 has a handshakeaccept + test_debug!("send handshake-accept"); + convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1); + let unhandled_1 = convo_1 + .chat( + &local_peer_1, + &mut peerdb_1, + &sortdb_1, + &pox_id_1, + &mut chainstate_1, + &mut BlockHeaderCache::new(), + &chain_view, + ) + .unwrap(); - eprintln!( - "cur_epoch = {:?}, burn height = {}", - &cur_epoch, chain_view.burn_block_height - ); + let reply_1 = rh_1.recv(0).unwrap(); - let mut convo_bad = ConversationP2P::new( - 123, - 0x18000005, - &burnchain, - &socketaddr_2, - &conn_opts, - true, - 0, - epochs, - ); + assert_eq!(unhandled_1.len(), 0); + assert_eq!(unhandled_2.len(), 1); + + // convo 2 returns the handshake from convo 1 + match unhandled_2[0].payload { + StacksMessageType::Handshake(ref data) => { + assert_eq!(handshake_data_1, *data); + } + _ => { + assert!(false); + } + }; + + // received a valid HandshakeAccept from peer 2 + match reply_1.payload { + StacksMessageType::HandshakeAccept(ref data) => { + assert_eq!(data.handshake.addrbytes, local_peer_2.addrbytes); + assert_eq!(data.handshake.port, local_peer_2.port); + assert_eq!(data.handshake.services, local_peer_2.services); + assert_eq!( + data.handshake.node_public_key, + StacksPublicKeyBuffer::from_public_key(&Secp256k1PublicKey::from_private( + &local_peer_2.private_key + )) + ); + assert_eq!( + 
data.handshake.expire_block_height, + local_peer_2.private_key_expire + ); + assert_eq!(data.handshake.data_url, "http://peer2.com".into()); + assert_eq!(data.heartbeat_interval, conn_opts.heartbeat); + } + _ => { + assert!(false); + } + }; + + // convo_1 sends a getblocksinv to convo_2 for all the blocks in the last reward cycle + let convo_1_chaintip = + SortitionDB::get_canonical_burn_chain_tip(sortdb_1.conn()).unwrap(); + let convo_1_ancestor = { + let ic = sortdb_1.index_conn(); + SortitionDB::get_ancestor_snapshot( + &ic, + convo_1_chaintip.block_height - 10 - 1, + &convo_1_chaintip.sortition_id, + ) + .unwrap() + .unwrap() + }; + + let getblocksdata_1 = GetBlocksInv { + consensus_hash: convo_1_ancestor.consensus_hash, + num_blocks: 10 as u16, + }; + let getblocksdata_1_msg = convo_1 + .sign_message( + &chain_view, + &local_peer_1.private_key, + StacksMessageType::GetBlocksInv(getblocksdata_1.clone()), + ) + .unwrap(); + let mut rh_1 = convo_1 + .send_signed_request(getblocksdata_1_msg, 10000000) + .unwrap(); + + // convo_2 receives it, and handles it + test_debug!("send getblocksinv"); + convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2); + let unhandled_2 = convo_2 + .chat( + &local_peer_2, + &mut peerdb_2, + &sortdb_2, + &pox_id_2, + &mut chainstate_2, + &mut BlockHeaderCache::new(), + &chain_view, + ) + .unwrap(); + + // convo_1 gets back a blocksinv message + test_debug!("send blocksinv"); + convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1); + let unhandled_1 = convo_1 + .chat( + &local_peer_1, + &mut peerdb_1, + &sortdb_1, + &pox_id_1, + &mut chainstate_1, + &mut BlockHeaderCache::new(), + &chain_view, + ) + .unwrap(); + + let reply_1 = rh_1.recv(0).unwrap(); + + // no unhandled messages forwarded + assert_eq!(unhandled_1, vec![]); + assert_eq!(unhandled_2, vec![]); + + // convo 2 returned a block-inv for all blocks + match reply_1.payload { + StacksMessageType::BlocksInv(ref data) => { + assert_eq!(data.bitlen, 10); + 
test_debug!("data: {:?}", data); + + // all burn blocks had sortitions, but we have no Stacks blocks :( + for i in 0..data.bitlen { + assert!(!data.has_ith_block(i)); + } + } + x => { + error!("received invalid payload: {:?}", &x); + assert!(false); + } + } + + // request for a non-existent consensus hash + let getblocksdata_diverged_1 = GetBlocksInv { + consensus_hash: ConsensusHash([0xff; 20]), + num_blocks: GETPOXINV_MAX_BITLEN as u16, + }; + let getblocksdata_diverged_1_msg = convo_1 + .sign_message( + &chain_view, + &local_peer_1.private_key, + StacksMessageType::GetBlocksInv(getblocksdata_diverged_1.clone()), + ) + .unwrap(); + let mut rh_1 = convo_1 + .send_signed_request(getblocksdata_diverged_1_msg, 10000000) + .unwrap(); + + // convo_2 receives it, and handles it + test_debug!("send getblocksinv (diverged)"); + convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2); + let unhandled_2 = convo_2 + .chat( + &local_peer_2, + &mut peerdb_2, + &sortdb_2, + &pox_id_2, + &mut chainstate_2, + &mut BlockHeaderCache::new(), + &chain_view, + ) + .unwrap(); + + // convo_1 gets back a nack message + test_debug!("send nack (diverged)"); + convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1); + let unhandled_1 = convo_1 + .chat( + &local_peer_1, + &mut peerdb_1, + &sortdb_1, + &pox_id_1, + &mut chainstate_1, + &mut BlockHeaderCache::new(), + &chain_view, + ) + .unwrap(); + + let reply_1 = rh_1.recv(0).unwrap(); + + // no unhandled messages forwarded + assert_eq!(unhandled_1, vec![]); + assert_eq!(unhandled_2, vec![]); + + // convo 2 returned a nack with the appropriate error message + match reply_1.payload { + StacksMessageType::Nack(ref data) => { + assert_eq!(data.error_code, NackErrorCodes::NoSuchBurnchainBlock); + } + _ => { + assert!(false); + } + } + }) + } + + #[test] + fn convo_natpunch() { + let conn_opts = ConnectionOptions::default(); + let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081); + let socketaddr_2 = 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + + let first_burn_hash = BurnchainHeaderHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(); + + let burnchain = testing_burnchain_config(); + + let mut chain_view = BurnchainView { + burn_block_height: 12348, + burn_block_hash: BurnchainHeaderHash([0x11; 32]), + burn_stable_block_height: 12341, + burn_stable_block_hash: BurnchainHeaderHash([0x22; 32]), + last_burn_block_hashes: HashMap::new(), + rc_consensus_hash: ConsensusHash([0x33; 20]), + }; + chain_view.make_test_data(); + + let first_burn_hash = BurnchainHeaderHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(); + + let (mut peerdb_1, mut sortdb_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( + "convo_natpunch_1", + &burnchain, + 0x9abcdef0, + 12352, + "http://peer1.com".into(), + &vec![], + &vec![], + DEFAULT_SERVICES, + ); + let (mut peerdb_2, mut sortdb_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( + "convo_natpunch_2", + &burnchain, + 0x9abcdef0, + 12353, + "http://peer2.com".into(), + &vec![], + &vec![], + DEFAULT_SERVICES, + ); + + db_setup(&mut peerdb_1, &mut sortdb_1, &socketaddr_1, &chain_view); + db_setup(&mut peerdb_2, &mut sortdb_2, &socketaddr_2, &chain_view); + + let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); + let local_peer_2 = PeerDB::get_local_peer(&peerdb_2.conn()).unwrap(); + + let mut convo_1 = ConversationP2P::new( + 123, + 456, + &burnchain, + &socketaddr_2, + &conn_opts, + true, + 0, + StacksEpoch::unit_test_pre_2_05(0), + ); + let mut convo_2 = ConversationP2P::new( + 123, + 456, + &burnchain, + &socketaddr_1, + &conn_opts, + true, + 0, + StacksEpoch::unit_test_pre_2_05(0), + ); + + // convo_1 sends natpunch request to convo_2 + let natpunch_1 = convo_1 + .sign_message( + &chain_view, + &local_peer_1.private_key, + StacksMessageType::NatPunchRequest(0x12345678), + ) + .unwrap(); + let 
mut rh_natpunch_1 = convo_1 + .send_signed_request(natpunch_1.clone(), 1000000) + .unwrap(); + + // convo_2 receives the natpunch request and processes it + test_debug!("send natpunch {:?}", &natpunch_1); + convo_send_recv(&mut convo_1, vec![&mut rh_natpunch_1], &mut convo_2); + let unhandled_2 = convo_2 + .chat( + &local_peer_2, + &mut peerdb_2, + &sortdb_2, + &pox_id_2, + &mut chainstate_2, + &mut BlockHeaderCache::new(), + &chain_view, + ) + .unwrap(); + + // convo_1 gets back a natpunch reply + test_debug!("reply natpunch-reply"); + convo_send_recv(&mut convo_2, vec![&mut rh_natpunch_1], &mut convo_1); + let unhandled_1 = convo_1 + .chat( + &local_peer_1, + &mut peerdb_1, + &sortdb_1, + &pox_id_1, + &mut chainstate_1, + &mut BlockHeaderCache::new(), + &chain_view, + ) + .unwrap(); + + let natpunch_reply_1 = rh_natpunch_1.recv(0).unwrap(); + + // handled and consumed + assert_eq!(unhandled_1.len(), 0); + assert_eq!(unhandled_2.len(), 0); + + // convo_2 replies the natpunch data for convo_1 -- i.e. 
what convo_2 thinks convo_1's IP + // address is + match natpunch_reply_1.payload { + StacksMessageType::NatPunchReply(ref data) => { + assert_eq!(data.addrbytes, PeerAddress::from_socketaddr(&socketaddr_1)); + assert_eq!(data.nonce, 0x12345678); + } + _ => { + assert!(false); + } + } + } + + #[test] + fn convo_is_preamble_valid() { + let conn_opts = ConnectionOptions::default(); + let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + let socketaddr_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081); + + let first_burn_hash = BurnchainHeaderHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(); + + let burnchain = testing_burnchain_config(); + + let mut chain_view = BurnchainView { + burn_block_height: 12348, + burn_block_hash: BurnchainHeaderHash([0x11; 32]), + burn_stable_block_height: 12341, + burn_stable_block_hash: BurnchainHeaderHash([0x22; 32]), + last_burn_block_hashes: HashMap::new(), + rc_consensus_hash: ConsensusHash([0x33; 20]), + }; + chain_view.make_test_data(); + + let mut peerdb_1 = PeerDB::connect_memory( + 0x9abcdef0, + 0, + 12350, + "http://peer1.com".into(), + &vec![], + &vec![], + ) + .unwrap(); + + let mut sortdb_1 = SortitionDB::connect_test(12300, &first_burn_hash).unwrap(); + + db_setup(&mut peerdb_1, &mut sortdb_1, &socketaddr_1, &chain_view); + + let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); + + // network ID check + { + let mut convo_bad = ConversationP2P::new( + 123, + 456, + &burnchain, + &socketaddr_2, + &conn_opts, + true, + 0, + StacksEpoch::unit_test_pre_2_05(0), + ); + + let ping_data = PingData::new(); + convo_bad.network_id += 1; + let ping_bad = convo_bad + .sign_message( + &chain_view, + &local_peer_1.private_key, + StacksMessageType::Ping(ping_data.clone()), + ) + .unwrap(); + convo_bad.network_id -= 1; + + assert_eq!( + convo_bad.is_preamble_valid(&ping_bad, &chain_view), + Err(net_error::InvalidMessage) + ); 
+ } + + // stable block height check + { + let mut convo_bad = ConversationP2P::new( + 123, + 456, + &burnchain, + &socketaddr_2, + &conn_opts, + true, + 0, + StacksEpoch::unit_test_pre_2_05(0), + ); + + let ping_data = PingData::new(); + + let mut chain_view_bad = chain_view.clone(); + chain_view_bad.burn_stable_block_height -= 1; + + let ping_bad = convo_bad + .sign_message( + &chain_view_bad, + &local_peer_1.private_key, + StacksMessageType::Ping(ping_data.clone()), + ) + .unwrap(); + + assert_eq!( + convo_bad.is_preamble_valid(&ping_bad, &chain_view), + Err(net_error::InvalidMessage) + ); + } + + // unstable burn header hash mismatch + { + let mut convo_bad = ConversationP2P::new( + 123, + 456, + &burnchain, + &socketaddr_2, + &conn_opts, + true, + 0, + StacksEpoch::unit_test_pre_2_05(0), + ); + + let ping_data = PingData::new(); + + let mut chain_view_bad = chain_view.clone(); + let old = chain_view_bad.burn_block_hash.clone(); + chain_view_bad.burn_block_hash = BurnchainHeaderHash([0x33; 32]); + chain_view_bad.last_burn_block_hashes.insert( + chain_view_bad.burn_block_height, + chain_view_bad.burn_block_hash.clone(), + ); + + let ping_bad = convo_bad + .sign_message( + &chain_view_bad, + &local_peer_1.private_key, + StacksMessageType::Ping(ping_data.clone()), + ) + .unwrap(); + + // considered valid as long as the stable burn header hash is valid + assert_eq!( + convo_bad.is_preamble_valid(&ping_bad, &chain_view), + Ok(true) + ); + } + + // stable burn header hash mismatch + { + let mut convo_bad = ConversationP2P::new( + 123, + 456, + &burnchain, + &socketaddr_2, + &conn_opts, + true, + 0, + StacksEpoch::unit_test_pre_2_05(0), + ); + + let ping_data = PingData::new(); + + let mut chain_view_bad = chain_view.clone(); + let old = chain_view_bad.burn_stable_block_hash.clone(); + chain_view_bad.burn_stable_block_hash = BurnchainHeaderHash([0x11; 32]); + chain_view_bad.last_burn_block_hashes.insert( + chain_view_bad.burn_stable_block_height, + 
chain_view_bad.burn_stable_block_hash.clone(), + ); + + let ping_bad = convo_bad + .sign_message( + &chain_view_bad, + &local_peer_1.private_key, + StacksMessageType::Ping(ping_data.clone()), + ) + .unwrap(); + + assert_eq!( + convo_bad.is_preamble_valid(&ping_bad, &chain_view), + Err(net_error::InvalidMessage) + ); + } + + // stale peer version max-epoch + { + // convo thinks its epoch 2.05 + let epochs = StacksEpoch::unit_test_2_05(chain_view.burn_block_height - 4); + let cur_epoch_idx = + StacksEpoch::find_epoch(&epochs, chain_view.burn_block_height).unwrap(); + let cur_epoch = epochs[cur_epoch_idx].clone(); + assert_eq!(cur_epoch.epoch_id, StacksEpochId::Epoch2_05); + + eprintln!( + "cur_epoch = {:?}, burn height = {}", + &cur_epoch, chain_view.burn_block_height + ); + + let mut convo_bad = ConversationP2P::new( + 123, + 0x18000005, + &burnchain, + &socketaddr_2, + &conn_opts, + true, + 0, + epochs, + ); + + let ping_data = PingData::new(); + + // give ping a pre-2.05 epoch marker in its peer version + convo_bad.version = 0x18000000; + let ping_bad = convo_bad + .sign_message( + &chain_view, + &local_peer_1.private_key, + StacksMessageType::Ping(ping_data.clone()), + ) + .unwrap(); + convo_bad.version = 0x18000005; + + assert_eq!( + convo_bad.is_preamble_valid(&ping_bad, &chain_view), + Err(net_error::InvalidMessage) + ); + + // give ping the same peer version as the convo + let ping_good = convo_bad + .sign_message( + &chain_view, + &local_peer_1.private_key, + StacksMessageType::Ping(ping_data.clone()), + ) + .unwrap(); + assert_eq!( + convo_bad.is_preamble_valid(&ping_good, &chain_view), + Ok(true) + ); + + // give ping a newer epoch than we support + convo_bad.version = 0x18000006; + let ping_good = convo_bad + .sign_message( + &chain_view, + &local_peer_1.private_key, + StacksMessageType::Ping(ping_data.clone()), + ) + .unwrap(); + convo_bad.version = 0x18000005; + assert_eq!( + convo_bad.is_preamble_valid(&ping_good, &chain_view), + Ok(true) + ); + + // 
give ping an older version, but test with a block in which the ping's version is + // valid + convo_bad.version = 0x18000000; + let ping_old = convo_bad + .sign_message( + &chain_view, + &local_peer_1.private_key, + StacksMessageType::Ping(ping_data.clone()), + ) + .unwrap(); + convo_bad.version = 0x18000005; + + let mut old_chain_view = chain_view.clone(); + old_chain_view.burn_block_height -= 1; + old_chain_view.burn_stable_block_height -= 1; + old_chain_view.last_burn_block_hashes.insert( + old_chain_view.burn_stable_block_height, + BurnchainHeaderHash([0xff; 32]), + ); + assert_eq!( + convo_bad.is_preamble_valid(&ping_old, &old_chain_view), + Ok(true) + ); + } + } + + #[test] + fn convo_process_relayers() { + let conn_opts = ConnectionOptions::default(); + let socketaddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8090); + + let first_burn_hash = BurnchainHeaderHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(); + + let burnchain = testing_burnchain_config(); + + let mut chain_view = BurnchainView { + burn_block_height: 12348, + burn_block_hash: BurnchainHeaderHash([0x11; 32]), + burn_stable_block_height: 12341, + burn_stable_block_hash: BurnchainHeaderHash([0x22; 32]), + last_burn_block_hashes: HashMap::new(), + rc_consensus_hash: ConsensusHash([0x33; 20]), + }; + chain_view.make_test_data(); + + let local_peer = LocalPeer::new( + 123, + burnchain.network_id, + PeerAddress::from_ipv4(127, 0, 0, 1), + NETWORK_P2P_PORT, + None, + get_epoch_time_secs() + 123456, + UrlString::try_from("http://foo.com").unwrap(), + ); + let mut convo = ConversationP2P::new( + 123, + 456, + &burnchain, + &socketaddr, + &conn_opts, + true, + 0, + StacksEpoch::unit_test_pre_2_05(0), + ); + + let payload = StacksMessageType::Nack(NackData { error_code: 123 }); + let msg = convo + .sign_reply(&chain_view, &local_peer.private_key, payload, 123) + .unwrap(); + + // cycles + let relay_cycles = vec![ + RelayData { + peer: 
NeighborAddress { + addrbytes: PeerAddress([0u8; 16]), + port: 123, + public_key_hash: Hash160([0u8; 20]), + }, + seq: 123, + }, + RelayData { + peer: NeighborAddress { + addrbytes: PeerAddress([1u8; 16]), + port: 456, + public_key_hash: Hash160([0u8; 20]), + }, + seq: 456, + }, + ]; + + // contains localpeer + let self_sent = vec![RelayData { + peer: NeighborAddress { + addrbytes: local_peer.addrbytes.clone(), + port: local_peer.port, + public_key_hash: Hash160::from_node_public_key(&StacksPublicKey::from_private( + &local_peer.private_key, + )), + }, + seq: 789, + }]; + + // allowed + let mut relayers = vec![ + RelayData { + peer: NeighborAddress { + addrbytes: PeerAddress([0u8; 16]), + port: 123, + public_key_hash: Hash160([0u8; 20]), + }, + seq: 123, + }, + RelayData { + peer: NeighborAddress { + addrbytes: PeerAddress([1u8; 16]), + port: 456, + public_key_hash: Hash160([1u8; 20]), + }, + seq: 456, + }, + ]; + + assert!(!convo.process_relayers(&local_peer, &msg.preamble, &relay_cycles)); + assert!(!convo.process_relayers(&local_peer, &msg.preamble, &self_sent)); + + assert!(convo.process_relayers(&local_peer, &msg.preamble, &relayers)); + + // stats updated + assert_eq!(convo.stats.relayed_messages.len(), 2); + let relayer_map = convo.stats.take_relayers(); + assert_eq!(convo.stats.relayed_messages.len(), 0); + + for r in relayers.drain(..) 
{ + assert!(relayer_map.contains_key(&r.peer)); + + let stats = relayer_map.get(&r.peer).unwrap(); + assert_eq!(stats.num_messages, 1); + assert_eq!(stats.num_bytes, (msg.preamble.payload_len - 1) as u64); + } + } + + #[test] + fn test_neighbor_stats_healthpoint() { + let mut stats = NeighborStats::new(false); + + assert_eq!(stats.get_health_score(), 0.5); + + for _ in 0..NUM_HEALTH_POINTS - 1 { + stats.add_healthpoint(true); + assert_eq!(stats.get_health_score(), 0.5); + } + + stats.add_healthpoint(true); + assert_eq!(stats.get_health_score(), 1.0); + + for _ in 0..(NUM_HEALTH_POINTS / 2) { + stats.add_healthpoint(false); + } + + assert_eq!(stats.get_health_score(), 0.5); + + for _ in 0..(NUM_HEALTH_POINTS / 2) { + stats.add_healthpoint(false); + } + + assert_eq!(stats.get_health_score(), 0.0); + } + + #[test] + fn test_neighbor_stats_block_push_bandwidth() { + let mut stats = NeighborStats::new(false); + + assert_eq!(stats.get_block_push_bandwidth(), 0.0); + + stats.add_block_push(100); + assert_eq!(stats.get_block_push_bandwidth(), 0.0); + + // this should all happen in one second + let bw_stats = loop { + let mut bw_stats = stats.clone(); + let start = get_epoch_time_secs(); + + for _ in 0..(NUM_BANDWIDTH_POINTS - 1) { + bw_stats.add_block_push(100); + } + + let end = get_epoch_time_secs(); + if end == start { + break bw_stats; + } + }; + + assert_eq!( + bw_stats.get_block_push_bandwidth(), + (NUM_BANDWIDTH_POINTS as f64) * 100.0 + ); + + // space some out; make sure it takes 11 seconds + let bw_stats = loop { + let mut bw_stats = NeighborStats::new(false); + let start = get_epoch_time_secs(); + for _ in 0..11 { + bw_stats.add_block_push(100); + sleep_ms(1001); + } + + let end = get_epoch_time_secs(); + if end == start + 11 { + break bw_stats; + } + }; + + // 100 bytes/sec + assert_eq!(bw_stats.get_block_push_bandwidth(), 110.0); + } + + #[test] + fn test_neighbor_stats_transaction_push_bandwidth() { + let mut stats = NeighborStats::new(false); + + 
assert_eq!(stats.get_transaction_push_bandwidth(), 0.0); + + stats.add_transaction_push(100); + assert_eq!(stats.get_transaction_push_bandwidth(), 0.0); + + // this should all happen in one second + let bw_stats = loop { + let mut bw_stats = stats.clone(); + let start = get_epoch_time_secs(); + + for _ in 0..(NUM_BANDWIDTH_POINTS - 1) { + bw_stats.add_transaction_push(100); + } + + let end = get_epoch_time_secs(); + if end == start { + break bw_stats; + } + }; + + assert_eq!( + bw_stats.get_transaction_push_bandwidth(), + (NUM_BANDWIDTH_POINTS as f64) * 100.0 + ); + + // space some out; make sure it takes 11 seconds + let bw_stats = loop { + let mut bw_stats = NeighborStats::new(false); + let start = get_epoch_time_secs(); + for _ in 0..11 { + bw_stats.add_transaction_push(100); + sleep_ms(1001); + } + + let end = get_epoch_time_secs(); + if end == start + 11 { + break bw_stats; + } + }; + + // 100 bytes/sec + assert_eq!(bw_stats.get_transaction_push_bandwidth(), 110.0); + } + + #[test] + fn test_neighbor_stats_microblocks_push_bandwidth() { + let mut stats = NeighborStats::new(false); + + assert_eq!(stats.get_microblocks_push_bandwidth(), 0.0); + + stats.add_microblocks_push(100); + assert_eq!(stats.get_microblocks_push_bandwidth(), 0.0); + + // this should all happen in one second + let bw_stats = loop { + let mut bw_stats = stats.clone(); + let start = get_epoch_time_secs(); + + for _ in 0..(NUM_BANDWIDTH_POINTS - 1) { + bw_stats.add_microblocks_push(100); + } + + let end = get_epoch_time_secs(); + if end == start { + break bw_stats; + } + }; + + assert_eq!( + bw_stats.get_microblocks_push_bandwidth(), + (NUM_BANDWIDTH_POINTS as f64) * 100.0 + ); + + // space some out; make sure it takes 11 seconds + let bw_stats = loop { + let mut bw_stats = NeighborStats::new(false); + let start = get_epoch_time_secs(); + for _ in 0..11 { + bw_stats.add_microblocks_push(100); + sleep_ms(1001); + } + + let end = get_epoch_time_secs(); + if end == start + 11 { + break bw_stats; 
+ } + }; + + // 100 bytes/sec + assert_eq!(bw_stats.get_microblocks_push_bandwidth(), 110.0); + } + + #[test] + fn test_neighbor_stats_stackerdb_push_bandwidth() { + let mut stats = NeighborStats::new(false); + + assert_eq!(stats.get_stackerdb_push_bandwidth(), 0.0); + + stats.add_stackerdb_push(100); + assert_eq!(stats.get_stackerdb_push_bandwidth(), 0.0); + + // this should all happen in one second + let bw_stats = loop { + let mut bw_stats = stats.clone(); + let start = get_epoch_time_secs(); + + for _ in 0..(NUM_BANDWIDTH_POINTS - 1) { + bw_stats.add_stackerdb_push(100); + } + + let end = get_epoch_time_secs(); + if end == start { + break bw_stats; + } + }; + + assert_eq!( + bw_stats.get_stackerdb_push_bandwidth(), + (NUM_BANDWIDTH_POINTS as f64) * 100.0 + ); + + // space some out; make sure it takes 11 seconds + let bw_stats = loop { + let mut bw_stats = NeighborStats::new(false); + let start = get_epoch_time_secs(); + for _ in 0..11 { + bw_stats.add_stackerdb_push(100); + sleep_ms(1001); + } + + let end = get_epoch_time_secs(); + if end == start + 11 { + break bw_stats; + } + }; + + // 100 bytes/sec + assert_eq!(bw_stats.get_stackerdb_push_bandwidth(), 110.0); + } + + #[test] + fn test_sign_relay_forward_message() { + let conn_opts = ConnectionOptions::default(); + let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081); + + let first_burn_hash = BurnchainHeaderHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(); + + let burnchain = testing_burnchain_config(); + + let mut chain_view = BurnchainView { + burn_block_height: 12348, + burn_block_hash: BurnchainHeaderHash([0x11; 32]), + burn_stable_block_height: 12341, + burn_stable_block_hash: BurnchainHeaderHash([0x22; 32]), + last_burn_block_hashes: HashMap::new(), + rc_consensus_hash: ConsensusHash([0x33; 20]), + }; + chain_view.make_test_data(); + + let (mut peerdb_1, mut sortdb_1, pox_id_1, _) = make_test_chain_dbs( + 
"sign_relay_forward_message_1", + &burnchain, + 0x9abcdef0, + 12352, + "http://peer1.com".into(), + &vec![], + &vec![], + DEFAULT_SERVICES, + ); + + db_setup(&mut peerdb_1, &mut sortdb_1, &socketaddr_1, &chain_view); + + let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); + + let mut convo_1 = ConversationP2P::new( + 123, + 456, + &burnchain, + &socketaddr_1, + &conn_opts, + true, + 0, + StacksEpoch::unit_test_pre_2_05(0), + ); + + let payload = StacksMessageType::Nack(NackData { error_code: 123 }); + let relayers = vec![RelayData { + peer: NeighborAddress { + addrbytes: PeerAddress([0u8; 16]), + port: 123, + public_key_hash: Hash160([0u8; 20]), + }, + seq: 123, + }]; + let msg = convo_1 + .sign_relay_message( + &local_peer_1, + &chain_view, + relayers.clone(), + payload.clone(), + ) + .unwrap(); + + let mut expected_relayers = relayers.clone(); + expected_relayers.push(RelayData { + peer: local_peer_1.to_neighbor_addr(), + seq: 0, + }); + + assert_eq!(msg.relayers, expected_relayers); + + // can't insert a loop + let fail = convo_1 + .sign_relay_message( + &local_peer_1, + &chain_view, + expected_relayers.clone(), + payload.clone(), + ) + .unwrap_err(); + + match fail { + net_error::InvalidMessage => {} + e => { + panic!("FATAL: unexpected error {:?}", &e); + } + } + + // can't forward with a loop either + let fail = convo_1 + .sign_and_forward( + &local_peer_1, + &chain_view, + expected_relayers.clone(), + payload, + ) + .unwrap_err(); + + match fail { + net_error::InvalidMessage => {} + e => { + panic!("FATAL: unexpected error {:?}", &e); + } + } + } + + #[test] + fn test_sign_and_forward() { + let conn_opts = ConnectionOptions::default(); + let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081); + + let first_burn_hash = BurnchainHeaderHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(); + + let burnchain = testing_burnchain_config(); + + let mut chain_view = 
BurnchainView { + burn_block_height: 12348, + burn_block_hash: BurnchainHeaderHash([0x11; 32]), + burn_stable_block_height: 12341, + burn_stable_block_hash: BurnchainHeaderHash([0x22; 32]), + last_burn_block_hashes: HashMap::new(), + rc_consensus_hash: ConsensusHash([0x33; 20]), + }; + chain_view.make_test_data(); + + let (mut peerdb_1, mut sortdb_1, pox_id_1, _) = make_test_chain_dbs( + "sign_and_forward_1", + &burnchain, + 0x9abcdef0, + 12352, + "http://peer1.com".into(), + &vec![], + &vec![], + DEFAULT_SERVICES, + ); + + db_setup(&mut peerdb_1, &mut sortdb_1, &socketaddr_1, &chain_view); + + let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); + + let mut convo_1 = ConversationP2P::new( + 123, + 456, + &burnchain, + &socketaddr_1, + &conn_opts, + true, + 0, + StacksEpoch::unit_test_pre_2_05(0), + ); + + let payload = StacksMessageType::Nack(NackData { error_code: 123 }); + + // should succeed + convo_1 + .sign_and_forward(&local_peer_1, &chain_view, vec![], payload.clone()) + .unwrap(); + } + + #[test] + fn test_validate_block_push() { + let mut conn_opts = ConnectionOptions::default(); + conn_opts.max_block_push_bandwidth = 100; + + let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081); + + let first_burn_hash = BurnchainHeaderHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(); + + let burnchain = testing_burnchain_config(); + + let mut chain_view = BurnchainView { + burn_block_height: 12348, + burn_block_hash: BurnchainHeaderHash([0x11; 32]), + burn_stable_block_height: 12341, + burn_stable_block_hash: BurnchainHeaderHash([0x22; 32]), + last_burn_block_hashes: HashMap::new(), + rc_consensus_hash: ConsensusHash([0x33; 20]), + }; + chain_view.make_test_data(); + + let (mut peerdb_1, mut sortdb_1, pox_id_1, _) = make_test_chain_dbs( + "validate_block_push_1", + &burnchain, + 0x9abcdef0, + 12352, + "http://peer1.com".into(), + &vec![], + &vec![], + DEFAULT_SERVICES, + ); 
+ + db_setup(&mut peerdb_1, &mut sortdb_1, &socketaddr_1, &chain_view); + + let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); + + let mut convo_1 = ConversationP2P::new( + 123, + 456, + &burnchain, + &socketaddr_1, + &conn_opts, + true, + 0, + StacksEpoch::unit_test_pre_2_05(0), + ); + + // NOTE: payload can be anything since we only look at premable length here + let payload = StacksMessageType::Nack(NackData { error_code: 123 }); + + // bad message -- got bad relayers (cycle) + let bad_relayers = vec![ + RelayData { + peer: NeighborAddress { + addrbytes: PeerAddress([0u8; 16]), + port: 123, + public_key_hash: Hash160([0u8; 20]), + }, + seq: 123, + }, + RelayData { + peer: NeighborAddress { + addrbytes: PeerAddress([1u8; 16]), + port: 456, + public_key_hash: Hash160([0u8; 20]), + }, + seq: 456, + }, + ]; + + let mut bad_msg = convo_1 + .sign_relay_message( + &local_peer_1, + &chain_view, + bad_relayers.clone(), + payload.clone(), + ) + .unwrap(); + + bad_msg.preamble.payload_len = 10; + + let err_before = convo_1.stats.msgs_err; + match convo_1 + .validate_blocks_push( + &local_peer_1, + &chain_view, + &bad_msg.preamble, + bad_msg.relayers.clone(), + ) + .unwrap_err() + { + net_error::InvalidMessage => {} + e => { + panic!("Wrong error: {:?}", &e); + } + } + assert_eq!(convo_1.stats.msgs_err, err_before + 1); + + // mock a second local peer with a different private key + let mut local_peer_2 = local_peer_1.clone(); + local_peer_2.private_key = Secp256k1PrivateKey::new(); + + // NOTE: payload can be anything since we only look at premable length here + let payload = StacksMessageType::Nack(NackData { error_code: 123 }); + let mut msg = convo_1 + .sign_relay_message(&local_peer_2, &chain_view, vec![], payload.clone()) + .unwrap(); + + let err_before = convo_1.stats.msgs_err; + + // succeeds because it's the first sample + msg.preamble.payload_len = 106; + assert!(convo_1 + .validate_blocks_push( + &local_peer_1, + &chain_view, + &msg.preamble, + 
msg.relayers.clone() + ) + .unwrap() + .is_none()); + assert_eq!(convo_1.stats.msgs_err, err_before); + + // fails because the second sample says we're over bandwidth + msg.preamble.payload_len = 106; + assert!(convo_1 + .validate_blocks_push( + &local_peer_1, + &chain_view, + &msg.preamble, + msg.relayers.clone() + ) + .unwrap() + .is_some()); + assert_eq!(convo_1.stats.msgs_err, err_before); + } + + #[test] + fn test_validate_transaction_push() { + let mut conn_opts = ConnectionOptions::default(); + conn_opts.max_transaction_push_bandwidth = 100; + + let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081); + + let first_burn_hash = BurnchainHeaderHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(); + + let burnchain = testing_burnchain_config(); + + let mut chain_view = BurnchainView { + burn_block_height: 12348, + burn_block_hash: BurnchainHeaderHash([0x11; 32]), + burn_stable_block_height: 12341, + burn_stable_block_hash: BurnchainHeaderHash([0x22; 32]), + last_burn_block_hashes: HashMap::new(), + rc_consensus_hash: ConsensusHash([0x33; 20]), + }; + chain_view.make_test_data(); + + let (mut peerdb_1, mut sortdb_1, pox_id_1, _) = make_test_chain_dbs( + "validate_transaction_push_1", + &burnchain, + 0x9abcdef0, + 12352, + "http://peer1.com".into(), + &vec![], + &vec![], + DEFAULT_SERVICES, + ); + + db_setup(&mut peerdb_1, &mut sortdb_1, &socketaddr_1, &chain_view); + + let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); + + let mut convo_1 = ConversationP2P::new( + 123, + 456, + &burnchain, + &socketaddr_1, + &conn_opts, + true, + 0, + StacksEpoch::unit_test_pre_2_05(0), + ); + + // NOTE: payload can be anything since we only look at premable length here + let payload = StacksMessageType::Nack(NackData { error_code: 123 }); + + // bad message -- got bad relayers (cycle) + let bad_relayers = vec![ + RelayData { + peer: NeighborAddress { + addrbytes: PeerAddress([0u8; 16]), 
+ port: 123, + public_key_hash: Hash160([0u8; 20]), + }, + seq: 123, + }, + RelayData { + peer: NeighborAddress { + addrbytes: PeerAddress([1u8; 16]), + port: 456, + public_key_hash: Hash160([0u8; 20]), + }, + seq: 456, + }, + ]; - let ping_data = PingData::new(); + let mut bad_msg = convo_1 + .sign_relay_message( + &local_peer_1, + &chain_view, + bad_relayers.clone(), + payload.clone(), + ) + .unwrap(); - // give ping a pre-2.05 epoch marker in its peer version - convo_bad.version = 0x18000000; - let ping_bad = convo_bad - .sign_message( - &chain_view, - &local_peer_1.private_key, - StacksMessageType::Ping(ping_data.clone()), - ) - .unwrap(); - convo_bad.version = 0x18000005; + bad_msg.preamble.payload_len = 10; - assert_eq!( - convo_bad.is_preamble_valid(&ping_bad, &chain_view), - Err(net_error::InvalidMessage) - ); + let err_before = convo_1.stats.msgs_err; + match convo_1 + .validate_transaction_push( + &local_peer_1, + &chain_view, + &bad_msg.preamble, + bad_msg.relayers.clone(), + ) + .unwrap_err() + { + net_error::InvalidMessage => {} + e => { + panic!("Wrong error: {:?}", &e); + } + } + assert_eq!(convo_1.stats.msgs_err, err_before + 1); - // give ping the same peer version as the convo - let ping_good = convo_bad - .sign_message( - &chain_view, - &local_peer_1.private_key, - StacksMessageType::Ping(ping_data.clone()), - ) - .unwrap(); - assert_eq!( - convo_bad.is_preamble_valid(&ping_good, &chain_view), - Ok(true) - ); + // mock a second local peer with a different private key + let mut local_peer_2 = local_peer_1.clone(); + local_peer_2.private_key = Secp256k1PrivateKey::new(); - // give ping a newer epoch than we support - convo_bad.version = 0x18000006; - let ping_good = convo_bad - .sign_message( - &chain_view, - &local_peer_1.private_key, - StacksMessageType::Ping(ping_data.clone()), - ) - .unwrap(); - convo_bad.version = 0x18000005; - assert_eq!( - convo_bad.is_preamble_valid(&ping_good, &chain_view), - Ok(true) - ); + // NOTE: payload can be 
anything since we only look at premable length here + let payload = StacksMessageType::Nack(NackData { error_code: 123 }); + let mut msg = convo_1 + .sign_relay_message(&local_peer_2, &chain_view, vec![], payload.clone()) + .unwrap(); - // give ping an older version, but test with a block in which the ping's version is - // valid - convo_bad.version = 0x18000000; - let ping_old = convo_bad - .sign_message( - &chain_view, - &local_peer_1.private_key, - StacksMessageType::Ping(ping_data.clone()), - ) - .unwrap(); - convo_bad.version = 0x18000005; + let err_before = convo_1.stats.msgs_err; - let mut old_chain_view = chain_view.clone(); - old_chain_view.burn_block_height -= 1; - old_chain_view.burn_stable_block_height -= 1; - old_chain_view.last_burn_block_hashes.insert( - old_chain_view.burn_stable_block_height, - BurnchainHeaderHash([0xff; 32]), - ); - assert_eq!( - convo_bad.is_preamble_valid(&ping_old, &old_chain_view), - Ok(true) - ); - } + // succeeds because it's the first sample + msg.preamble.payload_len = 106; + assert!(convo_1 + .validate_transaction_push( + &local_peer_1, + &chain_view, + &msg.preamble, + msg.relayers.clone() + ) + .unwrap() + .is_none()); + assert_eq!(convo_1.stats.msgs_err, err_before); + + // fails because the second sample says we're over bandwidth + msg.preamble.payload_len = 106; + assert!(convo_1 + .validate_transaction_push( + &local_peer_1, + &chain_view, + &msg.preamble, + msg.relayers.clone() + ) + .unwrap() + .is_some()); + assert_eq!(convo_1.stats.msgs_err, err_before); } #[test] - fn convo_process_relayers() { - let conn_opts = ConnectionOptions::default(); - let socketaddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8090); + fn test_validate_microblocks_push() { + let mut conn_opts = ConnectionOptions::default(); + conn_opts.max_microblocks_push_bandwidth = 100; + + let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081); let first_burn_hash = BurnchainHeaderHash::from_hex( 
"0000000000000000000000000000000000000000000000000000000000000000", @@ -4992,33 +6295,37 @@ mod test { }; chain_view.make_test_data(); - let local_peer = LocalPeer::new( - 123, - burnchain.network_id, - PeerAddress::from_ipv4(127, 0, 0, 1), - NETWORK_P2P_PORT, - None, - get_epoch_time_secs() + 123456, - UrlString::try_from("http://foo.com").unwrap(), + let (mut peerdb_1, mut sortdb_1, pox_id_1, _) = make_test_chain_dbs( + "validate_microblocks_push_1", + &burnchain, + 0x9abcdef0, + 12352, + "http://peer1.com".into(), + &vec![], + &vec![], + DEFAULT_SERVICES, ); - let mut convo = ConversationP2P::new( + + db_setup(&mut peerdb_1, &mut sortdb_1, &socketaddr_1, &chain_view); + + let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); + + let mut convo_1 = ConversationP2P::new( 123, 456, &burnchain, - &socketaddr, + &socketaddr_1, &conn_opts, true, 0, StacksEpoch::unit_test_pre_2_05(0), ); + // NOTE: payload can be anything since we only look at premable length here let payload = StacksMessageType::Nack(NackData { error_code: 123 }); - let msg = convo - .sign_reply(&chain_view, &local_peer.private_key, payload, 123) - .unwrap(); - // cycles - let relay_cycles = vec![ + // bad message -- got bad relayers (cycle) + let bad_relayers = vec![ RelayData { peer: NeighborAddress { addrbytes: PeerAddress([0u8; 16]), @@ -5037,20 +6344,128 @@ mod test { }, ]; - // contains localpeer - let self_sent = vec![RelayData { - peer: NeighborAddress { - addrbytes: local_peer.addrbytes.clone(), - port: local_peer.port, - public_key_hash: Hash160::from_node_public_key(&StacksPublicKey::from_private( - &local_peer.private_key, - )), - }, - seq: 789, - }]; + let mut bad_msg = convo_1 + .sign_relay_message( + &local_peer_1, + &chain_view, + bad_relayers.clone(), + payload.clone(), + ) + .unwrap(); - // allowed - let mut relayers = vec![ + bad_msg.preamble.payload_len = 10; + + let err_before = convo_1.stats.msgs_err; + match convo_1 + .validate_microblocks_push( + &local_peer_1, + 
&chain_view, + &bad_msg.preamble, + bad_msg.relayers.clone(), + ) + .unwrap_err() + { + net_error::InvalidMessage => {} + e => { + panic!("Wrong error: {:?}", &e); + } + } + assert_eq!(convo_1.stats.msgs_err, err_before + 1); + + // mock a second local peer with a different private key + let mut local_peer_2 = local_peer_1.clone(); + local_peer_2.private_key = Secp256k1PrivateKey::new(); + + // NOTE: payload can be anything since we only look at premable length here + let payload = StacksMessageType::Nack(NackData { error_code: 123 }); + let mut msg = convo_1 + .sign_relay_message(&local_peer_2, &chain_view, vec![], payload.clone()) + .unwrap(); + + let err_before = convo_1.stats.msgs_err; + + // succeeds because it's the first sample + msg.preamble.payload_len = 106; + assert!(convo_1 + .validate_microblocks_push( + &local_peer_1, + &chain_view, + &msg.preamble, + msg.relayers.clone() + ) + .unwrap() + .is_none()); + assert_eq!(convo_1.stats.msgs_err, err_before); + + // fails because the second sample says we're over bandwidth + msg.preamble.payload_len = 106; + assert!(convo_1 + .validate_microblocks_push( + &local_peer_1, + &chain_view, + &msg.preamble, + msg.relayers.clone() + ) + .unwrap() + .is_some()); + assert_eq!(convo_1.stats.msgs_err, err_before); + } + + #[test] + fn test_validate_stackerdb_push() { + let mut conn_opts = ConnectionOptions::default(); + conn_opts.max_stackerdb_push_bandwidth = 100; + + let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081); + + let first_burn_hash = BurnchainHeaderHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(); + + let burnchain = testing_burnchain_config(); + + let mut chain_view = BurnchainView { + burn_block_height: 12348, + burn_block_hash: BurnchainHeaderHash([0x11; 32]), + burn_stable_block_height: 12341, + burn_stable_block_hash: BurnchainHeaderHash([0x22; 32]), + last_burn_block_hashes: HashMap::new(), + rc_consensus_hash: 
ConsensusHash([0x33; 20]), + }; + chain_view.make_test_data(); + + let (mut peerdb_1, mut sortdb_1, pox_id_1, _) = make_test_chain_dbs( + "validate_stackerdb_push_1", + &burnchain, + 0x9abcdef0, + 12352, + "http://peer1.com".into(), + &vec![], + &vec![], + DEFAULT_SERVICES, + ); + + db_setup(&mut peerdb_1, &mut sortdb_1, &socketaddr_1, &chain_view); + + let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); + + let mut convo_1 = ConversationP2P::new( + 123, + 456, + &burnchain, + &socketaddr_1, + &conn_opts, + true, + 0, + StacksEpoch::unit_test_pre_2_05(0), + ); + + // NOTE: payload can be anything since we only look at premable length here + let payload = StacksMessageType::Nack(NackData { error_code: 123 }); + + // bad message -- got bad relayers (cycle) + let bad_relayers = vec![ RelayData { peer: NeighborAddress { addrbytes: PeerAddress([0u8; 16]), @@ -5063,30 +6478,76 @@ mod test { peer: NeighborAddress { addrbytes: PeerAddress([1u8; 16]), port: 456, - public_key_hash: Hash160([1u8; 20]), + public_key_hash: Hash160([0u8; 20]), }, seq: 456, }, ]; - assert!(!convo.process_relayers(&local_peer, &msg.preamble, &relay_cycles)); - assert!(!convo.process_relayers(&local_peer, &msg.preamble, &self_sent)); + let mut bad_msg = convo_1 + .sign_relay_message( + &local_peer_1, + &chain_view, + bad_relayers.clone(), + payload.clone(), + ) + .unwrap(); - assert!(convo.process_relayers(&local_peer, &msg.preamble, &relayers)); + bad_msg.preamble.payload_len = 10; - // stats updated - assert_eq!(convo.stats.relayed_messages.len(), 2); - let relayer_map = convo.stats.take_relayers(); - assert_eq!(convo.stats.relayed_messages.len(), 0); + let err_before = convo_1.stats.msgs_err; + match convo_1 + .validate_stackerdb_push( + &local_peer_1, + &chain_view, + &bad_msg.preamble, + bad_msg.relayers.clone(), + ) + .unwrap_err() + { + net_error::InvalidMessage => {} + e => { + panic!("Wrong error: {:?}", &e); + } + } + assert_eq!(convo_1.stats.msgs_err, err_before + 1); - 
for r in relayers.drain(..) { - assert!(relayer_map.contains_key(&r.peer)); + // mock a second local peer with a different private key + let mut local_peer_2 = local_peer_1.clone(); + local_peer_2.private_key = Secp256k1PrivateKey::new(); - let stats = relayer_map.get(&r.peer).unwrap(); - assert_eq!(stats.num_messages, 1); - assert_eq!(stats.num_bytes, (msg.preamble.payload_len - 1) as u64); - } + // NOTE: payload can be anything since we only look at premable length here + let payload = StacksMessageType::Nack(NackData { error_code: 123 }); + let mut msg = convo_1 + .sign_relay_message(&local_peer_2, &chain_view, vec![], payload.clone()) + .unwrap(); + + let err_before = convo_1.stats.msgs_err; + + // succeeds because it's the first sample + msg.preamble.payload_len = 106; + assert!(convo_1 + .validate_stackerdb_push( + &local_peer_1, + &chain_view, + &msg.preamble, + msg.relayers.clone() + ) + .unwrap() + .is_none()); + assert_eq!(convo_1.stats.msgs_err, err_before); + + // fails because the second sample says we're over bandwidth + msg.preamble.payload_len = 106; + assert!(convo_1 + .validate_stackerdb_push( + &local_peer_1, + &chain_view, + &msg.preamble, + msg.relayers.clone() + ) + .unwrap() + .is_some()); + assert_eq!(convo_1.stats.msgs_err, err_before); } } - -// TODO: test bandwidth limits From adf01803e7f739ff64d20e7ca0dcc89e83456f62 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jul 2023 22:06:11 -0400 Subject: [PATCH 21/21] chore: add changelog entry --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 688a9ead4c..d8483a7e13 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,8 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE will be useful for tools that use the Clarity library to analyze and manipulate Clarity source code, e.g. a formatter. - New RPC endpoint at /v2/constant_val to fetch a constant from a contract. 
+- Message definitions and codecs for Stacker DB, a replicated off-chain DB + hosted by subscribed Stacks nodes and controlled by smart contracts ### Fixed