Skip to content
This repository has been archived by the owner on Mar 14, 2023. It is now read-only.

Commit

Permalink
Merge pull request #77 from rajarshimaitra/electrum-example
Browse files Browse the repository at this point in the history
Add an electrum syncing example
  • Loading branch information
evanlinjin authored Jan 6, 2023
2 parents fbc88da + 6051596 commit b271db7
Show file tree
Hide file tree
Showing 9 changed files with 424 additions and 21 deletions.
1 change: 1 addition & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ members = [
"bdk_chain",
"bdk_cli_lib",
"bdk_esplora_example",
"bdk_electrum_example",
"bdk_tmp_plan",
"bdk_coin_select"
]
14 changes: 4 additions & 10 deletions bdk_chain/src/chain_graph.rs
Original file line number Diff line number Diff line change
Expand Up @@ -206,12 +206,11 @@ impl<P: ChainPosition> ChainGraph<P> {

/// Converts a [`sparse_chain::ChangeSet`] to a valid [`ChangeSet`] by providing
/// full transactions for each addition.
///
pub fn inflate_changeset(
&self,
changeset: sparse_chain::ChangeSet<P>,
full_txs: impl IntoIterator<Item = Transaction>,
) -> Result<ChangeSet<P>, (sparse_chain::ChangeSet<P>, InflateFailure<P>)> {
) -> Result<ChangeSet<P>, InflateFailure<P>> {
let mut missing = self
.chain
.changeset_additions(&changeset)
Expand All @@ -235,15 +234,10 @@ impl<P: ChainPosition> ChainGraph<P> {
..Default::default()
},
};
self.fix_conflicts(&mut changeset).map_err(|inner| {
(
changeset.chain.clone(),
InflateFailure::UnresolvableConflict(inner),
)
})?;
self.fix_conflicts(&mut changeset)?;
Ok(changeset)
} else {
Err((changeset, InflateFailure::Missing(missing)))
Err(InflateFailure::Missing(missing))
}
}

Expand Down Expand Up @@ -358,7 +352,7 @@ impl<P: core::fmt::Debug> core::fmt::Display for InflateFailure<P> {
missing.len()
),
InflateFailure::UnresolvableConflict(inner) => {
write!(f, "cannot inflate changeset: {:?}", inner)
write!(f, "cannot inflate changeset: {}", inner)
}
}
}
Expand Down
10 changes: 2 additions & 8 deletions bdk_chain/tests/test_chain_graph.rs
Original file line number Diff line number Diff line change
Expand Up @@ -316,19 +316,13 @@ fn chain_graph_inflate_changeset() {

assert_eq!(
cg.inflate_changeset(chain_changeset.clone(), vec![]),
Err((
chain_changeset.clone(),
InflateFailure::Missing(expected_missing.clone())
))
Err(InflateFailure::Missing(expected_missing.clone()))
);

expected_missing.remove(&tx_b.txid());
assert_eq!(
cg.inflate_changeset(chain_changeset.clone(), vec![tx_b.clone()]),
Err((
chain_changeset.clone(),
InflateFailure::Missing(expected_missing)
))
Err(InflateFailure::Missing(expected_missing))
);

let mut additions = tx_graph::Additions::default();
Expand Down
2 changes: 0 additions & 2 deletions bdk_cli_lib/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -15,5 +15,3 @@ anyhow = "1"
serde = { version = "1", features = ["derive"] }
thiserror = "1.0.37"
serde_json = { version = "^1.0" }
log = "^0.4"
env_logger = "0.7"
3 changes: 2 additions & 1 deletion bdk_cli_lib/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -210,7 +210,8 @@ where
let address = Address::from_script(&spk, network)
.expect("should always be able to derive address");
println!(
"{} used:{}",
"{:?} {} used:{}",
index,
address,
txout_index.is_used(&(target_keychain, index))
);
Expand Down
1 change: 1 addition & 0 deletions bdk_electrum_example/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
/target
13 changes: 13 additions & 0 deletions bdk_electrum_example/Cargo.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
[package]
name = "bdk_electrum_example"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
# BDK Core
bdk_chain = { path = "../bdk_chain", features = ["serde"] }
bdk_cli = { path = "../bdk_cli_lib"}

# Electrum
electrum-client = { version = "0.12" }
211 changes: 211 additions & 0 deletions bdk_electrum_example/src/electrum.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,211 @@
use std::{collections::BTreeMap, ops::Deref};

use bdk_chain::{
bitcoin::{BlockHash, Script, Txid},
sparse_chain::{self, SparseChain},
BlockId, TxHeight,
};
use bdk_cli::Broadcast;
use electrum_client::{Client, ElectrumApi};

/// Errors that can occur while syncing against an Electrum server.
#[derive(Debug)]
pub enum ElectrumError {
    /// An error from the underlying `electrum_client` RPC transport.
    Client(electrum_client::Error),
    /// A chain reorganization was detected during a sync; the caller
    /// should re-run the sync call.
    Reorg,
}

impl core::fmt::Display for ElectrumError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
ElectrumError::Client(e) => write!(f, "{}", e),
ElectrumError::Reorg => write!(
f,
"Reorg detected at sync time. Please run the sync call again"
),
}
}
}

impl std::error::Error for ElectrumError {}

impl From<electrum_client::Error> for ElectrumError {
fn from(e: electrum_client::Error) -> Self {
Self::Client(e)
}
}

/// Thin wrapper around an [`electrum_client::Client`] adding BDK-specific
/// sync helpers (see the inherent impls below).
pub struct ElectrumClient {
    // Raw Electrum RPC client; also reachable directly via the `Deref` impl.
    inner: Client,
}

impl ElectrumClient {
pub fn new(client: Client) -> Result<Self, ElectrumError> {
Ok(Self { inner: client })
}
}

// Deref to the raw client so callers can use the whole `ElectrumApi` surface
// (e.g. `batch_script_get_history`, `block_header`) directly on `ElectrumClient`.
impl Deref for ElectrumClient {
    type Target = Client;
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl Broadcast for ElectrumClient {
    type Error = electrum_client::Error;

    /// Submit `tx` to the Electrum server, discarding the txid it echoes back.
    fn broadcast(&self, tx: &bdk_chain::bitcoin::Transaction) -> Result<(), Self::Error> {
        self.inner.transaction_broadcast(tx).map(|_| ())
    }
}

/// Sync helpers built on top of the raw Electrum RPC interface.
impl ElectrumClient {
    /// Fetch latest block height.
    ///
    /// Returns the tip as a `(height, block_hash)` pair, taken from the header
    /// that the subscription call replies with.
    pub fn get_tip(&self) -> Result<(u32, BlockHash), electrum_client::Error> {
        // TODO: unsubscribe when added to the client, or is there a better call to use here?
        Ok(self
            .inner
            .block_headers_subscribe()
            .map(|data| (data.height as u32, data.header.block_hash()))?)
    }

    /// Scan for a given list of scripts, and create an initial [`bdk_chain::sparse_chain::SparseChain`] update candidate.
    /// This will only contain [`Txid`]s in SparseChain, and no actual transaction data.
    ///
    /// User needs to fetch the required transaction data and create the final [`bdk_chain::keychain::KeychainChangeSet`] before applying it.
    pub fn spk_txid_scan(
        &self,
        spks: impl Iterator<Item = Script>,
        local_chain: &BTreeMap<u32, BlockHash>,
        batch_size: usize,
    ) -> Result<SparseChain, ElectrumError> {
        // Reuse the keychain scanner with a single dummy `()` keychain and no
        // stop gap, then discard the (trivial) keychain index map it returns.
        let mut dummy_keychains = BTreeMap::new();
        dummy_keychains.insert((), spks.enumerate().map(|(i, spk)| (i as u32, spk)));

        Ok(self
            .wallet_txid_scan(dummy_keychains, None, local_chain, batch_size)?
            .0)
    }

    /// Scan for a keychain tracker, and create an initial [`bdk_chain::sparse_chain::SparseChain`] update candidate.
    /// This will only contain [`Txid`]s in SparseChain, and no actual transaction data.
    ///
    /// User needs to fetch the required transaction data and create the final [`bdk_chain::keychain::KeychainChangeSet`] before applying it.
    ///
    /// `scripts` maps each keychain `K` to an iterator of `(derivation_index, script)`
    /// pairs. `stop_gap`, when `Some(n)`, stops scanning a keychain once `n` consecutive
    /// scripts show no history (when `None`, every script is scanned). `local_chain` is
    /// the caller's current view of the chain, used to find a connection point for the
    /// update. `batch_size` is the number of scripts queried per batched RPC.
    ///
    /// On success, returns the sparse-chain update together with the last active
    /// derivation index found per keychain.
    pub fn wallet_txid_scan<K: Ord + Clone>(
        &self,
        scripts: BTreeMap<K, impl Iterator<Item = (u32, Script)>>,
        stop_gap: Option<usize>,
        local_chain: &BTreeMap<u32, BlockHash>,
        batch_size: usize,
    ) -> Result<(SparseChain, BTreeMap<K, u32>), ElectrumError> {
        let mut sparse_chain = SparseChain::default();

        // Find local chain block that is still there so our update can connect to the local chain.
        // Walk the local chain from the highest height downwards, inserting the SERVER's
        // hash for each height, and stop at the first height where both sides agree.
        for (&existing_height, &existing_hash) in local_chain.iter().rev() {
            let current_hash = self
                .inner
                .block_header(existing_height as usize)?
                .block_hash();
            sparse_chain
                .insert_checkpoint(BlockId {
                    height: existing_height,
                    hash: current_hash,
                })
                .expect("This never errors because we are working with a fresh chain");

            if current_hash == existing_hash {
                // Agreement point found — the update can connect to the local chain here.
                break;
            }
        }

        // Insert the new tip so new transactions will be accepted into the sparse chain.
        let (tip_height, tip_hash) = self.get_tip()?;
        if let Err(e) = sparse_chain.insert_checkpoint(BlockId {
            height: tip_height,
            hash: tip_hash,
        }) {
            match e {
                sparse_chain::InsertCheckpointErr::HashNotMatching => {
                    // There has been a re-org before we even begin scanning addresses.
                    // Just recursively call (this should never happen).
                    return self.wallet_txid_scan(scripts, stop_gap, local_chain, batch_size);
                }
            }
        }

        // Per-keychain result: last active derivation index seen.
        let mut keychain_index_update = BTreeMap::new();

        for (keychain, mut scripts) in scripts {
            let mut last_active_index = 0;
            // Consecutive scripts with no history, for the stop-gap check below.
            let mut unused_script_count = 0usize;

            loop {
                // Pull up to `batch_size` `(index, script)` pairs; `peekable` lets us
                // detect when the keychain's script iterator is exhausted.
                let mut next_batch = (0..batch_size).filter_map(|_| scripts.next()).peekable();

                if next_batch.peek().is_none() {
                    break;
                }

                let (indexes, scripts): (Vec<_>, Vec<_>) = next_batch.unzip();

                // One batched RPC per chunk; the zip with `indexes` relies on the batch
                // response preserving request order to re-associate history with index.
                for (history, index) in self
                    .batch_script_get_history(scripts.iter())?
                    .into_iter()
                    .zip(indexes)
                {
                    let txid_list = history
                        .iter()
                        .map(|history_result| {
                            // Heights <= 0 (presumably mempool entries, per the Electrum
                            // protocol — confirm) and heights above the tip we anchored to
                            // are both treated as unconfirmed.
                            if history_result.height > 0
                                && (history_result.height as u32) <= tip_height
                            {
                                (
                                    history_result.tx_hash,
                                    TxHeight::Confirmed(history_result.height as u32),
                                )
                            } else {
                                (history_result.tx_hash, TxHeight::Unconfirmed)
                            }
                        })
                        .collect::<Vec<(Txid, TxHeight)>>();

                    if txid_list.is_empty() {
                        unused_script_count += 1;
                    } else {
                        if index > last_active_index {
                            last_active_index = index;
                        }
                        // Any activity resets the gap counter.
                        unused_script_count = 0;
                    }

                    for (txid, index) in txid_list {
                        if let Err(err) = sparse_chain.insert_tx(txid, index) {
                            match err {
                                sparse_chain::InsertTxErr::TxTooHigh => {
                                    unreachable!("We should not encounter this error as we ensured TxHeight <= tip_height");
                                }
                                sparse_chain::InsertTxErr::TxMoved => {
                                    /* This means there is a reorg, we will handle this situation below */
                                }
                            }
                        }
                    }
                }

                // With `stop_gap == None` this threshold is `usize::MAX`, i.e. never stop early.
                if unused_script_count >= stop_gap.unwrap_or(usize::MAX) {
                    break;
                }
            }

            // NOTE(review): this records `last_active_index` even when no script had any
            // history, in which case the keychain maps to 0 — indistinguishable from
            // "index 0 is active". Confirm callers tolerate this.
            keychain_index_update.insert(keychain, last_active_index);
        }

        // Check for Reorg during the above sync process: the latest checkpoint we
        // inserted must still match the server's hash at that height.
        let our_latest = sparse_chain.latest_checkpoint().expect("must exist");
        if our_latest.hash != self.block_header(our_latest.height as usize)?.block_hash() {
            return Err(ElectrumError::Reorg);
        }

        Ok((sparse_chain, keychain_index_update))
    }
}
Loading

0 comments on commit b271db7

Please sign in to comment.