Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Redefining lastDeclareJob #1022

Merged
merged 2 commits into from
Jul 3, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions roles/jd-client/src/lib/downstream.rs
Original file line number Diff line number Diff line change
Expand Up @@ -32,8 +32,8 @@ pub type EitherFrame = StandardEitherFrame<Message>;

/// 1 to 1 connection with a downstream node that implements the mining (sub)protocol can be either
/// a mining device or a downstream proxy.
/// A downstream can only be linked with an upstream at a time. Support multi upstrems for
/// downstream do no make much sense.
/// A downstream can only be linked with an upstream at a time. Support multi upstreams for
/// downstream do not make much sense.
#[derive(Debug)]
pub struct DownstreamMiningNode {
receiver: Receiver<EitherFrame>,
Expand Down Expand Up @@ -181,7 +181,7 @@ impl DownstreamMiningNode {
}
}

/// Send SetupConnectionSuccess to donwstream and start processing new messages coming from
/// Send SetupConnectionSuccess to downstream and start processing new messages coming from
/// downstream
pub async fn start(
self_mutex: &Arc<Mutex<Self>>,
Expand Down Expand Up @@ -225,7 +225,7 @@ impl DownstreamMiningNode {
// mining channel success
fn set_channel_factory(self_mutex: Arc<Mutex<Self>>) {
if !self_mutex.safe_lock(|s| s.status.is_solo_miner()).unwrap() {
// Safe unwrap already checked if it contains an upstream withe `is_solo_miner`
// Safe unwrap already checked if it contains an upstream with `is_solo_miner`
let upstream = self_mutex
.safe_lock(|s| s.status.get_upstream().unwrap())
.unwrap();
Expand Down
20 changes: 14 additions & 6 deletions roles/jd-client/src/lib/job_declarator/message_handler.rs
Original file line number Diff line number Diff line change
Expand Up @@ -55,12 +55,20 @@ impl ParseServerJobDeclarationMessages for JobDeclarator {
) -> Result<SendTo, Error> {
let tx_list = self
.last_declare_mining_jobs_sent
.get(&message.request_id)
.unwrap()
.clone()
.unwrap()
.tx_list
.into_inner();
.iter()
Shourya742 marked this conversation as resolved.
Show resolved Hide resolved
.find_map(|entry| {
if let Some((id, last_declare_job)) = entry {
if *id == message.request_id {
Some(last_declare_job.clone().tx_list.into_inner())
} else {
None
}
} else {
None
}
})
.ok_or_else(|| Error::UnknownRequestId(message.request_id))?;

let unknown_tx_position_list: Vec<u16> = message.unknown_tx_position_list.into_inner();
let missing_transactions: Vec<binary_sv2::B016M> = unknown_tx_position_list
.iter()
Expand Down
46 changes: 30 additions & 16 deletions roles/jd-client/src/lib/job_declarator/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ pub struct JobDeclarator {
req_ids: Id,
min_extranonce_size: u16,
// (Sent DeclareMiningJob, is future, template id, merkle path)
last_declare_mining_jobs_sent: HashMap<u32, Option<LastDeclareJob>>,
last_declare_mining_jobs_sent: [Option<(u32, LastDeclareJob)>; 2],
GitGab19 marked this conversation as resolved.
Show resolved Hide resolved
last_set_new_prev_hash: Option<SetNewPrevHash<'static>>,
set_new_prev_hash_counter: u8,
#[allow(clippy::type_complexity)]
Expand Down Expand Up @@ -115,7 +115,7 @@ impl JobDeclarator {
allocated_tokens: vec![],
req_ids: Id::new(),
min_extranonce_size,
last_declare_mining_jobs_sent: HashMap::with_capacity(10),
last_declare_mining_jobs_sent: [None, None],
last_set_new_prev_hash: None,
future_jobs: HashMap::with_hasher(BuildNoHashHasher::default()),
up,
Expand All @@ -130,32 +130,47 @@ impl JobDeclarator {
Ok(self_)
}

fn get_last_declare_job_sent(self_mutex: &Arc<Mutex<Self>>, request_id: u32) -> LastDeclareJob {
fn get_last_declare_job_sent(
self_mutex: &Arc<Mutex<Self>>,
request_id: u32,
) -> Option<LastDeclareJob> {
self_mutex
.safe_lock(|s| {
s.last_declare_mining_jobs_sent
.get(&request_id)
.expect("LastDeclareJob not found")
.clone()
.expect("unreachable code")
for (id, job) in s.last_declare_mining_jobs_sent.iter().flatten() {
if *id == request_id {
return Some(job.to_owned());
}
}
None
})
.unwrap()
}

/// We maintain a window of 2 jobs. If more than 2 blocks are found,
/// the ordering will depend on the request ID. Only the 2 most recent request
/// IDs will be kept in memory, while the rest will be discarded.
/// More information can be found here: https://github.com/stratum-mining/stratum/pull/904#discussion_r1609469048
fn update_last_declare_job_sent(
self_mutex: &Arc<Mutex<Self>>,
request_id: u32,
j: LastDeclareJob,
) {
self_mutex
.safe_lock(|s| {
//check hashmap size in order to not let it grow indefinetely
if s.last_declare_mining_jobs_sent.len() < 10 {
s.last_declare_mining_jobs_sent.insert(request_id, Some(j));
} else if let Some(min_key) = s.last_declare_mining_jobs_sent.keys().min().cloned()
if let Some(empty_index) = s
.last_declare_mining_jobs_sent
.iter()
.position(|entry| entry.is_none())
{
s.last_declare_mining_jobs_sent[empty_index] = Some((request_id, j));
} else if let Some((min_index, _)) = s
.last_declare_mining_jobs_sent
.iter()
.enumerate()
.filter_map(|(i, entry)| entry.as_ref().map(|(id, _)| (i, id)))
.min_by_key(|&(_, id)| id)
{
s.last_declare_mining_jobs_sent.remove(&min_key);
s.last_declare_mining_jobs_sent.insert(request_id, Some(j));
s.last_declare_mining_jobs_sent[min_index] = Some((request_id, j));
}
})
.unwrap();
Expand Down Expand Up @@ -289,8 +304,7 @@ impl JobDeclarator {
match next_message_to_send {
Ok(SendTo::None(Some(JobDeclaration::DeclareMiningJobSuccess(m)))) => {
let new_token = m.new_mining_job_token;
let last_declare =
Self::get_last_declare_job_sent(&self_mutex, m.request_id);
let last_declare = Self::get_last_declare_job_sent(&self_mutex, m.request_id).unwrap_or_else(|| panic!("Failed to get last declare job: job not found, Request Id: {:?}.", m.request_id));
let mut last_declare_mining_job_sent = last_declare.declare_job;
let is_future = last_declare.template.future_template;
let id = last_declare.template.template_id;
Expand Down
2 changes: 1 addition & 1 deletion roles/jd-client/src/lib/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ use std::{sync::atomic::AtomicBool, time::Duration};
/// In the meantime if the context that is running the template receiver receives a SetNewPrevHash
/// it waits until the value of this global is true before doing anything.
///
/// Acuire and Release memory ordering is used.
/// Acquire and Release memory ordering is used.
///
/// Memory Ordering Explanation:
/// We use Acquire-Release ordering instead of SeqCst or Relaxed for the following reasons:
Expand Down
14 changes: 7 additions & 7 deletions roles/jd-client/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -46,12 +46,12 @@ fn process_cli_args<'a>() -> ProxyResult<'a, ProxyConfig> {
/// TODO on setupconnection with bitcoind (TP) JDC must signal that it wants a tx short hash list with
/// the templates
///
/// TODO JDC must handle TxShortHahhList message
/// TODO JDC must handle TxShortHashList message
///
/// This will start:
/// 1. An Upstream, this will connect with the mining Pool
/// 2. A listener that will wait for a mining downstream with ExtendedChannel capabilities (tproxy,
/// minin-proxy)
/// mining-proxy)
/// 3. A JobDeclarator, this will connect with the job-declarator-server
/// 4. A TemplateRx, this will connect with bitcoind
///
Expand All @@ -78,15 +78,15 @@ fn process_cli_args<'a>() -> ProxyResult<'a, ProxyConfig> {
/// Then we receive CommitMiningJobSuccess and we use the new token to send SetCustomMiningJob to
/// the pool.
/// When we receive SetCustomMiningJobSuccess we set in Upstream job_id equal to the one received
/// in SetCustomMiningJobSuccess so that we sill send shares upstream with the right job_id.
/// in SetCustomMiningJobSuccess so that we still send shares upstream with the right job_id.
///
/// The above procedure, let us send NewExtendedMiningJob downstream right after a NewTemplate has
/// been received; this will reduce the time that passes between a NewTemplate and the mining-device
/// starting to mine on the new job.
///
/// In the case of a future NewTemplate the SetCustomMiningJob is sent only if the candidate becomes
/// the actual NewTemplate so that we do not send a lot of usless future Job to the pool. That
/// means that SetCustomMiningJob is sent only when a NewTemplate becom "active"
/// the actual NewTemplate so that we do not send a lot of useless future Job to the pool. That
/// means that SetCustomMiningJob is sent only when a NewTemplate become "active"
///
/// The JobDeclarator always has 2 available tokens, which means that whenever a token is used to
/// commit a job with upstream we require a new one. Having always a token when needed means that
Expand Down Expand Up @@ -217,7 +217,7 @@ async fn initialize_jd_as_solo_miner(
};
let miner_tx_out = lib::proxy_config::get_coinbase_output(&proxy_config).unwrap();

// When Downstream receive a share that meets bitcoin target it transformit in a
// When Downstream receive a share that meets bitcoin target it transform it in a
// SubmitSolution and send it to the TemplateReceiver
let (send_solution, recv_solution) = bounded(10);

Expand Down Expand Up @@ -290,7 +290,7 @@ async fn initialize_jd(
port,
);

// When Downstream receive a share that meets bitcoin target it transformit in a
// When Downstream receive a share that meets bitcoin target it transform it in a
// SubmitSolution and send it to the TemplateReceiver
let (send_solution, recv_solution) = bounded(10);

Expand Down
Loading