Skip to content

Commit

Permalink
First check if we should queue dkg before checking the contract to minimize unnecessary DKG triggering
Browse files Browse the repository at this point in the history

Signed-off-by: Jacinta Ferrant <[email protected]>
  • Loading branch information
jferrant committed Apr 8, 2024
1 parent b750401 commit 021c0a9
Show file tree
Hide file tree
Showing 2 changed files with 73 additions and 54 deletions.
5 changes: 2 additions & 3 deletions stacks-signer/src/runloop.rs
Original file line number Diff line number Diff line change
Expand Up @@ -383,10 +383,9 @@ impl SignerRunLoop<Vec<OperationResult>, RunLoopCommand> for RunLoop {
if event_parity == Some(other_signer_parity) {
continue;
}

if signer.approved_aggregate_public_key.is_none() {
if let Err(e) = signer.update_dkg(&self.stacks_client) {
error!("{signer}: failed to update DKG: {e}");
if let Err(e) = signer.refresh_dkg(&self.stacks_client) {
error!("{signer}: failed to refresh DKG: {e}");
}
}
signer.refresh_coordinator();
Expand Down
122 changes: 71 additions & 51 deletions stacks-signer/src/signer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -123,13 +123,22 @@ pub enum Command {
},
}

/// The specific operations that a signer can perform
#[derive(PartialEq, Eq, Debug, Clone)]
pub enum Operation {
/// A DKG operation
Dkg,
/// A Sign operation
Sign,
}

/// The Signer state
#[derive(PartialEq, Eq, Debug, Clone)]
pub enum State {
/// The signer is idle, waiting for messages and commands
Idle,
/// The signer is executing a DKG or Sign round
OperationInProgress,
OperationInProgress(Operation),
}

/// The stacks signer registered for the reward cycle
Expand Down Expand Up @@ -343,8 +352,8 @@ impl Signer {
}

/// Update operation
fn update_operation(&mut self) {
self.state = State::OperationInProgress;
fn update_operation(&mut self, operation: Operation) {
self.state = State::OperationInProgress(operation);
self.coordinator_selector.last_message_time = Some(Instant::now());
}

Expand Down Expand Up @@ -374,6 +383,7 @@ impl Signer {
Ok(msg) => {
let ack = self.stackerdb.send_message_with_retry(msg.into());
debug!("{self}: ACK: {ack:?}",);
self.update_operation(Operation::Dkg);
}
Err(e) => {
error!("{self}: Failed to start DKG: {e:?}",);
Expand Down Expand Up @@ -419,6 +429,7 @@ impl Signer {
.unwrap_or_else(|e| {
error!("{self}: Failed to insert block in DB: {e:?}");
});
self.update_operation(Operation::Sign);
}
Err(e) => {
error!("{self}: Failed to start signing block: {e:?}",);
Expand All @@ -427,7 +438,6 @@ impl Signer {
}
}
}
self.update_operation();
}

/// Attempt to process the next command in the queue, and update state accordingly
Expand Down Expand Up @@ -460,10 +470,10 @@ impl Signer {
.expect("BUG: Already asserted that the command queue was not empty");
self.execute_command(stacks_client, &command);
}
State::OperationInProgress => {
State::OperationInProgress(op) => {
// We cannot execute the next command until the current one is finished...
debug!(
"{self}: Waiting for operation to finish. Coordinator state = {:?}",
"{self}: Waiting for {op:?} operation to finish. Coordinator state = {:?}",
self.coordinator.state
);
}
Expand Down Expand Up @@ -696,9 +706,26 @@ impl Signer {
self.process_operation_results(stacks_client, &operation_results);
self.send_operation_results(res, operation_results);
self.finish_operation();
} else if !packets.is_empty() && self.coordinator.state != CoordinatorState::Idle {
// We have received a message and are in the middle of an operation. Update our state accordingly
self.update_operation();
} else if !packets.is_empty() {
// We have received a message. Update our state accordingly
// Let us be extra explicit in case a new state type gets added to wsts' state machine
match &self.coordinator.state {
CoordinatorState::Idle => {}
CoordinatorState::DkgPublicDistribute
| CoordinatorState::DkgPublicGather
| CoordinatorState::DkgPrivateDistribute
| CoordinatorState::DkgPrivateGather
| CoordinatorState::DkgEndDistribute
| CoordinatorState::DkgEndGather => {
self.update_operation(Operation::Dkg);
}
CoordinatorState::NonceRequest(_, _)
| CoordinatorState::NonceGather(_, _)
| CoordinatorState::SigShareRequest(_, _)
| CoordinatorState::SigShareGather(_, _) => {
self.update_operation(Operation::Sign);
}
}
}

debug!("{self}: Saving signer state");
Expand Down Expand Up @@ -1277,7 +1304,42 @@ impl Signer {
}
}

/// Refresh DKG value and queue DKG command if necessary
pub fn refresh_dkg(&mut self, stacks_client: &StacksClient) -> Result<(), ClientError> {
// First check if we should queue DKG based on contract vote state and stackerdb transactions
let should_queue = self.should_queue_dkg(stacks_client)?;
        // Before queueing the command, check one last time if DKG has been approved (could have happened in the meantime)
let old_dkg = self.approved_aggregate_public_key;
self.approved_aggregate_public_key =
stacks_client.get_approved_aggregate_key(self.reward_cycle)?;
if self.approved_aggregate_public_key.is_some() {
// TODO: this will never work as is. We need to have stored our party shares on the side etc for this particular aggregate key.
// Need to update state to store the necessary info, check against it to see if we have participated in the winning round and
// then overwrite our value accordingly. Otherwise, we will be locked out of the round and should not participate.
self.coordinator
.set_aggregate_public_key(self.approved_aggregate_public_key);
if old_dkg != self.approved_aggregate_public_key {
warn!(
"{self}: updated DKG value to {:?}.",
self.approved_aggregate_public_key
);
}
if matches!(self.state, State::OperationInProgress(Operation::Dkg)) {
debug!(
"{self}: DKG has already been set. Aborting DKG operation {}.",
self.coordinator.current_dkg_id
);
self.finish_operation();
}
} else if should_queue {
info!("{self} is the current coordinator and must trigger DKG. Queuing DKG command...");
self.commands.push_front(Command::Dkg);
}
Ok(())
}

/// Should DKG be queued to the current signer's command queue
/// This assumes that no key has been approved by the contract yet
pub fn should_queue_dkg(&mut self, stacks_client: &StacksClient) -> Result<bool, ClientError> {
if self.state != State::Idle
|| self.signer_id != self.get_coordinator_dkg().0
Expand Down Expand Up @@ -1315,22 +1377,6 @@ impl Signer {
);
return Ok(false);
}

// Try again to get the approved key, in case it has reached the threshold weight
// after we last checked.
self.approved_aggregate_public_key =
stacks_client.get_approved_aggregate_key(self.reward_cycle)?;
if self.approved_aggregate_public_key.is_some() {
self.coordinator
.set_aggregate_public_key(self.approved_aggregate_public_key);
return Ok(false);
}
warn!("{self}: Vote for DKG failed.";
"voting_round" => self.coordinator.current_dkg_id,
"aggregate_key" => %aggregate_key,
"round_weight" => round_weight,
"threshold_weight" => threshold_weight,
);
} else {
// Have I already voted, but the vote is still pending in StackerDB? Check stackerdb for the same round number and reward cycle vote transaction
// Only get the account nonce of THIS signer as we only care about our own votes, not other signer votes
Expand Down Expand Up @@ -1377,32 +1423,6 @@ impl Signer {
Ok(true)
}

/// Update the DKG for the provided signer info, triggering it if required
pub fn update_dkg(&mut self, stacks_client: &StacksClient) -> Result<(), ClientError> {
let old_dkg = self.approved_aggregate_public_key;
self.approved_aggregate_public_key =
stacks_client.get_approved_aggregate_key(self.reward_cycle)?;
if self.approved_aggregate_public_key.is_some() {
// TODO: this will never work as is. We need to have stored our party shares on the side etc for this particular aggregate key.
// Need to update state to store the necessary info, check against it to see if we have participated in the winning round and
// then overwrite our value accordingly. Otherwise, we will be locked out of the round and should not participate.
self.coordinator
.set_aggregate_public_key(self.approved_aggregate_public_key);
if old_dkg != self.approved_aggregate_public_key {
warn!(
"{self}: updated DKG value to {:?}.",
self.approved_aggregate_public_key
);
}
return Ok(());
};
if self.should_queue_dkg(stacks_client)? {
info!("{self} is the current coordinator and must trigger DKG. Queuing DKG command...");
self.commands.push_front(Command::Dkg);
}
Ok(())
}

/// Process the event
pub fn process_event(
&mut self,
Expand Down

0 comments on commit 021c0a9

Please sign in to comment.