Skip to content
This repository has been archived by the owner on Nov 6, 2020. It is now read-only.

Commit

Permalink
SecretStore: expose restore_key_public in HTTP API (#10241)
Browse files Browse the repository at this point in the history
  • Loading branch information
svyatonik authored and dvdplm committed Jun 5, 2019
1 parent f7dae48 commit 6be4536
Show file tree
Hide file tree
Showing 9 changed files with 189 additions and 67 deletions.
20 changes: 20 additions & 0 deletions secret-store/src/key_server.rs
Original file line number Diff line number Diff line change
Expand Up @@ -78,6 +78,22 @@ impl ServerKeyGenerator for KeyServerImpl {
.expect("when wait is called without timeout it always returns Some; qed")
.map_err(Into::into)
}

fn restore_key_public(&self, key_id: &ServerKeyId, author: &Requester) -> Result<Public, Error> {
// recover requestor' public key from signature
let address = author.address(key_id).map_err(Error::InsufficientRequesterData)?;

// negotiate key version && retrieve common key data
let negotiation_session = self.data.lock().cluster.new_key_version_negotiation_session(*key_id)?;
negotiation_session.wait()
.and_then(|_| negotiation_session.common_key_data())
.and_then(|key_share| if key_share.author == address {
Ok(key_share.public)
} else {
Err(Error::AccessDenied)
})
.map_err(Into::into)
}
}

impl DocumentKeyServer for KeyServerImpl {
Expand Down Expand Up @@ -237,6 +253,10 @@ pub mod tests {
// Test stub: server key generation is never exercised through this dummy
// key server, so the method deliberately panics if it is reached.
fn generate_key(&self, _key_id: &ServerKeyId, _author: &Requester, _threshold: usize) -> Result<Public, Error> {
unimplemented!("test-only")
}

// Test stub: restoring a server key public is never exercised through this
// dummy key server, so the method deliberately panics if it is reached.
fn restore_key_public(&self, _key_id: &ServerKeyId, _author: &Requester) -> Result<Public, Error> {
unimplemented!("test-only")
}
}

impl DocumentKeyServer for DummyKeyServer {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ use key_server_cluster::decryption_session::SessionImpl as DecryptionSession;
use key_server_cluster::signing_session_ecdsa::SessionImpl as EcdsaSigningSession;
use key_server_cluster::signing_session_schnorr::SessionImpl as SchnorrSigningSession;
use key_server_cluster::message::{Message, KeyVersionNegotiationMessage, RequestKeyVersions,
KeyVersions, KeyVersionsError, FailedKeyVersionContinueAction};
KeyVersions, KeyVersionsError, FailedKeyVersionContinueAction, CommonKeyData};
use key_server_cluster::admin_sessions::ShareChangeSessionMeta;

// TODO [Opt]: change sessions so that versions are sent by chunks.
Expand Down Expand Up @@ -97,8 +97,8 @@ struct SessionData {
pub state: SessionState,
/// Initialization confirmations.
pub confirmations: Option<BTreeSet<NodeId>>,
/// Key threshold.
pub threshold: Option<usize>,
/// Common key data that nodes have agreed upon.
pub key_share: Option<DocumentKeyShare>,
/// { Version => Nodes }
pub versions: Option<BTreeMap<H256, BTreeSet<NodeId>>>,
/// Session result.
Expand Down Expand Up @@ -167,12 +167,11 @@ pub struct LargestSupportResultComputer;
impl<T> SessionImpl<T> where T: SessionTransport {
/// Create new session.
pub fn new(params: SessionParams<T>) -> Self {
let threshold = params.key_share.as_ref().map(|key_share| key_share.threshold);
SessionImpl {
core: SessionCore {
meta: params.meta,
sub_session: params.sub_session,
key_share: params.key_share,
key_share: params.key_share.clone(),
result_computer: params.result_computer,
transport: params.transport,
nonce: params.nonce,
Expand All @@ -181,7 +180,12 @@ impl<T> SessionImpl<T> where T: SessionTransport {
data: Mutex::new(SessionData {
state: SessionState::WaitingForInitialization,
confirmations: None,
threshold: threshold,
key_share: params.key_share.map(|key_share| DocumentKeyShare {
threshold: key_share.threshold,
author: key_share.author,
public: key_share.public,
..Default::default()
}),
versions: None,
result: None,
continue_with: None,
Expand All @@ -195,12 +199,6 @@ impl<T> SessionImpl<T> where T: SessionTransport {
&self.core.meta
}

/// Return key threshold.
pub fn key_threshold(&self) -> Result<usize, Error> {
self.data.lock().threshold.clone()
.ok_or(Error::InvalidStateForRequest)
}

/// Return result computer reference.
pub fn version_holders(&self, version: &H256) -> Result<BTreeSet<NodeId>, Error> {
Ok(self.data.lock().versions.as_ref().ok_or(Error::InvalidStateForRequest)?
Expand Down Expand Up @@ -229,6 +227,12 @@ impl<T> SessionImpl<T> where T: SessionTransport {
.expect("wait_session returns Some if called without timeout; qed")
}

/// Retrieve common key data (author, threshold, public), if available.
pub fn common_key_data(&self) -> Result<DocumentKeyShare, Error> {
self.data.lock().key_share.clone()
.ok_or(Error::InvalidStateForRequest)
}

/// Initialize session.
pub fn initialize(&self, connected_nodes: BTreeSet<NodeId>) -> Result<(), Error> {
// check state
Expand Down Expand Up @@ -322,7 +326,11 @@ impl<T> SessionImpl<T> where T: SessionTransport {
session: self.core.meta.id.clone().into(),
sub_session: self.core.sub_session.clone().into(),
session_nonce: self.core.nonce,
threshold: self.core.key_share.as_ref().map(|key_share| key_share.threshold),
key_common: self.core.key_share.as_ref().map(|key_share| CommonKeyData {
threshold: key_share.threshold,
author: key_share.author.into(),
public: key_share.public.into(),
}),
versions: self.core.key_share.as_ref().map(|key_share|
key_share.versions.iter().rev()
.filter(|v| v.id_numbers.contains_key(sender))
Expand Down Expand Up @@ -357,12 +365,25 @@ impl<T> SessionImpl<T> where T: SessionTransport {

// remember versions that sender have
{
match message.threshold.clone() {
Some(threshold) if data.threshold.is_none() => {
data.threshold = Some(threshold);
match message.key_common.as_ref() {
Some(key_common) if data.key_share.is_none() => {
data.key_share = Some(DocumentKeyShare {
threshold: key_common.threshold,
author: key_common.author.clone().into(),
public: key_common.public.clone().into(),
..Default::default()
});
},
Some(key_common) => {
let prev_key_share = data.key_share.as_ref()
.expect("data.key_share.is_none() is matched by previous branch; qed");
if prev_key_share.threshold != key_common.threshold ||
prev_key_share.author.as_bytes() != key_common.author.as_bytes() ||
prev_key_share.public.as_bytes() != key_common.public.as_bytes()
{
return Err(Error::InvalidMessage);
}
},
Some(threshold) if data.threshold.as_ref() == Some(&threshold) => (),
Some(_) => return Err(Error::InvalidMessage),
None if message.versions.is_empty() => (),
None => return Err(Error::InvalidMessage),
}
Expand All @@ -388,7 +409,8 @@ impl<T> SessionImpl<T> where T: SessionTransport {
let reason = "this field is filled on master node when initializing; try_complete is only called on initialized master node; qed";
let confirmations = data.confirmations.as_ref().expect(reason);
let versions = data.versions.as_ref().expect(reason);
if let Some(result) = core.result_computer.compute_result(data.threshold.clone(), confirmations, versions) {
let threshold = data.key_share.as_ref().map(|key_share| key_share.threshold);
if let Some(result) = core.result_computer.compute_result(threshold, confirmations, versions) {
// when the master node processing decryption service request, it starts with a key version negotiation session
// if the negotiation fails, only master node knows about it
// => if the error is fatal, only the master will know about it and report it to the contract && the request will never be rejected
Expand Down Expand Up @@ -590,7 +612,7 @@ impl SessionResultComputer for LargestSupportResultComputer {
mod tests {
use std::sync::Arc;
use std::collections::{VecDeque, BTreeMap, BTreeSet};
use ethereum_types::{H512, Address};
use ethereum_types::{H512, H160, Address};
use ethkey::public_to_address;
use key_server_cluster::{NodeId, SessionId, Error, KeyStorage, DummyKeyStorage,
DocumentKeyShare, DocumentKeyShareVersion};
Expand All @@ -600,7 +622,10 @@ mod tests {
use key_server_cluster::cluster_sessions::ClusterSession;
use key_server_cluster::admin_sessions::ShareChangeSessionMeta;
use key_server_cluster::decryption_session::create_default_decryption_session;
use key_server_cluster::message::{Message, KeyVersionNegotiationMessage, RequestKeyVersions, KeyVersions};
use key_server_cluster::message::{
Message, KeyVersionNegotiationMessage, RequestKeyVersions,
CommonKeyData, KeyVersions,
};
use super::{
SessionImpl, SessionTransport, SessionParams, FastestResultComputer, LargestSupportResultComputer,
SessionResultComputer, SessionState, ContinueAction, FailedContinueAction,
Expand Down Expand Up @@ -759,7 +784,11 @@ mod tests {
session: Default::default(),
sub_session: math::generate_random_scalar().unwrap().into(),
session_nonce: 0,
threshold: Some(10),
key_common: Some(CommonKeyData {
threshold: 10,
author: Default::default(),
public: Default::default(),
}),
versions: Vec::new(),
})), Err(Error::InvalidStateForRequest));
}
Expand All @@ -775,7 +804,12 @@ mod tests {
session: Default::default(),
sub_session: math::generate_random_scalar().unwrap().into(),
session_nonce: 0,
threshold: Some(0),
key_common: Some(CommonKeyData {
threshold: 0,
author: Default::default(),
public: Default::default(),
}),

versions: vec![version_id.clone().into()]
})), Ok(()));
assert_eq!(ml.session(0).data.lock().state, SessionState::Finished);
Expand All @@ -784,32 +818,61 @@ mod tests {
session: Default::default(),
sub_session: math::generate_random_scalar().unwrap().into(),
session_nonce: 0,
threshold: Some(0),
key_common: Some(CommonKeyData {
threshold: 0,
author: Default::default(),
public: Default::default(),
}),

versions: vec![version_id.clone().into()]
})), Ok(()));
assert_eq!(ml.session(0).data.lock().state, SessionState::Finished);
}

#[test]
fn negotiation_fails_if_wrong_threshold_sent() {
let ml = MessageLoop::empty(3);
ml.session(0).initialize(ml.nodes.keys().cloned().collect()).unwrap();
fn negotiation_fails_if_wrong_common_data_sent() {
fn run_test(key_common: CommonKeyData) {
let ml = MessageLoop::empty(3);
ml.session(0).initialize(ml.nodes.keys().cloned().collect()).unwrap();

let version_id = (*math::generate_random_scalar().unwrap()).clone();
assert_eq!(ml.session(0).process_message(ml.node_id(1), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions {
session: Default::default(),
sub_session: math::generate_random_scalar().unwrap().into(),
session_nonce: 0,
key_common: Some(CommonKeyData {
threshold: 1,
author: Default::default(),
public: Default::default(),
}),
versions: vec![version_id.clone().into()]
})), Ok(()));
assert_eq!(ml.session(0).process_message(ml.node_id(2), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions {
session: Default::default(),
sub_session: math::generate_random_scalar().unwrap().into(),
session_nonce: 0,
key_common: Some(key_common),
versions: vec![version_id.clone().into()]
})), Err(Error::InvalidMessage));
}

run_test(CommonKeyData {
threshold: 2,
author: Default::default(),
public: Default::default(),
});

let version_id = (*math::generate_random_scalar().unwrap()).clone();
assert_eq!(ml.session(0).process_message(ml.node_id(1), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions {
session: Default::default(),
sub_session: math::generate_random_scalar().unwrap().into(),
session_nonce: 0,
threshold: Some(1),
versions: vec![version_id.clone().into()]
})), Ok(()));
assert_eq!(ml.session(0).process_message(ml.node_id(2), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions {
session: Default::default(),
sub_session: math::generate_random_scalar().unwrap().into(),
session_nonce: 0,
threshold: Some(2),
versions: vec![version_id.clone().into()]
})), Err(Error::InvalidMessage));
run_test(CommonKeyData {
threshold: 1,
author: H160::from_low_u64_be(1).into(),
public: Default::default(),
});

run_test(CommonKeyData {
threshold: 1,
author: H160::from_low_u64_be(2).into(),
public: Default::default(),
});
}

#[test]
Expand All @@ -822,7 +885,7 @@ mod tests {
session: Default::default(),
sub_session: math::generate_random_scalar().unwrap().into(),
session_nonce: 0,
threshold: None,
key_common: None,
versions: vec![version_id.clone().into()]
})), Err(Error::InvalidMessage));
}
Expand All @@ -832,9 +895,9 @@ mod tests {
let nodes = MessageLoop::prepare_nodes(2);
let version_id = (*math::generate_random_scalar().unwrap()).clone();
nodes.values().nth(0).unwrap().insert(Default::default(), DocumentKeyShare {
author: Default::default(),
author: H160::from_low_u64_be(2),
threshold: 1,
public: Default::default(),
public: H512::from_low_u64_be(3),
common_point: None,
encrypted_point: None,
versions: vec![DocumentKeyShareVersion {
Expand All @@ -848,8 +911,13 @@ mod tests {
// we can't be sure that node has given key version because previous ShareAdd session could fail
assert!(ml.session(0).data.lock().state != SessionState::Finished);

// check that upon completion, threshold is known
assert_eq!(ml.session(0).key_threshold(), Ok(1));
// check that upon completion, common key data is known
assert_eq!(ml.session(0).common_key_data(), Ok(DocumentKeyShare {
author: H160::from_low_u64_be(2),
threshold: 1,
public: H512::from_low_u64_be(3),
..Default::default()
}));
}

#[test]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -800,7 +800,7 @@ impl SessionImpl {
.wait()?
.expect("initialize_share_change_session is only called on share change master; negotiation session completes with some on master; qed");
let selected_version_holders = negotiation_session.version_holders(&selected_version)?;
let selected_version_threshold = negotiation_session.key_threshold()?;
let selected_version_threshold = negotiation_session.common_key_data()?.threshold;

// prepare session change plan && check if something needs to be changed
let old_nodes_set = selected_version_holders;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ use key_server_cluster::cluster_sessions::ClusterSession;
use key_server_cluster::math;
use key_server_cluster::message::{Message, ShareAddMessage, ShareAddConsensusMessage, ConsensusMessageOfShareAdd,
InitializeConsensusSessionOfShareAdd, KeyShareCommon, NewKeysDissemination, ShareAddError,
ConfirmConsensusInitialization};
ConfirmConsensusInitialization, CommonKeyData};
use key_server_cluster::jobs::job_session::JobTransport;
use key_server_cluster::jobs::dummy_job::{DummyJob, DummyJobTransport};
use key_server_cluster::jobs::servers_set_change_access_job::{ServersSetChangeAccessJob, ServersSetChangeAccessRequest};
Expand Down Expand Up @@ -469,9 +469,9 @@ impl<T> SessionImpl<T> where T: SessionTransport {
// update data
data.state = SessionState::WaitingForKeysDissemination;
data.new_key_share = Some(NewKeyShare {
threshold: message.threshold,
author: message.author.clone().into(),
joint_public: message.joint_public.clone().into(),
threshold: message.key_common.threshold,
author: message.key_common.author.clone().into(),
joint_public: message.key_common.public.clone().into(),
common_point: message.common_point.clone().map(Into::into),
encrypted_point: message.encrypted_point.clone().map(Into::into),
});
Expand Down Expand Up @@ -645,9 +645,11 @@ impl<T> SessionImpl<T> where T: SessionTransport {
core.transport.send(new_node, ShareAddMessage::KeyShareCommon(KeyShareCommon {
session: core.meta.id.clone().into(),
session_nonce: core.nonce,
threshold: old_key_share.threshold,
author: old_key_share.author.clone().into(),
joint_public: old_key_share.public.clone().into(),
key_common: CommonKeyData {
threshold: old_key_share.threshold,
author: old_key_share.author.into(),
public: old_key_share.public.into(),
},
common_point: old_key_share.common_point.clone().map(Into::into),
encrypted_point: old_key_share.encrypted_point.clone().map(Into::into),
id_numbers: old_key_version.id_numbers.iter()
Expand Down
Loading

0 comments on commit 6be4536

Please sign in to comment.