Skip to content

Commit

Permalink
fix: improve naming, docs and avoid reallocations
Browse files Browse the repository at this point in the history
  • Loading branch information
tbrezot committed Jul 26, 2023
1 parent ec28af8 commit b458148
Show file tree
Hide file tree
Showing 2 changed files with 46 additions and 77 deletions.
64 changes: 22 additions & 42 deletions refactored/src/findex_mm/compact.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ impl<
ChainTable: DxEnc<LINK_LENGTH, Error = Error<UserError>>,
> FindexMultiMap<UserError, EntryTable, ChainTable>
{
/// Returns an unordered list of all Entry Table tokens.
/// Returns the set of Entry Table tokens.
pub async fn dump_entry_tokens(
&self,
) -> Result<Vec<<EntryTable as DxEnc<ENTRY_LENGTH>>::Token>, Error<UserError>> {
Expand Down Expand Up @@ -72,7 +72,7 @@ impl<
.map(Link)
})
})
.collect::<Result<Vec<Link>, Error<UserError>>>()?;
.collect::<Result<Vec<_>, _>>()?;

indexed_values.insert(
*entry_token,
Expand All @@ -83,28 +83,6 @@ impl<
Ok((indexed_values, CompactingData { metadata, entries }))
}

// TODO: return metadata + batch_entries + indexed_values
// TODO: what is the appropriate level for compacting operation? what does
// compacting mean for the multi-map? for the graph? for the index?
//
// TODO: put all this in Index?
// + need to pass through FindexGraph in order to deserialize
// `Locations`
// //
// // Filter obsolete values.
// //
// let remaining_values = self
// .db
//
// .filter_obsolete_values(indexed_values.values().flatten().cloned().collect())
// .await?;
//
//
// Recompose chains and update associated entries. A new seed is randomly
// generated to derive new token and encryption keys. The last UID stored is
// updated accordingly.
//

/// Completes the compacting operation:
/// 1. computes new links from the given `indexed_values` and updates
/// associated entries.
Expand All @@ -115,19 +93,19 @@ impl<
&mut self,
rng: Arc<Mutex<impl CryptoRngCore>>,
new_key: &<Self as MmEnc<SEED_LENGTH, UserError>>::Key,
indexed_values: HashMap<<EntryTable as DxEnc<ENTRY_LENGTH>>::Token, HashSet<Vec<u8>>>,
indexed_map: HashMap<<EntryTable as DxEnc<ENTRY_LENGTH>>::Token, HashSet<Vec<u8>>>,
mut continuation: CompactingData<EntryTable, ChainTable>,
) -> Result<(), Error<UserError>> {
//
// 1. computes new links from the given `indexed_values` and update associated
// 1. computes new chains from the given `indexed_map` and update associated
// entries.
//

// Allocate a lower bound on the number of links.
let mut new_links = HashMap::with_capacity(indexed_values.len());
for (entry_token, values) in indexed_values {
let links = self.decompose::<BLOCK_LENGTH, LINE_LENGTH>(
&values
let mut new_links = HashMap::with_capacity(indexed_map.len());
for (entry_token, chain_values) in indexed_map {
let chain_links = self.decompose::<BLOCK_LENGTH, LINE_LENGTH>(
&chain_values
.into_iter()
.map(|v| (Operation::Addition, v))
.collect::<Vec<_>>(),
Expand All @@ -140,23 +118,25 @@ impl<
})?;

let rng = &mut *rng.lock().expect("could not lock mutex");
let mut new_entry =
Entry::<ChainTable>::new(self.chain_table.gen_seed(rng), old_entry.hash, None);

let ct_key = self.chain_table.derive_keys(&new_entry.seed);
let new_chain_tokens =
self.derive_chain_tokens(&ct_key, new_entry.hash.into(), links.len())?;
new_entry.token = new_chain_tokens.last().copied();
*old_entry = new_entry;
for (token, link) in new_chain_tokens.into_iter().zip(links) {
new_links.insert(token, self.chain_table.prepare(&mut *rng, &ct_key, link.0)?);
let mut new_entry = Entry::new(self.chain_table.gen_seed(rng), old_entry.hash, None);

let chain_key = self.chain_table.derive_keys(&new_entry.seed);
let chain_tokens =
self.derive_chain_tokens(&chain_key, new_entry.hash.into(), chain_links.len())?;
new_entry.token = chain_tokens.last().copied();
for (token, link) in chain_tokens.into_iter().zip(chain_links) {
new_links.insert(
token,
self.chain_table.prepare(&mut *rng, &chain_key, link.0)?,
);
}
*old_entry = new_entry;
}

let old_links = continuation
.metadata
.iter()
.flat_map(|(_, (_, chain_tokens))| chain_tokens)
.values()
.flat_map(|(_, chain_tokens)| chain_tokens)
.copied()
.collect();

Expand Down
59 changes: 24 additions & 35 deletions refactored/src/findex_mm/mm.rs
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ impl<
ChainTable: DxEnc<LINK_LENGTH, Error = Error<UserError>>,
> FindexMultiMap<UserError, EntryTable, ChainTable>
{
/// Instantiate a new `Findex Multi-map`.
/// Instantiate a new `FindexMultiMap`.
pub fn new(entry_table: EntryTable, chain_table: ChainTable) -> Self {
Self {
entry_table,
Expand Down Expand Up @@ -123,48 +123,41 @@ impl<
&self,
modifications: &[(Operation, <Self as MmEnc<SEED_LENGTH, UserError>>::Item)],
) -> Result<Vec<Link>, Error<UserError>> {
let mut res = Vec::with_capacity(modifications.len());
let mut ct_value = Link::new();
// Allocate a lower bound on the number of chain links.
let mut chain = Vec::with_capacity(modifications.len());
let mut link = Link::new();
let mut pos = 0;

for (operation, value) in modifications {
let full_block_number = value.len() / BLOCK_LENGTH;

for i in 0..full_block_number {
ct_value
.set_operation(pos, *operation)
.map_err(|e| CoreError::Crypto(e.to_string()))?;
ct_value
.set_block(pos, &value[i * BLOCK_LENGTH..(i + 1) * BLOCK_LENGTH], false)
.map_err(|e| CoreError::Crypto(e.to_string()))?;
link.set_operation(pos, *operation)?;
link.set_block(pos, &value[i * BLOCK_LENGTH..(i + 1) * BLOCK_LENGTH], false)?;
pos += 1;
if pos == LINE_LENGTH {
res.push(ct_value);
ct_value = Link::new();
chain.push(link);
link = Link::new();
pos = 0
}
}

ct_value
.set_operation(pos, *operation)
.map_err(|e| CoreError::Crypto(e.to_string()))?;
ct_value
.set_block(pos, &value[full_block_number * BLOCK_LENGTH..], true)
.map_err(|e| CoreError::Crypto(e.to_string()))?;
link.set_operation(pos, *operation)?;
link.set_block(pos, &value[full_block_number * BLOCK_LENGTH..], true)?;
pos += 1;
if pos == LINE_LENGTH {
res.push(ct_value);
ct_value = Link::new();
chain.push(link);
link = Link::new();
pos = 0
}
}

// Don't forget the last line if some blocks were written to it.
if pos != 0 {
res.push(ct_value);
chain.push(link);
}

Ok(res)
Ok(chain)
}

/// Recomposes the given sequence of Chain Table values into Findex values.
Expand All @@ -181,25 +174,21 @@ impl<
&self,
chain: &[Link],
) -> Result<HashSet<<Self as MmEnc<SEED_LENGTH, UserError>>::Item>, Error<UserError>> {
let mut res = HashSet::with_capacity(chain.len());
// Allocate an upper bound on the number of values.
let mut indexed_values = HashSet::with_capacity(chain.len() * LINE_LENGTH);
let mut stack = Vec::new();
let mut current_operation = None;
let mut stack = Vec::with_capacity(chain.len() / LINE_LENGTH);

for ct_value in chain.iter() {
for pos in 0..LINE_LENGTH {
let (is_terminating, data) = ct_value
.get_block(pos)
.map_err(|e| CoreError::Crypto(e.to_string()))?;
let operation = ct_value
.get_operation(pos)
.map_err(|e| CoreError::Crypto(e.to_string()))?;
let (is_terminating, data) = ct_value.get_block(pos)?;
let operation = ct_value.get_operation(pos)?;

if current_operation.is_some() && current_operation.as_ref() != Some(&operation) {
return Err(CoreError::Crypto(
return Err(Error::<UserError>::Crypto(
"findex value cannot be decomposed into blocks with different operations"
.to_string(),
)
.into());
));
}

if is_terminating {
Expand All @@ -211,9 +200,9 @@ impl<
findex_value.extend(data);

if Operation::Addition == operation {
res.insert(findex_value);
indexed_values.insert(findex_value);
} else {
res.remove(&findex_value);
indexed_values.remove(&findex_value);
}

current_operation = None;
Expand All @@ -226,7 +215,7 @@ impl<
}
}
}
Ok(res)
Ok(indexed_values)
}

/// Derives the chain metadata from the given entry:
Expand Down

0 comments on commit b458148

Please sign in to comment.