
fix: remove entries associated to empty chains in compact operations
tbrezot committed Nov 21, 2023
1 parent 60a7362 commit ecddafb
Showing 5 changed files with 288 additions and 88 deletions.
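In essence, the fix makes the compact operation drop the Entry Table entries whose chains no longer index any value, instead of re-inserting them under the new key and label. The following is a minimal, self-contained sketch of that filtering step; it uses a `u64` stand-in for the crate's `Token` type rather than the actual Findex structures:

use std::collections::{HashMap, HashSet};

type Token = u64;
type Value = Vec<u8>;

/// Keeps the tokens of the entries that must be re-inserted after compaction:
/// those whose remaining associations are non-empty, and those for which no
/// remaining associations were given.
fn remaining_entry_tokens(
    entry_tokens: &HashSet<Token>,
    remaining_associations: &HashMap<Token, HashSet<Value>>,
) -> HashSet<Token> {
    entry_tokens
        .iter()
        .copied()
        .filter(|token| {
            remaining_associations
                .get(token)
                .map(|values| !values.is_empty())
                .unwrap_or(true)
        })
        .collect()
}

fn main() {
    let entry_tokens: HashSet<Token> = HashSet::from([1, 2, 3]);
    let mut remaining_associations: HashMap<Token, HashSet<Value>> = HashMap::new();
    remaining_associations.insert(1, HashSet::from([b"kept".to_vec()]));
    remaining_associations.insert(2, HashSet::new()); // chain became empty: entry 2 is dropped
    // Token 3 is absent from the map: its entry is kept as-is.

    let kept = remaining_entry_tokens(&entry_tokens, &remaining_associations);
    assert_eq!(kept, HashSet::from([1, 3]));
}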
File renamed without changes.
30 changes: 22 additions & 8 deletions src/findex_mm/compact.rs
@@ -127,18 +127,30 @@ impl<
&self,
rng: Arc<Mutex<impl CryptoRngCore>>,
new_key: &<Self as MmEnc<SEED_LENGTH, UserError>>::Key,
indexed_map: HashMap<Token, HashSet<Vec<u8>>>,
remaining_associations: HashMap<Token, HashSet<Vec<u8>>>,
mut continuation: CompactingData<ChainTable>,
new_label: &Label,
) -> Result<(), Error<UserError>> {
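// Keep the tokens of the entries whose remaining associations are non-empty, or
// for which no remaining associations were given.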
let remaining_entry_tokens = continuation
.entries
.keys()
.filter(|token| {
remaining_associations
.get(token)
.map(|associated_values| !associated_values.is_empty())
.unwrap_or(true)
})
.copied()
.collect::<HashSet<_>>();

debug!(
"Step 1: computes new chains from the given `indexed_map` and updates associated \
entries."
);

// Allocates a lower bound on the number of links.
let mut new_links = HashMap::with_capacity(indexed_map.len());
for (entry_token, chain_values) in indexed_map {
let mut new_links = HashMap::with_capacity(remaining_associations.len());
for (entry_token, chain_values) in remaining_associations {
let chain_links = self.decompose::<BLOCK_LENGTH, LINE_WIDTH>(
&chain_values
.into_iter()
@@ -184,11 +196,13 @@ impl<
let rng = &mut *rng.lock().expect("could not lock mutex");
for (token, entry) in continuation.entries {
old_entries.insert(token);
new_entries.insert(
self.entry_table
.tokenize(new_key, &entry.tag_hash, Some(new_label)),
self.entry_table.prepare(rng, new_key, entry.into())?,
);
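// Only re-insert the entries whose token belongs to the remaining set: entries
// associated with now-empty chains are dropped from the Entry Table.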
if remaining_entry_tokens.contains(&token) {
new_entries.insert(
self.entry_table
.tokenize(new_key, &entry.tag_hash, Some(new_label)),
self.entry_table.prepare(rng, new_key, entry.into())?,
);
}
}
}
let new_links_tokens = new_links.keys().copied().collect();
50 changes: 38 additions & 12 deletions src/index/mod.rs
@@ -28,6 +28,7 @@ pub use structs::{
IndexedValueToKeywordsMap, Keyword, KeywordToDataMap, Keywords, Label, Location, UserKey,
};

/// User-friendly interface to the Findex algorithm.
#[async_trait(?Send)]
pub trait Index<EntryTable: DxEnc<ENTRY_LENGTH>, ChainTable: DxEnc<LINK_LENGTH>> {
/// Index error type.
@@ -54,24 +55,49 @@ pub trait Index<EntryTable: DxEnc<ENTRY_LENGTH>, ChainTable: DxEnc<LINK_LENGTH>>
interrupt: &Interrupt,
) -> Result<KeywordToDataMap, Self::Error>;

/// Indexes the given `IndexedValue`s for the given `Keyword`s. Returns the
/// set of keywords added as keys to the index.
/// Adds the given associations to the index.
///
/// Returns the set of keywords added as keys to the index.
async fn add(
&self,
key: &UserKey,
label: &Label,
keywords: IndexedValueToKeywordsMap,
associations: IndexedValueToKeywordsMap,
) -> Result<Keywords, Self::Error>;

/// Removes the indexing of the given `IndexedValue`s for the given
/// `Keyword`s. Returns the set of keywords added to the index.
/// Removes the given associations from the index.
///
/// This operation actually adds the negation of the given associations to the index,
/// effectively increasing the index size. The compact operation is in charge of removing
/// associations that have been negated.
///
/// Returns the set of keywords added to the index.
async fn delete(
&self,
key: &UserKey,
label: &Label,
keywords: IndexedValueToKeywordsMap,
associations: IndexedValueToKeywordsMap,
) -> Result<Keywords, Self::Error>;
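Because `delete` records negations rather than erasing data in place, a toy model may help picture what compaction later undoes. The sketch below is illustrative only and does not use the crate's API: the `Op` enum and `net_index` function are made up for this example, and plain strings stand in for keywords and indexed values.

use std::collections::{HashMap, HashSet};

enum Op {
    Add,
    Delete,
}

/// Replays a log of (keyword, value, operation) records and returns, for each
/// keyword, the values whose additions were not cancelled by a deletion. This
/// mimics the net effect the compact operation produces on the encrypted index.
fn net_index(log: &[(&str, &str, Op)]) -> HashMap<String, HashSet<String>> {
    let mut index: HashMap<String, HashSet<String>> = HashMap::new();
    for (keyword, value, op) in log {
        let values = index.entry(keyword.to_string()).or_default();
        match op {
            Op::Add => {
                values.insert(value.to_string());
            }
            Op::Delete => {
                values.remove(*value);
            }
        }
    }
    // Keywords left with no values are removed entirely, which is what this
    // commit ensures for the corresponding Entry Table entries.
    index.retain(|_, values| !values.is_empty());
    index
}

fn main() {
    let log = [
        ("alice", "doc1", Op::Add),
        ("alice", "doc2", Op::Add),
        ("alice", "doc2", Op::Delete), // negation recorded by `delete`
        ("bob", "doc3", Op::Add),
        ("bob", "doc3", Op::Delete), // bob's chain becomes empty
    ];
    let index = net_index(&log);
    assert!(index["alice"].contains("doc1"));
    assert!(!index["alice"].contains("doc2"));
    assert!(!index.contains_key("bob"));
}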

/// Compacts a portion of the index.
///
/// It re-encrypts the entire Entry Table, which resets the knowledge of the index an
/// attacker may have acquired. To this end, at least one of the key and the label needs
/// to be changed.
///
/// It partially compacts and re-encrypts the Chain Table. The compacting operation:
/// - removes duplicated associations;
/// - removes deleted associations;
/// - removes obsolete indexed data;
/// - ensures the padding is minimal.
///
/// The `filter` is called on batches of the data read from the index; only the data it
/// returns is indexed back.
///
/// The entire index is statistically guaranteed to be compacted after calling this operation
/// `n_compact_to_full` times. For example, if one is passed, the entire index will be
/// compacted at once. If ten is passed, the entire index should have been compacted after the
/// tenth call.
async fn compact<
F: Future<Output = Result<HashSet<Location>, String>>,
Filter: Fn(HashSet<Location>) -> F,
Expand All @@ -86,6 +112,7 @@ pub trait Index<EntryTable: DxEnc<ENTRY_LENGTH>, ChainTable: DxEnc<LINK_LENGTH>>
) -> Result<(), Self::Error>;
}
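The `filter` argument is the caller's hook for dropping obsolete data during a compaction. Below is a minimal sketch of such a callback, assuming an in-memory set of live locations and a `tokio` async `main` for demonstration; the `Location` new-type and the `keep_live_locations` function are stand-ins for this example, not items of the crate.

use std::collections::HashSet;

// Stand-in for the crate's `Location` type: an opaque byte identifier.
#[derive(Debug, PartialEq, Eq, Hash)]
struct Location(Vec<u8>);

/// A data-retention filter of the shape expected by `compact`: it receives a
/// batch of locations read from the index and returns only those still in use,
/// so that obsolete data is not indexed back.
async fn keep_live_locations(
    batch: HashSet<Location>,
    live: &HashSet<Location>,
) -> Result<HashSet<Location>, String> {
    Ok(batch
        .into_iter()
        .filter(|location| live.contains(location))
        .collect())
}

#[tokio::main]
async fn main() {
    let live = HashSet::from([Location(b"row-1".to_vec()), Location(b"row-3".to_vec())]);
    let batch = HashSet::from([
        Location(b"row-1".to_vec()),
        Location(b"row-2".to_vec()), // no longer exists in the database
    ]);
    let kept = keep_live_locations(batch, &live).await.unwrap();
    assert_eq!(kept, HashSet::from([Location(b"row-1".to_vec())]));
}

In a real call, such a function would be wrapped in a closure capturing the live set, e.g. `|batch| keep_live_locations(batch, &live)`, so that it matches the `Fn(HashSet<Location>) -> F` bound shown in the trait.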

/// The `Findex` type implements the Findex algorithm.
#[derive(Debug)]
pub struct Findex<
UserError: CallbackErrorTrait,
@@ -401,16 +428,15 @@ impl<
let remaining_values = associated_values
.into_iter()
.filter(|value| {
if let Some(location) = value.get_data() {
remaining_locations.contains(location)
} else {
true
}
// Filter out obsolete locations.
value
.get_data()
.map(|location| remaining_locations.contains(location))
.unwrap_or(true)
})
.collect::<HashSet<_>>();
(entry_token, remaining_values)
})
.filter(|(_, remaining_values)| !remaining_values.is_empty())
.collect::<HashMap<_, _>>();

self.findex_graph
