From fad9a11c19e21dccc1306efedbadb2339c7701d3 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Mon, 22 Apr 2024 13:08:13 +0200 Subject: [PATCH 001/198] initial commit --- Cargo.lock | 60 ++++++++++- Cargo.toml | 3 +- iroh-willow/Cargo.toml | 60 +++++++++++ iroh-willow/LICENSE-APACHE | 201 +++++++++++++++++++++++++++++++++++++ iroh-willow/LICENSE-MIT | 25 +++++ iroh-willow/README.md | 20 ++++ iroh-willow/src/lib.rs | 0 7 files changed, 366 insertions(+), 3 deletions(-) create mode 100644 iroh-willow/Cargo.toml create mode 100644 iroh-willow/LICENSE-APACHE create mode 100644 iroh-willow/LICENSE-MIT create mode 100644 iroh-willow/README.md create mode 100644 iroh-willow/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 0a06fd14bf..a72a7cfb85 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -37,7 +37,7 @@ dependencies = [ "cfg-if", "once_cell", "version_check", - "zerocopy", + "zerocopy 0.7.32", ] [[package]] @@ -2917,6 +2917,42 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "iroh-willow" +version = "0.14.0" +dependencies = [ + "anyhow", + "bytes", + "derive_more", + "ed25519-dalek", + "flume", + "futures", + "hex", + "iroh-base", + "iroh-metrics", + "iroh-net", + "iroh-test", + "num_enum", + "postcard", + "proptest", + "quinn", + "rand", + "rand_chacha", + "rand_core", + "redb 2.0.0", + "self_cell", + "serde", + "strum 0.25.0", + "tempfile", + "test-strategy", + "thiserror", + "tokio", + "tokio-stream", + "tokio-util", + "tracing", + "zerocopy 0.8.0-alpha.7", +] + [[package]] name = "is-terminal" version = "0.4.12" @@ -6697,7 +6733,16 @@ version = "0.7.34" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" dependencies = [ - "zerocopy-derive", + "zerocopy-derive 0.7.32", +] + +[[package]] +name = "zerocopy" +version = "0.8.0-alpha.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a24d6914f948ad0c1eaf3f2cb03a66e674714280d020ea8d955f765f8abb2e7a" +dependencies = [ + "zerocopy-derive 0.8.0-alpha.7", ] [[package]] @@ -6711,6 +6756,17 @@ dependencies = [ "syn 2.0.66", ] +[[package]] +name = "zerocopy-derive" +version = "0.8.0-alpha.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e862f7936bea2c96af2769d9d60ff534da9af29dd59943519403256f30bf5ac3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.59", +] + [[package]] name = "zeroize" version = "1.7.0" diff --git a/Cargo.toml b/Cargo.toml index a6099c70fa..2c23fef057 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,7 +10,8 @@ members = [ "iroh-docs", "iroh-test", "iroh-net/bench", - "iroh-cli" + "iroh-cli", + "iroh-willow", ] resolver = "2" diff --git a/iroh-willow/Cargo.toml b/iroh-willow/Cargo.toml new file mode 100644 index 0000000000..c40edb520a --- /dev/null +++ b/iroh-willow/Cargo.toml @@ -0,0 +1,60 @@ +[package] +name = "iroh-willow" +version = "0.14.0" +edition = "2021" +readme = "README.md" +description = "willow protocol implementation for iroh" +license = "MIT/Apache-2.0" +authors = ["n0 team"] +repository = "https://github.com/n0-computer/iroh" + +# Sadly this also needs to be updated in .github/workflows/ci.yml +rust-version = "1.75" + +[lints] +workspace = true + +[dependencies] +anyhow = "1" +derive_more = { version = "1.0.0-beta.1", features = ["debug", "deref", "display", "from", "try_into", "into", "as_ref"] } +ed25519-dalek = { version = "2.0.0", features = ["serde", "rand_core"] } +flume = "0.11" +iroh-base = { version = "0.14.0", path = "../iroh-base" } +iroh-metrics = { version = "0.14.0", path = "../iroh-metrics", optional = true } +num_enum = "0.7" +postcard = { version = "1", default-features = false, features = ["alloc", "use-std", "experimental-derive"] } +rand = "0.8.5" +rand_core = "0.6.4" +serde = { version = "1.0.164", features = ["derive"] } +strum = { version = "0.25", features = ["derive"] } +bytes = { version = "1.4", 
features = ["serde"] } +hex = "0.4" +thiserror = "1" +tracing = "0.1" +tokio = { version = "1", features = ["sync"] } + +# fs-store +redb = { version = "2.0.0" } +tempfile = { version = "3.4" } + +# net +iroh-net = { version = "0.14.0", optional = true, path = "../iroh-net" } +tokio-util = { version = "0.7", optional = true, features = ["codec", "io-util", "io"] } +tokio-stream = { version = "0.1", optional = true, features = ["sync"]} +quinn = { version = "0.10", optional = true } +futures = { version = "0.3", optional = true } +self_cell = "1.0.3" +zerocopy = { version = "0.8.0-alpha.7", features = ["derive"] } + +[dev-dependencies] +iroh-test = { path = "../iroh-test" } +rand_chacha = "0.3.1" +tokio = { version = "1", features = ["sync", "macros"] } +proptest = "1.2.0" +tempfile = "3.4" +test-strategy = "0.3.1" + +[features] +default = ["net", "metrics"] +net = ["iroh-net", "tokio/io-util", "tokio-stream", "tokio-util", "quinn", "futures"] +metrics = ["iroh-metrics"] diff --git a/iroh-willow/LICENSE-APACHE b/iroh-willow/LICENSE-APACHE new file mode 100644 index 0000000000..e2db928492 --- /dev/null +++ b/iroh-willow/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright [2023] [N0, INC] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/iroh-willow/LICENSE-MIT b/iroh-willow/LICENSE-MIT new file mode 100644 index 0000000000..dfd85baf84 --- /dev/null +++ b/iroh-willow/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2023 + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/iroh-willow/README.md b/iroh-willow/README.md new file mode 100644 index 0000000000..f488797394 --- /dev/null +++ b/iroh-willow/README.md @@ -0,0 +1,20 @@ +# iroh-willow + +Minimal implementation of Willow, Meadowcap, and WGPS for iroh + +# License + +This project is licensed under either of + + * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or + http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or + http://opensource.org/licenses/MIT) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in this project by you, as defined in the Apache-2.0 license, +shall be dual licensed as above, without any additional terms or conditions. diff --git a/iroh-willow/src/lib.rs b/iroh-willow/src/lib.rs new file mode 100644 index 0000000000..e69de29bb2 From d04db1b28bd13db4840fc532b1d70d25bdf4d992 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Mon, 22 Apr 2024 13:08:32 +0200 Subject: [PATCH 002/198] initial implementation work --- Cargo.lock | 1 + iroh-willow/Cargo.toml | 1 + iroh-willow/src/lib.rs | 8 + iroh-willow/src/proto.rs | 4 + iroh-willow/src/proto/keys.rs | 549 +++++++++++++++++++++++++++++ iroh-willow/src/proto/meadowcap.rs | 241 +++++++++++++ iroh-willow/src/proto/wgps.rs | 472 +++++++++++++++++++++++++ iroh-willow/src/proto/willow.rs | 281 +++++++++++++++ iroh-willow/src/session.rs | 471 +++++++++++++++++++++++++ 9 files changed, 2028 insertions(+) create mode 100644 iroh-willow/src/proto.rs create mode 100644 iroh-willow/src/proto/keys.rs create mode 100644 iroh-willow/src/proto/meadowcap.rs create mode 100644 iroh-willow/src/proto/wgps.rs create mode 100644 iroh-willow/src/proto/willow.rs create mode 100644 iroh-willow/src/session.rs diff --git a/Cargo.lock b/Cargo.lock index a72a7cfb85..20498bfb50 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2927,6 +2927,7 @@ dependencies = [ "ed25519-dalek", "flume", 
"futures", + "genawaiter", "hex", "iroh-base", "iroh-metrics", diff --git a/iroh-willow/Cargo.toml b/iroh-willow/Cargo.toml index c40edb520a..01cf4a552c 100644 --- a/iroh-willow/Cargo.toml +++ b/iroh-willow/Cargo.toml @@ -45,6 +45,7 @@ quinn = { version = "0.10", optional = true } futures = { version = "0.3", optional = true } self_cell = "1.0.3" zerocopy = { version = "0.8.0-alpha.7", features = ["derive"] } +genawaiter = "0.99.1" [dev-dependencies] iroh-test = { path = "../iroh-test" } diff --git a/iroh-willow/src/lib.rs b/iroh-willow/src/lib.rs index e69de29bb2..5aa433ef94 100644 --- a/iroh-willow/src/lib.rs +++ b/iroh-willow/src/lib.rs @@ -0,0 +1,8 @@ +//! Implementation of willow + +#![allow(missing_docs, unused_imports, dead_code)] + +pub mod session; +pub mod proto; +pub mod net; +pub mod store; diff --git a/iroh-willow/src/proto.rs b/iroh-willow/src/proto.rs new file mode 100644 index 0000000000..8ccb4c7880 --- /dev/null +++ b/iroh-willow/src/proto.rs @@ -0,0 +1,4 @@ +pub mod wgps; +pub mod willow; +pub mod meadowcap; +pub mod keys; diff --git a/iroh-willow/src/proto/keys.rs b/iroh-willow/src/proto/keys.rs new file mode 100644 index 0000000000..79abafdf37 --- /dev/null +++ b/iroh-willow/src/proto/keys.rs @@ -0,0 +1,549 @@ +//! Keys used in iroh-sync + +use std::{cmp::Ordering, fmt, str::FromStr}; + +use ed25519_dalek::{SignatureError, Signer, SigningKey, VerifyingKey}; +use iroh_base::base32; +use rand_core::CryptoRngCore; +use serde::{Deserialize, Serialize}; + +// use crate::store::PublicKeyStore; + +pub const PUBLIC_KEY_LENGTH: usize = ed25519_dalek::PUBLIC_KEY_LENGTH; +pub const SECRET_KEY_LENGTH: usize = ed25519_dalek::SECRET_KEY_LENGTH; +pub const SIGNATURE_LENGTH: usize = ed25519_dalek::SIGNATURE_LENGTH; + +pub type SubspaceId = UserId; + +pub type Signature = ed25519_dalek::Signature; + +/// User key to insert entries in a [`crate::Replica`] +/// +/// Internally, an author is a [`SigningKey`] which is used to sign entries. 
+#[derive(Clone, Serialize, Deserialize)] +pub struct UserSecretKey(SigningKey); + +impl UserSecretKey { + /// Create a new [`UserSecretKey`] with a random key. + pub fn generate(rng: &mut R) -> Self { + let signing_key = SigningKey::generate(rng); + UserSecretKey(signing_key) + } + + /// Create an [`UserSecretKey`] from a byte array. + pub fn from_bytes(bytes: &[u8; 32]) -> Self { + SigningKey::from_bytes(bytes).into() + } + + /// Returns the [`UserSecretKey`] byte representation. + pub fn to_bytes(&self) -> [u8; 32] { + self.0.to_bytes() + } + + /// Get the [`UserPublicKey`] for this author. + pub fn public_key(&self) -> UserPublicKey { + UserPublicKey(self.0.verifying_key()) + } + + /// Get the [`UserId`] for this author. + pub fn id(&self) -> UserId { + UserId::from(self.public_key()) + } + + /// Sign a message with this [`UserSecretKey`] key. + pub fn sign(&self, msg: &[u8]) -> Signature { + self.0.sign(msg) + } + + /// Strictly verify a signature on a message with this [`UserSecretKey`]'s public key. + pub fn verify(&self, msg: &[u8], signature: &Signature) -> Result<(), SignatureError> { + self.0.verify_strict(msg, signature) + } +} + +/// Identifier for an [`UserSecretKey`] +/// +/// This is the corresponding [`VerifyingKey`] for an author. It is used as an identifier, and can +/// be used to verify [`Signature`]s. +#[derive(Default, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Hash, derive_more::From)] +pub struct UserPublicKey(VerifyingKey); + +impl UserPublicKey { + /// Verify that a signature matches the `msg` bytes and was created with the [`UserSecretKey`] + /// that corresponds to this [`UserId`]. + pub fn verify(&self, msg: &[u8], signature: &Signature) -> Result<(), SignatureError> { + self.0.verify_strict(msg, signature) + } + + /// Get the byte representation of this [`UserId`]. + pub fn as_bytes(&self) -> &[u8; 32] { + self.0.as_bytes() + } + + /// Create from a slice of bytes. 
+ /// + /// Will return an error if the input bytes do not represent a valid [`ed25519_dalek`] + /// curve point. Will never fail for a byte array returned from [`Self::as_bytes`]. + /// See [`VerifyingKey::from_bytes`] for details. + pub fn from_bytes(bytes: &[u8; 32]) -> Result { + Ok(UserPublicKey(VerifyingKey::from_bytes(bytes)?)) + } +} + +#[derive(Debug, Eq, PartialEq, Copy, Clone)] +pub enum NamespaceType { + Communal, + Owned, +} + +/// Namespace key of a [`crate::Replica`]. +/// +/// Holders of this key can insert new entries into a [`crate::Replica`]. +/// Internally, a [`NamespaceSecretKey] is a [`SigningKey`] which is used to sign entries. +#[derive(Clone, Serialize, Deserialize)] +pub struct NamespaceSecretKey(SigningKey); + +impl NamespaceSecretKey { + /// Create a new [`NamespaceSecretKey] with a random key. + pub fn generate(rng: &mut R, typ: NamespaceType) -> Self { + loop { + let signing_key = SigningKey::generate(rng); + let secret_key = NamespaceSecretKey(signing_key); + if secret_key.public_key().namespace_type() == typ { + break secret_key; + } + } + } + + /// Create a [`NamespaceSecretKey] from a byte array. + pub fn from_bytes(bytes: &[u8; 32]) -> Self { + SigningKey::from_bytes(bytes).into() + } + + /// Returns the [`NamespaceSecretKey] byte representation. + pub fn to_bytes(&self) -> [u8; 32] { + self.0.to_bytes() + } + + /// Get the [`NamespacePublicKey`] for this namespace. + pub fn public_key(&self) -> NamespacePublicKey { + NamespacePublicKey(self.0.verifying_key()) + } + + /// Get the [`NamespaceId`] for this namespace. + pub fn id(&self) -> NamespaceId { + NamespaceId::from(self.public_key()) + } + + /// Sign a message with this [`NamespaceSecretKey] key. + pub fn sign(&self, msg: &[u8]) -> Signature { + self.0.sign(msg) + } + + /// Strictly verify a signature on a message with this [`NamespaceSecretKey]'s public key. 
+ pub fn verify(&self, msg: &[u8], signature: &Signature) -> Result<(), SignatureError> { + self.0.verify_strict(msg, signature) + } +} + +/// The corresponding [`VerifyingKey`] for a [`NamespaceSecretKey]. +/// It is used as an identifier, and can be used to verify [`Signature`]s. +#[derive(Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Hash, derive_more::From)] +pub struct NamespacePublicKey(VerifyingKey); + +impl NamespacePublicKey { + /// Whether this is the key for a communal namespace. + pub fn is_communal(&self) -> bool { + is_communal(self.as_bytes()) + } + + pub fn namespace_type(&self) -> NamespaceType { + match self.is_communal() { + true => NamespaceType::Communal, + false => NamespaceType::Owned, + } + } + + /// Verify that a signature matches the `msg` bytes and was created with the [`NamespaceSecretKey] + /// that corresponds to this [`NamespaceId`]. + pub fn verify(&self, msg: &[u8], signature: &Signature) -> Result<(), SignatureError> { + self.0.verify_strict(msg, signature) + } + + /// Get the byte representation of this [`NamespaceId`]. + pub fn as_bytes(&self) -> &[u8; 32] { + self.0.as_bytes() + } + + /// Create from a slice of bytes. + /// + /// Will return an error if the input bytes do not represent a valid [`ed25519_dalek`] + /// curve point. Will never fail for a byte array returned from [`Self::as_bytes`]. + /// See [`VerifyingKey::from_bytes`] for details. 
+    /// Decode a public key from its 32-byte representation.
+    ///
+    /// Fails if the bytes are not a valid ed25519 curve point.
+    pub fn from_bytes(bytes: &[u8; 32]) -> Result<Self, SignatureError> {
+        Ok(NamespacePublicKey(VerifyingKey::from_bytes(bytes)?))
+    }
+}
+
+impl fmt::Display for UserSecretKey {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", base32::fmt(self.to_bytes()))
+    }
+}
+
+impl fmt::Display for NamespaceSecretKey {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", base32::fmt(self.to_bytes()))
+    }
+}
+
+impl fmt::Display for UserPublicKey {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", base32::fmt(self.as_bytes()))
+    }
+}
+
+impl fmt::Display for NamespacePublicKey {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", base32::fmt(self.as_bytes()))
+    }
+}
+
+impl fmt::Display for UserId {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", base32::fmt(self.as_bytes()))
+    }
+}
+
+impl fmt::Display for NamespaceId {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", base32::fmt(self.as_bytes()))
+    }
+}
+
+impl fmt::Debug for NamespaceSecretKey {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "Namespace({})", self)
+    }
+}
+
+impl fmt::Debug for NamespaceId {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "NamespaceId({})", base32::fmt_short(self.0))
+    }
+}
+
+impl fmt::Debug for UserId {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "UserId({})", base32::fmt_short(self.0))
+    }
+}
+
+impl fmt::Debug for UserSecretKey {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "User({})", self)
+    }
+}
+
+impl fmt::Debug for NamespacePublicKey {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "NamespacePublicKey({})", self)
+    }
+}
+
+impl fmt::Debug for UserPublicKey {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "UserPublicKey({})", self)
+    }
+}
+
+impl FromStr for UserSecretKey {
+    type Err = anyhow::Error;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        Ok(Self::from_bytes(&base32::parse_array(s)?))
+    }
+}
+
+impl FromStr for NamespaceSecretKey {
+    type Err = anyhow::Error;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        Ok(Self::from_bytes(&base32::parse_array(s)?))
+    }
+}
+
+impl FromStr for UserPublicKey {
+    type Err = anyhow::Error;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        Self::from_bytes(&base32::parse_array(s)?).map_err(Into::into)
+    }
+}
+
+impl FromStr for NamespacePublicKey {
+    type Err = anyhow::Error;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        Self::from_bytes(&base32::parse_array(s)?).map_err(Into::into)
+    }
+}
+
+impl From<SigningKey> for UserSecretKey {
+    fn from(signing_key: SigningKey) -> Self {
+        Self(signing_key)
+    }
+}
+
+impl From<SigningKey> for NamespaceSecretKey {
+    fn from(signing_key: SigningKey) -> Self {
+        Self(signing_key)
+    }
+}
+
+impl PartialOrd for NamespacePublicKey {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for NamespacePublicKey {
+    fn cmp(&self, other: &Self) -> Ordering {
+        // Order by the raw key bytes; the curve point itself has no natural order.
+        self.0.as_bytes().cmp(other.0.as_bytes())
+    }
+}
+
+impl PartialOrd for UserPublicKey {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for UserPublicKey {
+    fn cmp(&self, other: &Self) -> Ordering {
+        self.0.as_bytes().cmp(other.0.as_bytes())
+    }
+}
+
+impl From<NamespaceSecretKey> for NamespacePublicKey {
+    fn from(value: NamespaceSecretKey) -> Self {
+        value.public_key()
+    }
+}
+
+impl From<UserSecretKey> for UserPublicKey {
+    fn from(value: UserSecretKey) -> Self {
+        value.public_key()
+    }
+}
+
+impl From<&NamespaceSecretKey> for NamespacePublicKey {
+    fn from(value: &NamespaceSecretKey) -> Self {
+        value.public_key()
+    }
+}
+
+impl From<&UserSecretKey> for UserPublicKey {
+    fn from(value: &UserSecretKey) -> Self {
+        value.public_key()
+    }
+}
+
+/// [`NamespacePublicKey`] in bytes
+#[derive(
+    Default,
+    Clone,
+    Copy,
+    PartialOrd,
+    Ord,
+    Eq,
+    PartialEq,
+    Hash,
+    derive_more::From,
+    derive_more::Into,
+    derive_more::AsRef,
+    Serialize,
+    Deserialize,
+)]
+pub struct NamespaceId([u8; 32]);
+
+/// [`UserPublicKey`] in bytes
+#[derive(
+    Default,
+    Clone,
+    Copy,
+    PartialOrd,
+    Ord,
+    Eq,
+    PartialEq,
+    Hash,
+    derive_more::From,
+    derive_more::Into,
+    derive_more::AsRef,
+    Serialize,
+    Deserialize,
+)]
+pub struct UserId([u8; 32]);
+
+impl UserId {
+    /// Convert to byte array.
+    pub fn to_bytes(&self) -> [u8; 32] {
+        self.0
+    }
+
+    /// Convert to byte slice.
+    pub fn as_bytes(&self) -> &[u8; 32] {
+        &self.0
+    }
+
+    // /// Convert into [`UserPublicKey`] by fetching from a [`PublicKeyStore`].
+    // ///
+    // /// Fails if the bytes of this [`UserId`] are not a valid [`ed25519_dalek`] curve point.
+    // pub fn public_key(
+    //     &self,
+    //     store: &S,
+    // ) -> Result {
+    //     store.author_key(self)
+    // }
+
+    /// Convert into [`UserPublicKey`].
+    ///
+    /// Fails if the bytes of this [`UserId`] are not a valid [`ed25519_dalek`] curve point.
+    pub fn into_public_key(&self) -> Result<UserPublicKey, SignatureError> {
+        UserPublicKey::from_bytes(&self.0)
+    }
+
+    /// Convert to a base32 string limited to the first 10 bytes for a friendly string
+    /// representation of the key.
+    pub fn fmt_short(&self) -> String {
+        base32::fmt_short(self.0)
+    }
+}
+
+impl NamespaceId {
+    /// Convert to byte array.
+    pub fn to_bytes(&self) -> [u8; 32] {
+        self.0
+    }
+
+    /// Convert to byte slice.
+    pub fn as_bytes(&self) -> &[u8; 32] {
+        &self.0
+    }
+
+    // /// Convert into [`NamespacePublicKey`] by fetching from a [`PublicKeyStore`].
+    // ///
+    // /// Fails if the bytes of this [`NamespaceId`] are not a valid [`ed25519_dalek`] curve point.
+    // pub fn public_key(
+    //     &self,
+    //     store: &S,
+    // ) -> Result {
+    //     store.namespace_key(self)
+    // }
+
+    /// Convert into [`NamespacePublicKey`].
+    ///
+    /// Fails if the bytes of this [`NamespaceId`] are not a valid [`ed25519_dalek`] curve point.
+    pub fn into_public_key(&self) -> Result<NamespacePublicKey, SignatureError> {
+        NamespacePublicKey::from_bytes(&self.0)
+    }
+
+    /// Convert to a base32 string limited to the first 10 bytes for a friendly string
+    /// representation of the key.
+    pub fn fmt_short(&self) -> String {
+        base32::fmt_short(self.0)
+    }
+}
+
+impl From<&[u8; 32]> for NamespaceId {
+    fn from(value: &[u8; 32]) -> Self {
+        Self(*value)
+    }
+}
+
+impl From<&[u8; 32]> for UserId {
+    fn from(value: &[u8; 32]) -> Self {
+        Self(*value)
+    }
+}
+
+impl AsRef<[u8]> for NamespaceId {
+    fn as_ref(&self) -> &[u8] {
+        &self.0
+    }
+}
+
+impl AsRef<[u8]> for UserId {
+    fn as_ref(&self) -> &[u8] {
+        &self.0
+    }
+}
+
+impl From<UserPublicKey> for UserId {
+    fn from(value: UserPublicKey) -> Self {
+        Self(*value.as_bytes())
+    }
+}
+impl From<NamespacePublicKey> for NamespaceId {
+    fn from(value: NamespacePublicKey) -> Self {
+        Self(*value.as_bytes())
+    }
+}
+
+impl From<&UserPublicKey> for UserId {
+    fn from(value: &UserPublicKey) -> Self {
+        Self(*value.as_bytes())
+    }
+}
+impl From<&NamespacePublicKey> for NamespaceId {
+    fn from(value: &NamespacePublicKey) -> Self {
+        Self(*value.as_bytes())
+    }
+}
+
+impl From<UserSecretKey> for UserId {
+    fn from(value: UserSecretKey) -> Self {
+        value.id()
+    }
+}
+impl From<NamespaceSecretKey> for NamespaceId {
+    fn from(value: NamespaceSecretKey) -> Self {
+        value.id()
+    }
+}
+
+impl TryFrom<NamespaceId> for NamespacePublicKey {
+    type Error = SignatureError;
+    fn try_from(value: NamespaceId) -> Result<Self, Self::Error> {
+        Self::from_bytes(&value.0)
+    }
+}
+
+impl TryFrom<UserId> for UserPublicKey {
+    type Error = SignatureError;
+    fn try_from(value: UserId) -> Result<Self, Self::Error> {
+        Self::from_bytes(&value.0)
+    }
+}
+
+impl FromStr for UserId {
+    type Err = anyhow::Error;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        UserPublicKey::from_str(s).map(|x| x.into())
+    }
+}
+
+impl FromStr for NamespaceId {
+    type Err = anyhow::Error;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        NamespacePublicKey::from_str(s).map(|x| x.into())
+    }
+}
+
+/// Returns `true` if the public key belongs to a communal namespace.
+pub fn is_communal(pubkey_bytes: &[u8]) -> bool {
+    let last = pubkey_bytes.last().expect("pubkey is 
not empty"); + // Check if last bit is 1. + (*last & 0x1) == 0x1 +} diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs new file mode 100644 index 0000000000..deb11fdf81 --- /dev/null +++ b/iroh-willow/src/proto/meadowcap.rs @@ -0,0 +1,241 @@ +use serde::{Deserialize, Serialize}; + +use super::{ + keys::{self, NamespaceSecretKey, PUBLIC_KEY_LENGTH}, + wgps::Area, + willow::{Entry, Unauthorised}, +}; + +pub type UserSignature = keys::Signature; +pub type UserPublicKey = keys::UserPublicKey; +pub type NamespacePublicKey = keys::NamespacePublicKey; +pub type NamespaceSignature = keys::Signature; + +pub fn is_authorised_write(entry: &Entry, token: &MeadowcapAuthorisationToken) -> bool { + let (capability, signature) = token.as_parts(); + + capability.is_valid() + && capability.access_mode() == AccessMode::Write + && capability.granted_area().includes_entry(entry) + && capability + .receiver() + .verify(&entry.encode(), signature) + .is_ok() +} + +#[derive(Debug, thiserror::Error)] +#[error("unauthorised")] +pub struct InvalidCapability; + +/// To be used as an AuthorisationToken for Willow. +#[derive(Debug, Serialize, Deserialize)] +pub struct MeadowcapAuthorisationToken { + /// Certifies that an Entry may be written. + capability: McCapability, + /// Proves that the Entry was created by the receiver of the capability. 
+ signature: UserSignature, +} + +impl MeadowcapAuthorisationToken { + pub fn from_parts(capability: McCapability, signature: UserSignature) -> Self { + Self { + capability, + signature, + } + } + pub fn as_parts(&self) -> (&McCapability, &UserSignature) { + (&self.capability, &self.signature) + } + pub fn into_parts(self) -> (McCapability, UserSignature) { + (self.capability, self.signature) + } +} + +impl From<(McCapability, UserSignature)> for MeadowcapAuthorisationToken { + fn from((capability, signature): (McCapability, UserSignature)) -> Self { + Self::from_parts(capability, signature) + } +} + +#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)] +pub enum McCapability { + Communal(CommunalCapability), + Owned(OwnedCapability), +} + +impl McCapability { + pub fn access_mode(&self) -> AccessMode { + match self { + Self::Communal(cap) => cap.access_mode, + Self::Owned(cap) => cap.access_mode, + } + } + pub fn receiver(&self) -> &UserPublicKey { + match self { + Self::Communal(cap) => cap.receiver(), + Self::Owned(cap) => cap.receiver(), + } + } + + pub fn granted_namespace(&self) -> &NamespacePublicKey { + match self { + Self::Communal(cap) => cap.granted_namespace(), + Self::Owned(cap) => cap.granted_namespace(), + } + } + + pub fn granted_area(&self) -> Area { + match self { + Self::Communal(cap) => cap.granted_area(), + Self::Owned(cap) => cap.granted_area(), + } + } + + pub fn try_granted_area(&self, area: &Area) -> Result<(), Unauthorised> { + if !self.granted_area().includes_area(area) { + Err(Unauthorised) + } else { + Ok(()) + } + } + + pub fn is_valid(&self) -> bool { + match self { + Self::Communal(cap) => cap.is_valid(), + Self::Owned(cap) => cap.is_valid(), + } + } + pub fn validate(&self) -> Result<(), InvalidCapability> { + match self.is_valid() { + true => Ok(()), + false => Err(InvalidCapability), + } + } +} + +#[derive(Debug, Serialize, Deserialize, Clone, Copy, Eq, PartialEq)] +pub enum AccessMode { + Read, + Write, +} + +/// A 
capability that implements communal namespaces. +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] +pub struct CommunalCapability { + /// The kind of access this grants. + access_mode: AccessMode, + /// The namespace in which this grants access. + namespace_key: NamespacePublicKey, + /// The subspace for which and to whom this grants access. + /// + /// Remember that we assume SubspaceId and UserPublicKey to be the same types. + user_key: UserPublicKey, + /// Successive authorisations of new UserPublicKeys, each restricted to a particular Area. + delegations: Vec, +} + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] +struct Delegation { + area: Area, + user_key: UserPublicKey, + signature: UserSignature, +} + +impl CommunalCapability { + pub fn receiver(&self) -> &UserPublicKey { + // TODO: support delegations + &self.user_key + } + + pub fn granted_namespace(&self) -> &NamespacePublicKey { + &self.namespace_key + } + + pub fn granted_area(&self) -> Area { + // TODO: support delegations + Area::subspace(self.user_key.into()) + } + + pub fn is_valid(&self) -> bool { + // TODO: support delegations + if !self.delegations.is_empty() { + return false; + } + // communal capabilities without delegations are always valid + true + } +} + +/// A capability that implements owned namespaces. +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] +pub struct OwnedCapability { + /// The kind of access this grants. + access_mode: AccessMode, + /// The namespace for which this grants access. + namespace_key: NamespacePublicKey, + /// The user to whom this grants access; granting access for the full namespace_key, not just to a subspace. + user_key: UserPublicKey, + /// Authorisation of the user_key by the namespace_key., + initial_authorisation: NamespaceSignature, + /// Successive authorisations of new UserPublicKeys, each restricted to a particular Area. 
+ delegations: Vec<(Area, UserPublicKey, UserSignature)>, +} + +impl OwnedCapability { + // TODO: zerocopy? + pub fn signable( + access_mode: AccessMode, + user_key: &UserPublicKey, + ) -> [u8; PUBLIC_KEY_LENGTH + 1] { + let mut signable = [0u8; PUBLIC_KEY_LENGTH + 1]; + signable[0] = match access_mode { + AccessMode::Read => 0x02, + AccessMode::Write => 0x03, + }; + signable[1..].copy_from_slice(user_key.as_bytes()); + signable + } + + pub fn new( + namespace_secret_key: &NamespaceSecretKey, + user_key: UserPublicKey, + access_mode: AccessMode, + ) -> Self { + let namespace_key = namespace_secret_key.public_key(); + let signable = Self::signable(access_mode, &user_key); + let initial_authorisation = namespace_secret_key.sign(&signable); + Self { + access_mode, + namespace_key, + user_key, + initial_authorisation, + delegations: Default::default(), + } + } + + pub fn receiver(&self) -> &UserPublicKey { + // TODO: support delegations + // self.delegations.last().map(|d| &d.user_key).unwrap_or(&self.user_key) + &self.user_key + } + + pub fn granted_namespace(&self) -> &NamespacePublicKey { + &self.namespace_key + } + + pub fn granted_area(&self) -> Area { + // TODO: support delegations + Area::full() + } + + pub fn is_valid(&self) -> bool { + // TODO: support delegations + if !self.delegations.is_empty() { + return false; + } + let signable = Self::signable(self.access_mode, &self.user_key); + self.namespace_key + .verify(&signable, &self.initial_authorisation) + .is_ok() + } +} diff --git a/iroh-willow/src/proto/wgps.rs b/iroh-willow/src/proto/wgps.rs new file mode 100644 index 0000000000..12bc1991ce --- /dev/null +++ b/iroh-willow/src/proto/wgps.rs @@ -0,0 +1,472 @@ +use std::cmp::Ordering; + +use ed25519_dalek::Signature; +use iroh_net::key::PublicKey; +use serde::{Deserialize, Serialize}; + +use super::{ + keys, meadowcap, + willow::{ + AuthorisationToken, AuthorisedEntry, Entry, Path, PossiblyAuthorisedEntry, SubspaceId, + Timestamp, Unauthorised, + }, +}; + 
+pub const CHALLENGE_LENGTH: usize = 32; + +// In Meadowcap, for example, StaticToken is the type McCapability +// and DynamicToken is the type UserSignature, +// which together yield a MeadowcapAuthorisationToken. + +pub type StaticToken = meadowcap::McCapability; +pub type DynamicToken = meadowcap::UserSignature; + +/// Whereas write access control is baked into the Willow data model, +/// read access control resides in the replication layer. +/// To manage read access via capabilities, all peers must cooperate in sending Entries only to peers +/// who have presented a valid read capability for the Entry. +/// We describe the details in a capability-system-agnostic way here. +/// To use Meadowcap for this approach, simply choose the type of valid McCapabilities with access mode read as the read capabilities. +pub type ReadCapability = meadowcap::McCapability; +pub type SyncSignature = meadowcap::UserSignature; +pub type Receiver = meadowcap::UserPublicKey; + +/// The different resource handles employed by the WGPS. +#[derive(Debug, Serialize, Deserialize)] +pub enum HandleType { + /// Resource handle for the private set intersection part of private area intersection. + /// More precisely, an IntersectionHandle stores a PsiGroup member together with one of two possible states: + /// * pending (waiting for the other peer to perform scalar multiplication), + /// * completed (both peers performed scalar multiplication). + IntersectionHandle, + + /// Resource handle for ReadCapabilities that certify access to some Entries. + CapabilityHandle, + + /// Resource handle for AreaOfInterests that peers wish to sync. + AreaOfInterestHandle, + + /// Resource handle that controls the matching from Payload transmissions to Payload requests. + PayloadRequestHandle, + + /// Resource handle for StaticTokens that peers need to transmit. + StaticTokenHandle, +} + +/// The different logical channels employed by the WGPS. 
+#[derive(Debug, Serialize, Deserialize)] +pub enum LogicalChannel { + /// Logical channel for performing 3d range-based set reconciliation. + ReconciliationChannel, + + /// Logical channel for transmitting Entries and Payloads outside of 3d range-based set reconciliation. + DataChannel, + + /// Logical channel for controlling the binding of new IntersectionHandles. + IntersectionChannel, + + /// Logical channel for controlling the binding of new CapabilityHandles. + CapabilityChannel, + + /// Logical channel for controlling the binding of new AreaOfInterestHandles. + AreaOfInterestChannel, + + /// Logical channel for controlling the binding of new PayloadRequestHandles. + PayloadRequestChannel, + + /// Logical channel for controlling the binding of new StaticTokenHandles. + StaticTokenChannel, +} + +#[derive(Debug, Serialize, Deserialize, Hash, Eq, PartialEq, Clone, Copy, derive_more::From)] +pub struct AreaOfInterestHandle(u64); + +#[derive(Debug, Serialize, Deserialize, Hash, Eq, PartialEq, Clone, Copy, derive_more::From)] +pub struct IntersectionHandle(u64); + +#[derive(Debug, Serialize, Deserialize, Hash, Eq, PartialEq, Clone, Copy, derive_more::From)] +pub struct CapabilityHandle(u64); + +#[derive(Debug, Serialize, Deserialize, Hash, Eq, PartialEq, Clone, Copy, derive_more::From)] +pub struct StaticTokenHandle(u64); + +/// Complete the commitment scheme to determine the challenge for read authentication. +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] +pub struct CommitmentReveal { + /// The nonce of the sender, encoded as a big-endian unsigned integer. + nonce: [u8; CHALLENGE_LENGTH], +} + +// skip: Private Area Intersection + +/// A grouping of Entries that are among the newest in some store. +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] +pub struct AreaOfInterest { + /// To be included in this AreaOfInterest, an Entry must be included in the area. 
+ pub area: Area, + /// To be included in this AreaOfInterest, an Entry’s timestamp must be among the max_count greatest Timestamps, unless max_count is zero. + pub max_count: u64, + /// The total payload_lengths of all included Entries is at most max_size, unless max_size is zero. + pub max_size: u64, +} + +/// A grouping of Entries. +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] +pub struct Area { + /// To be included in this Area, an Entry’s subspace_id must be equal to the subspace_id, unless it is any. + pub subspace_id: SubspaceIdOrAny, + /// To be included in this Area, an Entry’s path must be prefixed by the path. + pub path: Path, + /// To be included in this Area, an Entry’s timestamp must be included in the times. + pub times: Range, +} + +impl Area { + pub const fn new(subspace_id: SubspaceIdOrAny, path: Path, times: Range) -> Self { + Self { + subspace_id, + path, + times, + } + } + + pub fn full() -> Self { + Self::new( + SubspaceIdOrAny::Any, + Path::empty(), + Range::::FULL, + ) + } + + pub fn empty() -> Self { + Self::new( + SubspaceIdOrAny::Any, + Path::empty(), + Range::::EMPTY, + ) + } + + pub fn subspace(subspace_id: SubspaceId) -> Self { + Self::new( + SubspaceIdOrAny::Id(subspace_id), + Path::empty(), + Range::::FULL, + ) + } + + pub fn includes_entry(&self, entry: &Entry) -> bool { + self.subspace_id.includes_subspace(&entry.subspace_id) + && self.path.is_prefix_of(&entry.path) + && self.times.includes(&entry.timestamp) + } + + pub fn includes_area(&self, other: &Area) -> bool { + self.subspace_id.includes(&other.subspace_id) + && self.path.is_prefix_of(&other.path) + && self.times.includes_range(&other.times) + } + + pub fn includes_range(&self, range: &ThreeDRange) -> bool { + let path_start = self.path.is_prefix_of(&range.paths.start); + let path_end = match &range.paths.end { + RangeEnd::Open => true, + RangeEnd::Closed(path) => self.path.is_prefix_of(path), + }; + let subspace_start = 
self.subspace_id.includes_subspace(&range.subspaces.start); + let subspace_end = match range.subspaces.end { + RangeEnd::Open => true, + RangeEnd::Closed(subspace) => self.subspace_id.includes_subspace(&subspace), + }; + subspace_start + && subspace_end + && path_start + && path_end + && self.times.includes_range(&range.times) + } +} + +impl Range { + pub const FULL: Self = Self { + start: 0, + end: RangeEnd::Open, + }; + + pub const EMPTY: Self = Self { + start: 0, + end: RangeEnd::Closed(0), + }; +} + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] +pub enum SubspaceIdOrAny { + Any, + Id(SubspaceId), +} + +impl SubspaceIdOrAny { + fn includes(&self, other: &SubspaceIdOrAny) -> bool { + match (self, other) { + (SubspaceIdOrAny::Any, SubspaceIdOrAny::Any) => true, + (SubspaceIdOrAny::Id(_), SubspaceIdOrAny::Any) => false, + (_, SubspaceIdOrAny::Id(id)) => self.includes_subspace(id), + } + } + fn includes_subspace(&self, subspace_id: &SubspaceId) -> bool { + match self { + Self::Any => true, + Self::Id(id) => id == subspace_id, + } + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub enum Message { + CommitmentReveal(CommitmentReveal), + // PaiReplyFragment + // PaiBindFragment + // PaiRequestSubspaceCapability + // PaiReplySubspaceCapability + SetupBindStaticToken(SetupBindStaticToken), + SetupBindReadCapability(SetupBindReadCapability), + SetupBindAreaOfInterest(SetupBindAreaOfInterest), + ReconciliationSendFingerprint(ReconciliationSendFingerprint), + ReconciliationAnnounceEntries(ReconciliationAnnounceEntries), + ReconciliationSendEntry(ReconciliationSendEntry), + // DataSendEntry + // DataSendPayload + // DataSetMetadata + // DataBindPayloadRequest + // DataReplyPayload + ControlIssueGuarantee(ControlIssueGuarantee), + ControlAbsolve(ControlAbsolve), + ControlPlead(ControlPlead), + ControlAnnounceDropping(ControlAnnounceDropping), + ControlApologise(ControlApologise), + ControlFreeHandle(ControlFreeHandle), +} + +/// Bind a ReadCapability to 
a CapabilityHandle. +/// +/// The SetupBindReadCapability messages let peers bind a ReadCapability for later reference. +/// To do so, they must present a valid SyncSignature over their challenge, thus demonstrating +/// they hold the secret key corresponding to receiver of the ReadCapability. +/// +/// These requirements allow us to encode SetupBindReadCapability messages more efficiently. +/// The handle must be bound to the fragment (primary, if possible) of the capability with the +/// longest Path prefix that is in the intersection of the two peers’ fragments. +/// +/// SetupBindReadCapability messages use the CapabilityChannel. +#[derive(Debug, Serialize, Deserialize)] +pub struct SetupBindReadCapability { + /// A ReadCapability that the peer wishes to reference in future messages. + pub capability: ReadCapability, + + /// The IntersectionHandle, bound by the sender, of the capability’s fragment + /// with the longest Path in the intersection of the fragments. + /// + /// If both a primary and secondary such fragment exist, choose the primary one. + pub handle: IntersectionHandle, + + /// The SyncSignature issued by the Receiver of the capability over the sender’s challenge. + pub signature: SyncSignature, +} + +/// Bind an AreaOfInterest to an AreaOfInterestHandle. +#[derive(Debug, Serialize, Deserialize)] +pub struct SetupBindAreaOfInterest { + /// An AreaOfInterest that the peer wishes to reference in future messages. + pub area_of_interest: AreaOfInterest, + /// A CapabilityHandle bound by the sender that grants access to all entries in the message’s area_of_interest. + pub authorisation: CapabilityHandle, +} + +/// Bind a StaticToken to a StaticTokenHandle. +#[derive(Debug, Serialize, Deserialize)] +pub struct SetupBindStaticToken { + /// The StaticToken to bind. + pub static_token: StaticToken, +} + +/// Send a Fingerprint as part of 3d range-based set reconciliation. 
+#[derive(Debug, Serialize, Deserialize)] +pub struct ReconciliationSendFingerprint { + /// The 3dRange whose Fingerprint is transmitted. + pub range: ThreeDRange, + /// The Fingerprint of the range, that is, of all LengthyEntries the peer has in the range. + pub fingerprint: Fingerprint, + /// An AreaOfInterestHandle, bound by the sender of this message, that fully contains the range. + pub sender_handle: AreaOfInterestHandle, + /// An AreaOfInterestHandle, bound by the receiver of this message, that fully contains the range. + pub receiver_handle: AreaOfInterestHandle, +} + +/// Prepare transmission of the LengthyEntries a peer has in a 3dRange as part of 3d range-based set reconciliation. +#[derive(Debug, Serialize, Deserialize)] +pub struct ReconciliationAnnounceEntries { + /// The 3dRange whose LengthyEntries to transmit. + pub range: ThreeDRange, + /// The number of Entries the sender has in the range. + pub count: u64, + /// A boolean flag to indicate whether the sender wishes to receive a ReconciliationAnnounceEntries message for the same 3dRange in return. + pub want_response: bool, + /// Whether the sender promises to send the Entries in the range sorted from oldest to newest. + pub will_sort: bool, + /// An AreaOfInterestHandle, bound by the sender of this message, that fully contains the range. + pub sender_handle: AreaOfInterestHandle, + /// An AreaOfInterestHandle, bound by the receiver of this message, that fully contains the range. + pub receiver_handle: AreaOfInterestHandle, +} + +/// Transmit a LengthyEntry as part of 3d range-based set reconciliation. +#[derive(Debug, Serialize, Deserialize)] +pub struct ReconciliationSendEntry { + /// The LengthyEntry itself. + pub entry: LengthyEntry, + /// A StaticTokenHandle, bound by the sender of this message, that is bound to the static part of the entry’s AuthorisationToken. + pub static_token_handle: StaticTokenHandle, + /// The dynamic part of the entry’s AuthorisationToken. 
+ pub dynamic_token: DynamicToken, +} + +impl ReconciliationSendEntry { + pub fn into_authorised_entry( + self, + static_token: StaticToken, + ) -> Result { + let authorisation_token = AuthorisationToken::from_parts(static_token, self.dynamic_token); + let entry = PossiblyAuthorisedEntry::new(self.entry.entry, authorisation_token); + entry.authorise() + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct LengthyEntry { + /// The Entry in question. + pub entry: Entry, + /// The number of consecutive bytes from the start of the entry’s Payload that the peer holds. + pub available: u64, +} + +impl LengthyEntry { + pub fn new(entry: Entry, available: u64) -> Self { + Self { entry, available } + } +} + +#[derive(Debug, Serialize, Deserialize, Eq, PartialEq)] +pub struct Fingerprint; + +impl Fingerprint { + pub fn is_empty(&self) -> bool { + false + } +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct ThreeDRange { + paths: Range, + subspaces: Range, + times: Range, +} + +#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)] +pub struct Range { + start: T, + end: RangeEnd, +} + +impl Range { + pub fn is_closed(&self) -> bool { + matches!(self.end, RangeEnd::Closed(_)) + } + pub fn is_open(&self) -> bool { + matches!(self.end, RangeEnd::Open) + } +} + +impl Range { + pub fn includes(&self, value: &T) -> bool { + value >= &self.start && self.end.includes(value) + } + + pub fn includes_range(&self, other: &Range) -> bool { + self.start <= other.start && self.end >= other.end + } +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)] +pub enum RangeEnd { + Closed(T), + Open, +} + +impl PartialOrd for RangeEnd { + fn partial_cmp(&self, other: &Self) -> Option { + match (self, other) { + (RangeEnd::Open, RangeEnd::Closed(_)) => Some(Ordering::Greater), + (RangeEnd::Closed(_), RangeEnd::Open) => Some(Ordering::Less), + (RangeEnd::Closed(a), RangeEnd::Closed(b)) => a.partial_cmp(b), + (RangeEnd::Open, RangeEnd::Open) => 
Some(Ordering::Equal), + } + } +} + +impl RangeEnd { + pub fn includes(&self, value: &T) -> bool { + match self { + Self::Open => true, + Self::Closed(end) => value < end, + } + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub enum ControlMessage {} + +/// Make a binding promise of available buffer capacity to the other peer +#[derive(Debug, Serialize, Deserialize)] +pub struct ControlIssueGuarantee { + amount: u64, + channel: LogicalChannel, +} + +/// Allow the other peer to reduce its total buffer capacity by amount. +#[derive(Debug, Serialize, Deserialize)] +pub struct ControlAbsolve { + amount: u64, + channel: LogicalChannel, +} + +/// Ask the other peer to send an ControlAbsolve message +/// such that the receiver remaining guarantees will be target. +#[derive(Debug, Serialize, Deserialize)] +pub struct ControlPlead { + target: u64, + channel: LogicalChannel, +} + +/// The server notifies the client that it has started dropping messages and will continue +/// to do so until it receives an Apologise message. The server must send any outstanding +/// guarantees of the logical channel before sending a AnnounceDropping message. +#[derive(Debug, Serialize, Deserialize)] +pub struct ControlAnnounceDropping { + channel: LogicalChannel, +} + +/// The client notifies the server that it can stop dropping messages on this logical channel. +#[derive(Debug, Serialize, Deserialize)] +pub struct ControlApologise { + channel: LogicalChannel, +} + +/// Ask the other peer to free a resource handle. +/// +/// This is needed for symmetric protocols where peers act as both client and server simultaneously +/// and bind resource handles to the same handle types. +#[derive(Debug, Serialize, Deserialize)] +pub struct ControlFreeHandle { + handle: u64, + /// Indicates whether the peer sending this message is the one who created the handle (true) or not (false). 
+    mine: bool,
+    handle_type: HandleType,
+}
diff --git a/iroh-willow/src/proto/willow.rs b/iroh-willow/src/proto/willow.rs
new file mode 100644
index 0000000000..c9665082c3
--- /dev/null
+++ b/iroh-willow/src/proto/willow.rs
@@ -0,0 +1,281 @@
+use std::{cmp::Ordering, sync::Arc};
+
+use bytes::Bytes;
+use iroh_base::hash::Hash;
+use serde::{Deserialize, Serialize};
+use zerocopy::{native_endian::U64, FromBytes, IntoBytes, KnownLayout, NoCell, Unaligned};
+
+use super::{
+    keys::{self, PUBLIC_KEY_LENGTH},
+    meadowcap::{self, is_authorised_write},
+};
+
+pub type NamespaceId = keys::NamespaceId;
+pub type SubspaceId = keys::UserId;
+pub type Timestamp = u64;
+pub type PayloadDigest = Hash;
+pub type Component = Bytes;
+
+pub type AuthorisationToken = meadowcap::MeadowcapAuthorisationToken;
+
+/// A natural number for limiting the length of path components.
+pub const MAX_COMPONENT_LENGTH: usize = 4096;
+/// A natural number for limiting the number of path components.
+pub const MAX_COMPONENT_COUNT: usize = 1024;
+/// A natural number max_path_length for limiting the overall size of paths.
+pub const MAX_PATH_LENGTH: usize = 4096;
+
+pub const DIGEST_LENGTH: usize = 32;
+
+/// `PATH_LENGTH_POWER` is the least natural number such that `256 ^ PATH_LENGTH_POWER ≥ MAX_COMPONENT_LENGTH`.
+/// We can represent the length of any Component in path_length_power bytes.
+/// UPathLengthPower denotes the type of numbers between zero (inclusive) and 256path_length_power (exclusive).
+///
+/// The value `2` means that we can encode paths up to 64KiB long.
+const PATH_LENGTH_POWER: usize = 2;
+const PATH_COUNT_POWER: usize = PATH_LENGTH_POWER;
+
+type UPathLengthPower = u16;
+type UPathCountPower = u16;
+
+/// Error returned for entries that are not authorised.
+///
+/// See [`is_authorised_write`] for details.
+#[derive(Debug, thiserror::Error)]
+#[error("Entry is not authorised")]
+pub struct Unauthorised;
+
+/// Error returned for invalid paths.
+#[derive(Debug, thiserror::Error)]
+pub enum InvalidPath {
+    #[error("Component with index {0} exceeds the maximum component length")]
+    ComponentTooLong(usize),
+    #[error("The path exceeds the maximum component length")]
+    PathTooLong,
+    #[error("The path exceeds the maximum component count")]
+    TooManyComponents,
+}
+
+/// A Willow path: a sequence of byte-string [`Component`]s, cheap to clone.
+#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
+pub struct Path(Arc<[Component]>);
+
+// TODO: zerocopy support for path
+// #[allow(missing_debug_implementations)]
+// #[derive(KnownLayout, FromBytes, NoCell, Unaligned, IntoBytes)]
+// #[repr(C, packed)]
+// pub struct ComponentRef([u8]);
+//
+// #[allow(missing_debug_implementations)]
+// #[derive(KnownLayout, FromBytes, NoCell, Unaligned, IntoBytes)]
+// #[repr(C, packed)]
+// pub struct PathRef([ComponentRef]);
+// pub struct PathRef<'a>(&'a [&'a [u8]]);
+// impl<'a> AsRef> for Path {
+//     fn as_ref(&'a self) -> &'a PathRef<'a> {
+//         todo!()
+//     }
+// }
+
+impl Path {
+    /// Create a path from raw components, validating the length limits.
+    pub fn new(components: &[&[u8]]) -> Result<Self, InvalidPath> {
+        Self::validate(components)?;
+        let path: Vec<Component> = components
+            .iter()
+            .map(|c| Bytes::copy_from_slice(c))
+            .collect();
+        let path: Arc<[Component]> = path.into();
+        Ok(Path(path))
+    }
+
+    /// Check the component count, per-component length, and total length limits.
+    pub fn validate(components: &[&[u8]]) -> Result<(), InvalidPath> {
+        if components.len() > MAX_COMPONENT_COUNT {
+            return Err(InvalidPath::TooManyComponents);
+        }
+        let mut total_len = 0;
+        for (i, component) in components.iter().enumerate() {
+            let len = component.len();
+            if len > MAX_COMPONENT_LENGTH {
+                return Err(InvalidPath::ComponentTooLong(i));
+            }
+            total_len += len;
+        }
+        if total_len > MAX_PATH_LENGTH {
+            return Err(InvalidPath::PathTooLong);
+        }
+        Ok(())
+    }
+
+    /// A `Path` `s` is a prefix of a `Path` `t` if the first [`Component`]s of `t` are exactly the `Component`s of `s`.
+    pub fn is_prefix_of(&self, other: &Path) -> bool {
+        other.0.starts_with(&self.0)
+    }
+
+    /// Create an empty path.
+    pub fn empty() -> Self {
+        Self(Arc::new([]))
+    }
+
+    /// The number of bytes [`Self::encode`] will produce: a count prefix,
+    /// a length prefix per component, and the component data.
+    pub fn encoded_len(&self) -> usize {
+        let lengths_len = PATH_COUNT_POWER + self.len() * PATH_LENGTH_POWER;
+        let data_len = self.iter().map(Bytes::len).sum::<usize>();
+        lengths_len + data_len
+    }
+
+    /// Encode in the format for signatures into a mutable vector.
+    pub fn encode_into(&self, out: &mut Vec<u8>) {
+        let component_count = self.len() as UPathCountPower;
+        out.extend_from_slice(&component_count.to_be_bytes());
+        for component in self.iter() {
+            let len = component.len() as UPathLengthPower;
+            out.extend_from_slice(&len.to_be_bytes());
+            out.extend_from_slice(component);
+        }
+    }
+
+    /// Encode in the format for signatures.
+    pub fn encode(&self) -> Vec<u8> {
+        let mut out = Vec::with_capacity(self.encoded_len());
+        self.encode_into(&mut out);
+        out
+    }
+}
+
+impl std::ops::Deref for Path {
+    type Target = [Component];
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+/// The metadata for storing a Payload.
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
+pub struct Entry {
+    /// The identifier of the namespace to which the Entry belongs.
+    pub namespace_id: NamespaceId,
+    /// The identifier of the subspace to which the Entry belongs.
+    pub subspace_id: SubspaceId,
+    /// The Path to which the Entry was written.
+    pub path: Path,
+    /// The claimed creation time of the Entry.
+    ///
+    /// Wall-clock timestamps may come as a surprise. We are cognisant of their limitations,
+    /// and use them anyway. To learn why, please see Timestamps, really?
+    pub timestamp: Timestamp,
+    /// The length of the Payload in bytes.
+    pub payload_length: u64,
+    /// The result of applying hash_payload to the Payload.
+    pub payload_digest: PayloadDigest,
+}
+
+impl PartialOrd for Entry {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for Entry {
+    /// Entries are ordered by timestamp, then payload digest, then payload length,
+    /// so that "greater" means "newer" in the Willow sense.
+    fn cmp(&self, other: &Self) -> Ordering {
+        if other.timestamp < self.timestamp
+            || (other.timestamp == self.timestamp && other.payload_digest < self.payload_digest)
+            || (other.timestamp == self.timestamp
+                && other.payload_digest == self.payload_digest
+                && other.payload_length < self.payload_length)
+        {
+            Ordering::Greater
+        } else if self == other {
+            Ordering::Equal
+        } else {
+            Ordering::Less
+        }
+    }
+}
+
+impl Entry {
+    pub fn is_newer_than(&self, other: &Entry) -> bool {
+        self > other
+    }
+
+    /// Convert the entry to a byte slice.
+    ///
+    /// This is invoked to create the signable for signatures over the entry. Thus, any change in
+    /// the encoding format here will make existing signatures invalid.
+    ///
+    /// The encoding follows the [`Willow spec for encoding`](https://willowprotocol.org/specs/encodings/index.html#enc_entry).
+    // TODO: make sure that the encoding fits the spec
+    pub fn encode(&self) -> Vec<u8> {
+        let path_len = self.path.encoded_len();
+        let len = PUBLIC_KEY_LENGTH + PUBLIC_KEY_LENGTH + path_len + 8 + 8 + DIGEST_LENGTH;
+        let mut out = Vec::with_capacity(len);
+        out.extend_from_slice(self.namespace_id.as_bytes());
+        out.extend_from_slice(self.subspace_id.as_bytes());
+        self.path.encode_into(&mut out);
+        out.extend_from_slice(&self.timestamp.to_be_bytes());
+        out.extend_from_slice(&self.payload_length.to_be_bytes());
+        out.extend_from_slice(self.payload_digest.as_bytes());
+        out
+    }
+}
+
+/// A PossiblyAuthorisedEntry is a pair of an Entry and an AuthorisationToken.
+#[derive(Debug, Serialize, Deserialize)] +pub struct PossiblyAuthorisedEntry(Entry, AuthorisationToken); + +impl PossiblyAuthorisedEntry { + pub fn new(entry: Entry, authorisation_token: AuthorisationToken) -> Self { + Self(entry, authorisation_token) + } + pub fn is_authorised(&self) -> bool { + is_authorised_write(&self.0, &self.1) + } + + pub fn authorise(self) -> Result { + match self.is_authorised() { + true => Ok(AuthorisedEntry(self.0, self.1)), + false => Err(Unauthorised), + } + } + + pub fn into_parts(self) -> (Entry, AuthorisationToken) { + (self.0, self.1) + } +} + +impl TryFrom for AuthorisedEntry { + type Error = Unauthorised; + fn try_from(value: PossiblyAuthorisedEntry) -> Result { + value.authorise() + } +} + +/// An AuthorisedEntry is a PossiblyAuthorisedEntry for which is_authorised_write returns true. +#[derive(Debug, Serialize, Deserialize)] +pub struct AuthorisedEntry(Entry, AuthorisationToken); + +impl AuthorisedEntry { + pub fn try_from_parts( + entry: Entry, + authorisation_token: AuthorisationToken, + ) -> Result { + PossiblyAuthorisedEntry::new(entry, authorisation_token).authorise() + } + + pub fn is_authorised(&self) -> bool { + true + } + + /// Warning: Use only if you can assure that the authorisation was previously checked! 
+ pub fn from_parts_unchecked(entry: Entry, authorisation_token: AuthorisationToken) -> Self { + Self(entry, authorisation_token) + } + + pub fn into_parts(self) -> (Entry, AuthorisationToken) { + (self.0, self.1) + } +} + +// impl std::ops::Deref for AuthorisedEntry { +// type Target = PossiblyAuthorisedEntry; +// fn deref(&self) -> &Self::Target { +// &self.0 +// } +// } diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs new file mode 100644 index 0000000000..b4edc375b0 --- /dev/null +++ b/iroh-willow/src/session.rs @@ -0,0 +1,471 @@ +use core::fmt; +use std::collections::{hash_map, HashMap, VecDeque}; + +use tracing::warn; + +use crate::{ + proto::{ + keys::NamespaceId, + meadowcap::is_authorised_write, + wgps::{ + Area, Fingerprint, IntersectionHandle, LengthyEntry, StaticTokenHandle, ThreeDRange, + }, + willow::{ + AuthorisationToken, AuthorisedEntry, Entry, PossiblyAuthorisedEntry, Unauthorised, + }, + }, + store::{RangeSplitPart, Store}, +}; + +use super::proto::wgps::{ + AreaOfInterest, AreaOfInterestHandle, CapabilityHandle, ControlAbsolve, + ControlAnnounceDropping, ControlApologise, ControlFreeHandle, ControlIssueGuarantee, + ControlPlead, ReadCapability, ReconciliationAnnounceEntries, ReconciliationSendEntry, + ReconciliationSendFingerprint, SetupBindAreaOfInterest, SetupBindReadCapability, + SetupBindStaticToken, StaticToken, +}; + +#[derive(Debug, derive_more::From, derive_more::TryInto)] +pub enum Message { + Control(ControlMessage), + Reconciliation(ReconciliationMessage), +} + +#[derive(Debug, derive_more::From)] +pub enum ControlMessage { + // TODO: move to CapabilityChannel + SetupBindReadCapability(SetupBindReadCapability), + // TODO: move to StaticTokenChannel + SetupBindStaticToken(SetupBindStaticToken), + // TODO: move to AreaOfInterestChannel + SetupBindAreaOfInterest(SetupBindAreaOfInterest), + // IssueGuarantee(ControlIssueGuarantee), + // Absolve(ControlAbsolve), + // Plead(ControlPlead), + // 
AnnounceDropping(ControlAnnounceDropping), + // Apologise(ControlApologise), + FreeHandle(ControlFreeHandle), +} + +#[derive(Debug, derive_more::From)] +pub enum ReconciliationMessage { + SendFingerprint(ReconciliationSendFingerprint), + AnnounceEntries(ReconciliationAnnounceEntries), + SendEntry(ReconciliationSendEntry), +} + +// struct HandleMap { +// next_handle: u64, +// map: HashMap, +// } +// impl HandleMap +// where +// R: std::hash::Hash + Eq, +// H: Handle, +// { +// pub fn bind(&mut self, value: R) -> (H, bool) { +// match self.map.entry(value) { +// hash_map::Entry::Occupied(handle) => (*handle.get(), false), +// hash_map::Entry::Vacant(entry) => { +// let handle: H = self.next_handle.into(); +// self.next_handle += 1; +// entry.insert(handle); +// (handle, true) +// } +// } +// } +// } + +#[derive(Debug, Default)] +struct ResourceMap { + next_handle: u64, + map: HashMap>, +} + +pub trait Handle: std::hash::Hash + From + Copy + Eq + PartialEq {} + +impl Handle for CapabilityHandle {} +impl Handle for StaticTokenHandle {} +impl Handle for AreaOfInterestHandle {} + +#[derive(Debug)] +enum ResourceState { + Active, + WeProposedFree, + ToBeDeleted, +} + +impl ResourceMap +where + H: Handle, + R: Eq + PartialEq, +{ + pub fn bind(&mut self, resource: R) -> H { + let handle: H = self.next_handle.into(); + self.next_handle += 1; + let resource = Resource::new(resource); + self.map.insert(handle, resource); + handle + } + + pub fn bind_if_new(&mut self, resource: R) -> (H, bool) { + // TODO: Optimize / find out if reverse index is better than find_map + if let Some(handle) = self + .map + .iter() + .find_map(|(handle, r)| (r.value == resource).then_some(handle)) + { + (*handle, false) + } else { + let handle = self.bind(resource); + (handle, true) + } + } + + pub fn get(&self, handle: &H) -> Option<&R> { + self.map.get(handle).as_ref().map(|r| &r.value) + } + + pub fn try_get(&self, handle: &H) -> Result<&R, Error> { + 
self.get(handle).ok_or(Error::MissingResource) + } +} + +#[derive(Debug, thiserror::Error)] +pub enum Error { + #[error("local store failed")] + Store(#[from] anyhow::Error), + #[error("missing resource")] + MissingResource, + #[error("missing resource")] + RangeOutsideCapability, + #[error("received a message that is not valid in the current session state")] + InvalidMessageInCurrentState, + #[error("our and their area of interests refer to different namespaces")] + AreaOfInterestNamespaceMismatch, + #[error("received an entry which is not authorised")] + UnauthorisedEntryReceived, +} + +impl From for Error { + fn from(_value: Unauthorised) -> Self { + Self::UnauthorisedEntryReceived + } +} + +#[derive(Debug)] +struct Resource { + value: V, + state: ResourceState, + unprocessed_messages: usize, +} +impl Resource { + pub fn new(value: V) -> Self { + Self { + value, + state: ResourceState::Active, + unprocessed_messages: 0, + } + } +} + +pub const CHALLENGE_LENGTH: usize = 32; +pub type Challenge = [u8; CHALLENGE_LENGTH]; + +#[derive(Debug)] +pub enum Role { + Betty, + Alfie, +} + +#[derive(Debug)] +pub struct Session { + our_role: Role, + + control_channel: Channel, + reconciliation_channel: Channel, + + us: PeerState, + them: PeerState, +} + +#[derive(Debug, PartialEq, Eq)] +struct BoundAreaOfInterest { + area_of_interest: AreaOfInterest, + authorisation: CapabilityHandle, + namespace: NamespaceId, +} + +impl BoundAreaOfInterest { + pub fn includes_range(&self, range: &ThreeDRange) -> bool { + self.area_of_interest.area.includes_range(range) + } +} + +#[derive(Debug)] +pub struct PeerState { + challenge: Challenge, + capabilities: ResourceMap, + areas_of_interest: ResourceMap, + static_tokens: ResourceMap, + reconciliation_announce_entries: Option, // intersections: ResourceMap, +} + +impl Session { + pub fn recv(&mut self, message: Message) { + match message { + Message::Control(msg) => self.control_channel.inbox_push_or_drop(msg), + Message::Reconciliation(msg) 
=> self.reconciliation_channel.inbox_push_or_drop(msg), + } + } + + pub fn pop_send(&mut self) -> Option { + if let Some(message) = self.control_channel.outbox.pop_front() { + return Some(message.into()); + }; + if let Some(message) = self.reconciliation_channel.outbox.pop_front() { + return Some(message.into()); + }; + None + } + + pub fn process(&mut self, store: &mut S) { + while let Some(message) = self.control_channel.inbox_pop() { + self.process_control(message).ok(); + } + while let Some(message) = self.reconciliation_channel.inbox_pop() { + self.process_reconciliation(message, store).ok(); + } + } + + pub fn process_control(&mut self, message: ControlMessage) -> anyhow::Result<()> { + match message { + ControlMessage::SetupBindReadCapability(msg) => { + msg.capability.validate()?; + msg.capability + .receiver() + .verify(&self.us.challenge, &msg.signature)?; + // todo: validate intersection handle + self.them.capabilities.bind(msg.capability); + } + ControlMessage::SetupBindStaticToken(msg) => { + self.them.static_tokens.bind(msg.static_token); + } + ControlMessage::SetupBindAreaOfInterest(msg) => { + let capability = self.them.capabilities.try_get(&msg.authorisation)?; + capability.try_granted_area(&msg.area_of_interest.area)?; + let bound_aoi = BoundAreaOfInterest { + area_of_interest: msg.area_of_interest, + authorisation: msg.authorisation, + namespace: capability.granted_namespace().into(), + }; + // let namespace = capability.granted_namespace(); + self.them.areas_of_interest.bind(bound_aoi); + } + ControlMessage::FreeHandle(_msg) => { + // TODO: Free handles + } + } + Ok(()) + } + + pub fn bind_static_token(&mut self, static_token: StaticToken) -> StaticTokenHandle { + let (handle, is_new) = self.us.static_tokens.bind_if_new(static_token.clone()); + if is_new { + let msg = SetupBindStaticToken { static_token }; + self.control_channel + .send(ControlMessage::SetupBindStaticToken(msg)); + } + handle + } + + /// Uses the blocking [`Store`] and thus may 
only be called in the worker thread. + pub fn process_reconciliation( + &mut self, + message: ReconciliationMessage, + store: &mut S, + ) -> Result<(), Error> { + match message { + ReconciliationMessage::SendFingerprint(msg) => { + let ReconciliationSendFingerprint { + range, + fingerprint, + sender_handle, + receiver_handle, + } = msg; + + let namespace = self.authorise_range(&range, &receiver_handle, &sender_handle)?; + let our_fingerprint = store.get_fingerprint(namespace, &range)?; + + // case 1: fingerprint match. + if our_fingerprint == fingerprint { + let msg = ReconciliationAnnounceEntries { + range, + count: 0, + want_response: false, + will_sort: false, + sender_handle, + receiver_handle, + }; + self.reconciliation_channel + .send(ReconciliationMessage::AnnounceEntries(msg)); + } else { + for part in store.split_range(namespace, &range)?.into_iter() { + match part { + RangeSplitPart::SendFingerprint(range, fingerprint) => { + let msg = ReconciliationSendFingerprint { + range, + fingerprint, + sender_handle, + receiver_handle, + }; + self.reconciliation_channel + .send(ReconciliationMessage::SendFingerprint(msg)); + } + RangeSplitPart::SendEntries(range, local_count) => { + let msg = ReconciliationAnnounceEntries { + range: range.clone(), + count: local_count, + want_response: true, + will_sort: false, // todo: sorted? 
+ sender_handle, + receiver_handle, + }; + self.reconciliation_channel.send(msg.into()); + for authorised_entry in + store.get_entries_with_authorisation(namespace, &range) + { + let authorised_entry = authorised_entry?; + let (entry, token) = authorised_entry.into_parts(); + let (static_token, dynamic_token) = token.into_parts(); + // todo: partial entries + let available = entry.payload_length; + let static_token_handle = self.bind_static_token(static_token); + let msg = ReconciliationSendEntry { + entry: LengthyEntry::new(entry, available), + static_token_handle, + dynamic_token, + }; + self.reconciliation_channel.send(msg.into()); + } + } + } + } + } + } + ReconciliationMessage::AnnounceEntries(msg) => { + if self.them.reconciliation_announce_entries.is_some() { + return Err(Error::InvalidMessageInCurrentState); + } + self.authorise_range(&msg.range, &msg.receiver_handle, &msg.sender_handle)?; + if msg.count == 0 { + // todo: what do we need to do here? + } else { + self.them.reconciliation_announce_entries = Some(msg) + } + } + ReconciliationMessage::SendEntry(msg) => { + let state = self + .them + .reconciliation_announce_entries + .as_mut() + .ok_or(Error::InvalidMessageInCurrentState)?; + let ReconciliationSendEntry { + entry, + static_token_handle, + dynamic_token, + } = msg; + let static_token = self.them.static_tokens.try_get(&static_token_handle)?; + // TODO: omit clone of static token? 
+ let authorisation_token = + AuthorisationToken::from_parts(static_token.clone(), dynamic_token); + let authorised_entry = + AuthorisedEntry::try_from_parts(entry.entry, authorisation_token)?; + store.ingest_entry(&authorised_entry)?; + + state.count -= 1; + if state.count == 0 { + self.them.reconciliation_announce_entries = None; + } + } + } + Ok(()) + } + + fn authorise_range( + &self, + range: &ThreeDRange, + receiver_handle: &AreaOfInterestHandle, + sender_handle: &AreaOfInterestHandle, + ) -> Result { + let ours = self.us.areas_of_interest.try_get(&receiver_handle)?; + let theirs = self.them.areas_of_interest.try_get(&sender_handle)?; + if !ours.includes_range(&range) || !theirs.includes_range(&range) { + return Err(Error::RangeOutsideCapability); + }; + if ours.namespace != theirs.namespace { + return Err(Error::AreaOfInterestNamespaceMismatch); + } + Ok(ours.namespace) + } +} +enum Scope { + Us, + Them, +} + +#[derive(Debug)] +pub struct Channel { + inbox: VecDeque, + outbox: VecDeque, + // issued_guarantees: usize, +} + +impl Channel { + pub fn with_capacity(cap: usize) -> Self { + Self { + inbox: VecDeque::with_capacity(cap), + outbox: VecDeque::with_capacity(cap), + } + } + + pub fn send(&mut self, value: T) -> bool { + self.outbox.push_back(value); + self.has_capacity() + } + + pub fn inbox_pop(&mut self) -> Option { + self.inbox.pop_front() + } + + pub fn inbox_push_or_drop(&mut self, message: T) { + if let Some(dropped) = self.inbox_push(message) { + warn!(message=?dropped, "dropping message"); + } + } + pub fn inbox_push(&mut self, message: T) -> Option { + if self.has_capacity() { + self.inbox.push_back(message); + None + } else { + Some(message) + } + } + pub fn remaining_capacity(&self) -> usize { + self.inbox.capacity() - self.inbox.len() + } + + pub fn has_capacity(&self) -> bool { + self.remaining_capacity() > 0 + } + + // pub fn issuable_guarantees(&self) -> usize { + // self.remaining_capacity() - self.issued_guarantees + // } + // + // pub 
fn offer_guarantees(&mut self) -> usize { + // let val = self.issuable_guarantees(); + // self.issued_guarantees += val; + // val + // } +} From e8064d5c108d90f6effc82c1e628f95458464413 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 23 Apr 2024 01:01:04 +0200 Subject: [PATCH 003/198] wip: progress --- iroh-willow/src/lib.rs | 4 +- iroh-willow/src/net.rs | 84 +++++ iroh-willow/src/net/codec.rs | 56 +++ iroh-willow/src/proto.rs | 4 +- iroh-willow/src/proto/keys.rs | 8 + iroh-willow/src/proto/meadowcap.rs | 18 + iroh-willow/src/proto/wgps.rs | 281 ++++++++++++--- iroh-willow/src/proto/willow.rs | 39 ++- iroh-willow/src/session.rs | 529 +++++++++++++++++++++-------- iroh-willow/src/store.rs | 105 ++++++ 10 files changed, 936 insertions(+), 192 deletions(-) create mode 100644 iroh-willow/src/net.rs create mode 100644 iroh-willow/src/net/codec.rs create mode 100644 iroh-willow/src/store.rs diff --git a/iroh-willow/src/lib.rs b/iroh-willow/src/lib.rs index 5aa433ef94..06567a4353 100644 --- a/iroh-willow/src/lib.rs +++ b/iroh-willow/src/lib.rs @@ -2,7 +2,7 @@ #![allow(missing_docs, unused_imports, dead_code)] -pub mod session; -pub mod proto; pub mod net; +pub mod proto; +pub mod session; pub mod store; diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs new file mode 100644 index 0000000000..0496d55653 --- /dev/null +++ b/iroh-willow/src/net.rs @@ -0,0 +1,84 @@ +use anyhow::ensure; +use futures::SinkExt; +use iroh_base::hash::Hash; +use tokio::io::{AsyncReadExt, AsyncWriteExt}; +use tokio_stream::StreamExt; +use tokio_util::codec::{FramedRead, FramedWrite}; + +use crate::{ + proto::wgps::{ + AccessChallenge, ChallengeHash, CHALLENGE_HASH_LENGTH, CHALLENGE_LENGTH, + MAXIMUM_PAYLOAD_SIZE_POWER, + }, + session::{Role, Session, SessionInit}, + store::MemoryStore, +}; + +use self::codec::WillowCodec; + +pub mod codec; + +async fn run(conn: quinn::Connection, our_role: Role, init: SessionInit) -> anyhow::Result<()> { + let (mut send, mut recv) = 
match our_role { + Role::Alfie => conn.open_bi().await?, + Role::Betty => conn.accept_bi().await?, + }; + + let our_nonce: AccessChallenge = rand::random(); + let (received_commitment, maximum_payload_size) = + exchange_commitments(&mut send, &mut recv, &our_nonce).await?; + + let mut session = Session::new( + our_role, + our_nonce, + maximum_payload_size, + received_commitment, + init, + ); + + let mut reader = FramedRead::new(recv, WillowCodec); + let mut writer = FramedWrite::new(send, WillowCodec); + + let mut store = MemoryStore::default(); + + while let Some(message) = reader.try_next().await? { + // TODO: buffer more than a single message here before handing off to store thread + // what we should do here: + // * notify store thread that we want to process + // * keep reading and pushing into session, until session is full + // * once store thread is ready for us: be notified of that, and hand over session to store + // thread + session.recv(message.into()); + + // move to store thread for this! 
+ session.process(&mut store)?; + + // back in network land: send out everything + // should be in parallel with reading + for message in session.drain_outbox() { + writer.send(message).await?; + } + } + Ok(()) +} + +async fn exchange_commitments( + send: &mut quinn::SendStream, + recv: &mut quinn::RecvStream, + our_nonce: &AccessChallenge, +) -> anyhow::Result<(ChallengeHash, usize)> { + let challenge_hash = Hash::new(&our_nonce); + send.write_u8(MAXIMUM_PAYLOAD_SIZE_POWER).await?; + send.write_all(challenge_hash.as_bytes()).await?; + + let their_maximum_payload_size_power = recv.read_u8().await?; + ensure!( + their_maximum_payload_size_power <= 64, + "maximum payload size too large" + ); + let their_maximum_payload_size = 2usize.pow(their_maximum_payload_size_power as u32); + + let mut received_commitment = [0u8; CHALLENGE_HASH_LENGTH]; + recv.read_exact(&mut received_commitment).await?; + Ok((received_commitment, their_maximum_payload_size)) +} diff --git a/iroh-willow/src/net/codec.rs b/iroh-willow/src/net/codec.rs new file mode 100644 index 0000000000..288820546d --- /dev/null +++ b/iroh-willow/src/net/codec.rs @@ -0,0 +1,56 @@ +use anyhow::ensure; +use bytes::{Buf, BufMut, BytesMut}; +use tokio_util::codec::{Decoder, Encoder}; + +use crate::proto::wgps::Message; + +#[derive(Debug, Default)] +pub struct WillowCodec; + +const MAX_MESSAGE_SIZE: usize = 1024 * 1024 * 1024; // This is likely too large, but lets have some restrictions + +impl Decoder for WillowCodec { + type Item = Message; + type Error = anyhow::Error; + fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { + if src.len() < 4 { + return Ok(None); + } + let bytes: [u8; 4] = src[..4].try_into().unwrap(); + let frame_len = u32::from_be_bytes(bytes) as usize; + ensure!( + frame_len <= MAX_MESSAGE_SIZE, + "received message that is too large: {}", + frame_len + ); + if src.len() < 4 + frame_len { + return Ok(None); + } + + let message: Message = postcard::from_bytes(&src[4..4 + 
frame_len])?; + src.advance(4 + frame_len); + Ok(Some(message)) + } +} + +impl Encoder for WillowCodec { + type Error = anyhow::Error; + + fn encode(&mut self, item: Message, dst: &mut BytesMut) -> Result<(), Self::Error> { + let len = + postcard::serialize_with_flavor(&item, postcard::ser_flavors::Size::default()).unwrap(); + ensure!( + len <= MAX_MESSAGE_SIZE, + "attempting to send message that is too large {}", + len + ); + + dst.put_u32(u32::try_from(len).expect("already checked")); + if dst.len() < 4 + len { + dst.resize(4 + len, 0u8); + } + postcard::to_slice(&item, &mut dst[4..])?; + + Ok(()) + } +} diff --git a/iroh-willow/src/proto.rs b/iroh-willow/src/proto.rs index 8ccb4c7880..204cf69956 100644 --- a/iroh-willow/src/proto.rs +++ b/iroh-willow/src/proto.rs @@ -1,4 +1,4 @@ +pub mod keys; +pub mod meadowcap; pub mod wgps; pub mod willow; -pub mod meadowcap; -pub mod keys; diff --git a/iroh-willow/src/proto/keys.rs b/iroh-willow/src/proto/keys.rs index 79abafdf37..a5b59db1c6 100644 --- a/iroh-willow/src/proto/keys.rs +++ b/iroh-willow/src/proto/keys.rs @@ -418,6 +418,14 @@ impl UserId { pub fn fmt_short(&self) -> String { base32::fmt_short(self.0) } + + pub fn zero() -> Self { + Self([0u8; 32]) + } + + pub fn from_bytes_unchecked(bytes: [u8; 32]) -> Self { + Self(bytes) + } } impl NamespaceId { diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index deb11fdf81..c72841c1c8 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -36,6 +36,24 @@ pub struct MeadowcapAuthorisationToken { signature: UserSignature, } +// /// To be used as an AuthorisationToken for Willow. +// #[derive(Debug, Serialize, Deserialize)] +// pub struct MeadowcapAuthorisationTokenRef<'a> { +// /// Certifies that an Entry may be written. +// capability: &'a McCapability, +// /// Proves that the Entry was created by the receiver of the capability. 
+// signature: &'a UserSignature, +// } +// +// impl<'a> AsRef> for MeadowcapAuthorisationToken { +// fn as_ref(&self) -> &MeadowcapAuthorisationTokenRef { +// &MeadowcapAuthorisationTokenRef { +// capability: &self.capability, +// signature: &self.signature, +// } +// } +// } + impl MeadowcapAuthorisationToken { pub fn from_parts(capability: McCapability, signature: UserSignature) -> Self { Self { diff --git a/iroh-willow/src/proto/wgps.rs b/iroh-willow/src/proto/wgps.rs index 12bc1991ce..5750c7ff27 100644 --- a/iroh-willow/src/proto/wgps.rs +++ b/iroh-willow/src/proto/wgps.rs @@ -1,5 +1,6 @@ use std::cmp::Ordering; +use bytes::Bytes; use ed25519_dalek::Signature; use iroh_net::key::PublicKey; use serde::{Deserialize, Serialize}; @@ -8,11 +9,22 @@ use super::{ keys, meadowcap, willow::{ AuthorisationToken, AuthorisedEntry, Entry, Path, PossiblyAuthorisedEntry, SubspaceId, - Timestamp, Unauthorised, + Timestamp, Unauthorised, DIGEST_LENGTH, }, }; +pub const MAXIMUM_PAYLOAD_SIZE_POWER: u8 = 12; +/// The maximum payload size limits when the other peer may include Payloads directly when transmitting Entries: +/// when an Entry’s payload_length is strictly greater than the maximum payload size, +/// its Payload may only be transmitted when explicitly requested. +/// +/// The value is 4096. +pub const MAXIMUM_PAYLOAD_SIZE: usize = 2usize.pow(MAXIMUM_PAYLOAD_SIZE_POWER as u32); + pub const CHALLENGE_LENGTH: usize = 32; +pub const CHALLENGE_HASH_LENGTH: usize = DIGEST_LENGTH; +pub type ChallengeHash = [u8; CHALLENGE_HASH_LENGTH]; +pub type AccessChallenge = [u8; CHALLENGE_LENGTH]; // In Meadowcap, for example, StaticToken is the type McCapability // and DynamicToken is the type UserSignature, @@ -40,42 +52,44 @@ pub enum HandleType { /// * completed (both peers performed scalar multiplication). IntersectionHandle, - /// Resource handle for ReadCapabilities that certify access to some Entries. 
+ /// Resource handle for [`ReadCapability`] that certify access to some Entries. CapabilityHandle, - /// Resource handle for AreaOfInterests that peers wish to sync. + /// Resource handle for [`AreaOfInterest`]s that peers wish to sync. AreaOfInterestHandle, /// Resource handle that controls the matching from Payload transmissions to Payload requests. PayloadRequestHandle, - /// Resource handle for StaticTokens that peers need to transmit. + /// Resource handle for [`StaticToken`]s that peers need to transmit. StaticTokenHandle, } /// The different logical channels employed by the WGPS. #[derive(Debug, Serialize, Deserialize)] pub enum LogicalChannel { + /// Control channel + ControlChannel, /// Logical channel for performing 3d range-based set reconciliation. ReconciliationChannel, - - /// Logical channel for transmitting Entries and Payloads outside of 3d range-based set reconciliation. - DataChannel, - - /// Logical channel for controlling the binding of new IntersectionHandles. - IntersectionChannel, - - /// Logical channel for controlling the binding of new CapabilityHandles. - CapabilityChannel, - - /// Logical channel for controlling the binding of new AreaOfInterestHandles. - AreaOfInterestChannel, - - /// Logical channel for controlling the binding of new PayloadRequestHandles. - PayloadRequestChannel, - - /// Logical channel for controlling the binding of new StaticTokenHandles. - StaticTokenChannel, + // TODO: actually use more channels + // /// Logical channel for transmitting Entries and Payloads outside of 3d range-based set reconciliation. + // DataChannel, + // + // /// Logical channel for controlling the binding of new IntersectionHandles. + // IntersectionChannel, + // + // /// Logical channel for controlling the binding of new CapabilityHandles. + // CapabilityChannel, + // + // /// Logical channel for controlling the binding of new AreaOfInterestHandles. 
+ // AreaOfInterestChannel, + // + // /// Logical channel for controlling the binding of new PayloadRequestHandles. + // PayloadRequestChannel, + // + // /// Logical channel for controlling the binding of new StaticTokenHandles. + // StaticTokenChannel, } #[derive(Debug, Serialize, Deserialize, Hash, Eq, PartialEq, Clone, Copy, derive_more::From)] @@ -90,17 +104,42 @@ pub struct CapabilityHandle(u64); #[derive(Debug, Serialize, Deserialize, Hash, Eq, PartialEq, Clone, Copy, derive_more::From)] pub struct StaticTokenHandle(u64); +pub trait Handle: std::hash::Hash + From + Copy + Eq + PartialEq { + fn handle_type(&self) -> HandleType; +} + +impl Handle for CapabilityHandle { + fn handle_type(&self) -> HandleType { + HandleType::CapabilityHandle + } +} +impl Handle for StaticTokenHandle { + fn handle_type(&self) -> HandleType { + HandleType::StaticTokenHandle + } +} +impl Handle for AreaOfInterestHandle { + fn handle_type(&self) -> HandleType { + HandleType::AreaOfInterestHandle + } +} +impl Handle for IntersectionHandle { + fn handle_type(&self) -> HandleType { + HandleType::IntersectionHandle + } +} + /// Complete the commitment scheme to determine the challenge for read authentication. #[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] pub struct CommitmentReveal { /// The nonce of the sender, encoded as a big-endian unsigned integer. - nonce: [u8; CHALLENGE_LENGTH], + pub nonce: AccessChallenge, } // skip: Private Area Intersection /// A grouping of Entries that are among the newest in some store. -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)] pub struct AreaOfInterest { /// To be included in this AreaOfInterest, an Entry must be included in the area. pub area: Area, @@ -110,11 +149,21 @@ pub struct AreaOfInterest { pub max_size: u64, } +impl AreaOfInterest { + pub fn full() -> Self { + Self { + area: Area::full(), + max_count: 0, + max_size: 0, + } + } +} + /// A grouping of Entries. 
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] pub struct Area { /// To be included in this Area, an Entry’s subspace_id must be equal to the subspace_id, unless it is any. - pub subspace_id: SubspaceIdOrAny, + pub subspace_id: SubspaceArea, /// To be included in this Area, an Entry’s path must be prefixed by the path. pub path: Path, /// To be included in this Area, an Entry’s timestamp must be included in the times. @@ -122,7 +171,7 @@ pub struct Area { } impl Area { - pub const fn new(subspace_id: SubspaceIdOrAny, path: Path, times: Range) -> Self { + pub const fn new(subspace_id: SubspaceArea, path: Path, times: Range) -> Self { Self { subspace_id, path, @@ -131,24 +180,16 @@ impl Area { } pub fn full() -> Self { - Self::new( - SubspaceIdOrAny::Any, - Path::empty(), - Range::::FULL, - ) + Self::new(SubspaceArea::Any, Path::empty(), Range::::FULL) } pub fn empty() -> Self { - Self::new( - SubspaceIdOrAny::Any, - Path::empty(), - Range::::EMPTY, - ) + Self::new(SubspaceArea::Any, Path::empty(), Range::::EMPTY) } pub fn subspace(subspace_id: SubspaceId) -> Self { Self::new( - SubspaceIdOrAny::Id(subspace_id), + SubspaceArea::Id(subspace_id), Path::empty(), Range::::FULL, ) @@ -183,6 +224,95 @@ impl Area { && path_end && self.times.includes_range(&range.times) } + + pub fn into_range(&self) -> ThreeDRange { + let subspace_start = match self.subspace_id { + SubspaceArea::Any => SubspaceId::zero(), + SubspaceArea::Id(id) => id, + }; + let subspace_end = match self.subspace_id { + SubspaceArea::Any => RangeEnd::Open, + SubspaceArea::Id(id) => subspace_range_end(id), + }; + let path_start = self.path.clone(); + let path_end = path_range_end(&self.path); + ThreeDRange { + subspaces: Range::new(subspace_start, subspace_end), + paths: Range::new(path_start, path_end), + times: self.times.clone(), + } + } + + pub fn intersection(&self, other: &Area) -> Option { + let subspace_id = self.subspace_id.intersection(&other.subspace_id)?; + let path = 
self.path.intersection(&other.path)?; + let times = self.times.intersection(&other.times)?; + Some(Self { + subspace_id, + times, + path, + }) + } +} + +fn path_range_end(path: &Path) -> RangeEnd { + if path.is_empty() { + RangeEnd::Open + } else { + let mut out = vec![]; + for component in path.iter().rev() { + // component can be incremented + if out.is_empty() && component.iter().any(|x| *x != 0xff) { + let mut bytes = Vec::with_capacity(component.len()); + bytes.copy_from_slice(&component); + let incremented = increment_by_one(&mut bytes); + debug_assert!(incremented, "checked above"); + out.push(Bytes::from(bytes)); + break; + // component cannot be incremented + } else if out.is_empty() { + continue; + } else { + out.push(component.clone()) + } + } + if out.is_empty() { + RangeEnd::Open + } else { + out.reverse(); + RangeEnd::Closed(Path::from_bytes_unchecked(out)) + } + } + // let mut bytes = id.to_bytes(); + // if increment_by_one(&mut bytes) { + // RangeEnd::Closed(SubspaceId::from_bytes_unchecked(bytes)) + // } else { + // RangeEnd::Open + // } +} + +fn subspace_range_end(id: SubspaceId) -> RangeEnd { + let mut bytes = id.to_bytes(); + if increment_by_one(&mut bytes) { + RangeEnd::Closed(SubspaceId::from_bytes_unchecked(bytes)) + } else { + RangeEnd::Open + } +} + +/// Increment a byte string by one, by incrementing the last byte that is not 255 by one. +/// +/// Returns false if all bytes are 255. 
+fn increment_by_one(value: &mut [u8]) -> bool { + for char in value.iter_mut().rev() { + if *char != 255 { + *char += 1; + return true; + } else { + *char = 0; + } + } + false } impl Range { @@ -195,20 +325,35 @@ impl Range { start: 0, end: RangeEnd::Closed(0), }; + + fn intersection(&self, other: &Self) -> Option { + let start = self.start.max(other.start); + let end = match (&self.end, &other.end) { + (RangeEnd::Open, RangeEnd::Closed(b)) => RangeEnd::Closed(*b), + (RangeEnd::Closed(a), RangeEnd::Closed(b)) => RangeEnd::Closed(*a.min(b)), + (RangeEnd::Closed(a), RangeEnd::Open) => RangeEnd::Closed(*a), + (RangeEnd::Open, RangeEnd::Open) => RangeEnd::Open, + }; + match end { + RangeEnd::Open => Some(Self::new(start, end)), + RangeEnd::Closed(t) if t >= start => Some(Self::new(start, end)), + RangeEnd::Closed(_) => Some(Self::new(start, end)), + } + } } #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] -pub enum SubspaceIdOrAny { +pub enum SubspaceArea { Any, Id(SubspaceId), } -impl SubspaceIdOrAny { - fn includes(&self, other: &SubspaceIdOrAny) -> bool { +impl SubspaceArea { + fn includes(&self, other: &SubspaceArea) -> bool { match (self, other) { - (SubspaceIdOrAny::Any, SubspaceIdOrAny::Any) => true, - (SubspaceIdOrAny::Id(_), SubspaceIdOrAny::Any) => false, - (_, SubspaceIdOrAny::Id(id)) => self.includes_subspace(id), + (SubspaceArea::Any, SubspaceArea::Any) => true, + (SubspaceArea::Id(_), SubspaceArea::Any) => false, + (_, SubspaceArea::Id(id)) => self.includes_subspace(id), } } fn includes_subspace(&self, subspace_id: &SubspaceId) -> bool { @@ -217,9 +362,19 @@ impl SubspaceIdOrAny { Self::Id(id) => id == subspace_id, } } + + fn intersection(&self, other: &Self) -> Option { + match (self, other) { + (Self::Any, Self::Any) => Some(Self::Any), + (Self::Id(a), Self::Any) => Some(Self::Id(*a)), + (Self::Any, Self::Id(b)) => Some(Self::Id(*b)), + (Self::Id(a), Self::Id(b)) if a == b => Some(Self::Id(*a)), + (Self::Id(_a), Self::Id(_b)) => None, + } 
+ } } -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize, derive_more::From)] pub enum Message { CommitmentReveal(CommitmentReveal), // PaiReplyFragment @@ -245,6 +400,17 @@ pub enum Message { ControlFreeHandle(ControlFreeHandle), } +impl Message { + pub fn logical_channel(&self) -> LogicalChannel { + match self { + Message::ReconciliationSendFingerprint(_) + | Message::ReconciliationAnnounceEntries(_) + | Message::ReconciliationSendEntry(_) => LogicalChannel::ReconciliationChannel, + _ => LogicalChannel::ControlChannel, + } + } +} + /// Bind a ReadCapability to a CapabilityHandle. /// /// The SetupBindReadCapability messages let peers bind a ReadCapability for later reference. @@ -272,7 +438,7 @@ pub struct SetupBindReadCapability { } /// Bind an AreaOfInterest to an AreaOfInterestHandle. -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)] pub struct SetupBindAreaOfInterest { /// An AreaOfInterest that the peer wishes to reference in future messages. pub area_of_interest: AreaOfInterest, @@ -280,6 +446,12 @@ pub struct SetupBindAreaOfInterest { pub authorisation: CapabilityHandle, } +impl SetupBindAreaOfInterest { + pub fn area(&self) -> &Area { + &self.area_of_interest.area + } +} + /// Bind a StaticToken to a StaticTokenHandle. 
#[derive(Debug, Serialize, Deserialize)] pub struct SetupBindStaticToken { @@ -354,12 +526,9 @@ impl LengthyEntry { } #[derive(Debug, Serialize, Deserialize, Eq, PartialEq)] -pub struct Fingerprint; +pub struct Fingerprint([u8; 32]); impl Fingerprint { - pub fn is_empty(&self) -> bool { - false - } } #[derive(Debug, Serialize, Deserialize, Clone)] @@ -376,6 +545,9 @@ pub struct Range { } impl Range { + pub fn new(start: T, end: RangeEnd) -> Self { + Self { start, end } + } pub fn is_closed(&self) -> bool { matches!(self.end, RangeEnd::Closed(_)) } @@ -411,6 +583,17 @@ impl PartialOrd for RangeEnd { } } +// impl PartialOrd for RangeEnd { +// fn partial_cmp(&self, other: &T) -> Option { +// // match (self, other) { +// // (RangeEnd::Open, RangeEnd::Closed(_)) => Some(Ordering::Greater), +// // (RangeEnd::Closed(_), RangeEnd::Open) => Some(Ordering::Less), +// // (RangeEnd::Closed(a), RangeEnd::Closed(b)) => a.partial_cmp(b), +// // (RangeEnd::Open, RangeEnd::Open) => Some(Ordering::Equal), +// // } +// } +// } + impl RangeEnd { pub fn includes(&self, value: &T) -> bool { match self { diff --git a/iroh-willow/src/proto/willow.rs b/iroh-willow/src/proto/willow.rs index c9665082c3..e467c06074 100644 --- a/iroh-willow/src/proto/willow.rs +++ b/iroh-willow/src/proto/willow.rs @@ -17,6 +17,7 @@ pub type PayloadDigest = Hash; pub type Component = Bytes; pub type AuthorisationToken = meadowcap::MeadowcapAuthorisationToken; +// pub type AuthorisationTokenRef<'a> = meadowcap::MeadowcapAuthorisationTokenRef; /// A natural number for limiting the length of path components. 
pub const MAX_COMPONENT_LENGTH: usize = 4096; @@ -80,12 +81,16 @@ pub struct Path(Arc<[Component]>); impl Path { pub fn new(components: &[&[u8]]) -> Result { Self::validate(components)?; - let path: Vec = components + let components: Vec = components .iter() .map(|c| Bytes::copy_from_slice(c)) .collect(); - let path: Arc<[Component]> = path.into(); - Ok(Path(path)) + Ok(Self::from_bytes_unchecked(components)) + } + + pub fn from_bytes_unchecked(components: Vec) -> Self { + let path: Arc<[Component]> = components.into(); + Path(path) } pub fn validate(components: &[&[u8]]) -> Result<(), InvalidPath> { @@ -138,6 +143,34 @@ impl Path { self.encode_into(&mut out); out } + + pub fn intersection(&self, other: &Path) -> Option { + if self.is_prefix_of(other) { + Some(self.clone()) + } else if other.is_prefix_of(self) { + Some(other.clone()) + } else { + None + } + // if self == other { + // Some(self.clone()) + // } else { + // let mut out = Vec::new(); + // for (a, b) in self.iter().zip(other.iter()) { + // if a == b { + // out.push(a.clone()); + // } else { + // break; + // } + // } + // if out.is_empty() { + // None + // } else { + // Some(Path::from_bytes_unchecked(out)) + // } + // } + // if self.is_prefix_of(&other) + } } impl std::ops::Deref for Path { diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index b4edc375b0..7333aaeb52 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -1,14 +1,18 @@ use core::fmt; use std::collections::{hash_map, HashMap, VecDeque}; +use anyhow::bail; +use ed25519_dalek::SignatureError; use tracing::warn; use crate::{ proto::{ - keys::NamespaceId, - meadowcap::is_authorised_write, + keys::{NamespaceId, NamespacePublicKey, Signature, UserSecretKey}, + meadowcap::{is_authorised_write, InvalidCapability}, wgps::{ - Area, Fingerprint, IntersectionHandle, LengthyEntry, StaticTokenHandle, ThreeDRange, + AccessChallenge, Area, ChallengeHash, CommitmentReveal, Fingerprint, Handle, + HandleType, 
IntersectionHandle, LengthyEntry, LogicalChannel, Message, + StaticTokenHandle, ThreeDRange, CHALLENGE_HASH_LENGTH, }, willow::{ AuthorisationToken, AuthorisedEntry, Entry, PossiblyAuthorisedEntry, Unauthorised, @@ -25,68 +29,20 @@ use super::proto::wgps::{ SetupBindStaticToken, StaticToken, }; -#[derive(Debug, derive_more::From, derive_more::TryInto)] -pub enum Message { - Control(ControlMessage), - Reconciliation(ReconciliationMessage), -} - -#[derive(Debug, derive_more::From)] -pub enum ControlMessage { - // TODO: move to CapabilityChannel - SetupBindReadCapability(SetupBindReadCapability), - // TODO: move to StaticTokenChannel - SetupBindStaticToken(SetupBindStaticToken), - // TODO: move to AreaOfInterestChannel - SetupBindAreaOfInterest(SetupBindAreaOfInterest), - // IssueGuarantee(ControlIssueGuarantee), - // Absolve(ControlAbsolve), - // Plead(ControlPlead), - // AnnounceDropping(ControlAnnounceDropping), - // Apologise(ControlApologise), - FreeHandle(ControlFreeHandle), -} - -#[derive(Debug, derive_more::From)] -pub enum ReconciliationMessage { - SendFingerprint(ReconciliationSendFingerprint), - AnnounceEntries(ReconciliationAnnounceEntries), - SendEntry(ReconciliationSendEntry), -} - -// struct HandleMap { -// next_handle: u64, -// map: HashMap, -// } -// impl HandleMap -// where -// R: std::hash::Hash + Eq, -// H: Handle, -// { -// pub fn bind(&mut self, value: R) -> (H, bool) { -// match self.map.entry(value) { -// hash_map::Entry::Occupied(handle) => (*handle.get(), false), -// hash_map::Entry::Vacant(entry) => { -// let handle: H = self.next_handle.into(); -// self.next_handle += 1; -// entry.insert(handle); -// (handle, true) -// } -// } -// } -// } - -#[derive(Debug, Default)] +#[derive(Debug)] struct ResourceMap { next_handle: u64, map: HashMap>, } -pub trait Handle: std::hash::Hash + From + Copy + Eq + PartialEq {} - -impl Handle for CapabilityHandle {} -impl Handle for StaticTokenHandle {} -impl Handle for AreaOfInterestHandle {} +impl Default for 
ResourceMap { + fn default() -> Self { + Self { + next_handle: 0, + map: Default::default(), + } + } +} #[derive(Debug)] enum ResourceState { @@ -135,16 +91,26 @@ where pub enum Error { #[error("local store failed")] Store(#[from] anyhow::Error), + #[error("wrong secret key for capability")] + WrongSecretKeyForCapability, #[error("missing resource")] MissingResource, + #[error("received capability is invalid")] + InvalidCapability, + #[error("received capability has an invalid signature")] + InvalidSignature, #[error("missing resource")] RangeOutsideCapability, #[error("received a message that is not valid in the current session state")] InvalidMessageInCurrentState, #[error("our and their area of interests refer to different namespaces")] AreaOfInterestNamespaceMismatch, + #[error("our and their area of interests do not overlap")] + AreaOfInterestDoesNotOverlap, #[error("received an entry which is not authorised")] UnauthorisedEntryReceived, + #[error("received an unsupported message type")] + UnsupportedMessage, } impl From for Error { @@ -152,6 +118,17 @@ impl From for Error { Self::UnauthorisedEntryReceived } } +impl From for Error { + fn from(_value: InvalidCapability) -> Self { + Self::InvalidCapability + } +} + +impl From for Error { + fn from(_value: SignatureError) -> Self { + Self::InvalidSignature + } +} #[derive(Debug)] struct Resource { @@ -169,10 +146,7 @@ impl Resource { } } -pub const CHALLENGE_LENGTH: usize = 32; -pub type Challenge = [u8; CHALLENGE_LENGTH]; - -#[derive(Debug)] +#[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum Role { Betty, Alfie, @@ -181,90 +155,212 @@ pub enum Role { #[derive(Debug)] pub struct Session { our_role: Role, + our_nonce: AccessChallenge, + init: Option, + challenge: Option, + + their_maximum_payload_size: usize, + received_commitment: ChallengeHash, + + control_channel: Channel, + reconciliation_channel: Channel, - control_channel: Channel, - reconciliation_channel: Channel, + our_current_aoi: Option, us: 
PeerState, them: PeerState, } -#[derive(Debug, PartialEq, Eq)] -struct BoundAreaOfInterest { - area_of_interest: AreaOfInterest, - authorisation: CapabilityHandle, - namespace: NamespaceId, +#[derive(Debug)] +pub struct Challenges { + ours: AccessChallenge, + theirs: AccessChallenge, } -impl BoundAreaOfInterest { - pub fn includes_range(&self, range: &ThreeDRange) -> bool { - self.area_of_interest.area.includes_range(range) +impl Challenges { + pub fn from_nonces( + our_role: Role, + our_nonce: AccessChallenge, + their_nonce: AccessChallenge, + ) -> Self { + let ours = match our_role { + Role::Alfie => bitwise_xor(our_nonce, their_nonce), + Role::Betty => bitwise_xor_complement(our_nonce, their_nonce), + }; + let theirs = bitwise_complement(ours); + Self { ours, theirs } } } -#[derive(Debug)] +#[derive(Debug, Default)] pub struct PeerState { - challenge: Challenge, capabilities: ResourceMap, - areas_of_interest: ResourceMap, + areas_of_interest: ResourceMap, static_tokens: ResourceMap, reconciliation_announce_entries: Option, // intersections: ResourceMap, } +#[derive(Debug)] +pub struct SessionInit { + user_secret_key: UserSecretKey, + // TODO: allow multiple capabilities + capability: ReadCapability, + // TODO: allow multiple areas of interest + area_of_interest: AreaOfInterest, +} + impl Session { - pub fn recv(&mut self, message: Message) { - match message { - Message::Control(msg) => self.control_channel.inbox_push_or_drop(msg), - Message::Reconciliation(msg) => self.reconciliation_channel.inbox_push_or_drop(msg), - } + pub fn new( + our_role: Role, + our_nonce: AccessChallenge, + their_maximum_payload_size: usize, + received_commitment: ChallengeHash, + init: SessionInit, + ) -> Self { + let mut this = Self { + our_role, + our_nonce, + challenge: None, + their_maximum_payload_size, + received_commitment, + control_channel: Default::default(), + reconciliation_channel: Default::default(), + us: Default::default(), + them: Default::default(), + our_current_aoi: 
None, // config + init: Some(init), + }; + let msg = CommitmentReveal { nonce: our_nonce }; + this.control_channel.send(msg); + this + } + + fn sign_challenge(&self, secret_key: &UserSecretKey) -> Result { + let challenge = self + .challenge + .as_ref() + .ok_or(Error::InvalidMessageInCurrentState)?; + let signature = secret_key.sign(&challenge.ours); + Ok(signature) } - pub fn pop_send(&mut self) -> Option { - if let Some(message) = self.control_channel.outbox.pop_front() { - return Some(message.into()); + pub fn drain_outbox(&mut self) -> impl Iterator + '_ { + self.control_channel + .outbox_drain() + .chain(self.reconciliation_channel.outbox_drain()) + } + + pub fn init(&mut self, init: &SessionInit) -> Result<(), Error> { + let area_of_interest = init.area_of_interest.clone(); + let capability = init.capability.clone(); + + if *capability.receiver() == init.user_secret_key.public_key() { + return Err(Error::WrongSecretKeyForCapability); + } + + // TODO: implement private area intersection + let intersection_handle = 0.into(); + + // register read capability + let signature = self.sign_challenge(&init.user_secret_key)?; + let our_capability_handle = self.us.capabilities.bind(capability.clone()); + let msg = SetupBindReadCapability { + capability, + handle: intersection_handle, + signature, }; - if let Some(message) = self.reconciliation_channel.outbox.pop_front() { - return Some(message.into()); + self.control_channel.send(msg); + + // register area of interest + let msg = SetupBindAreaOfInterest { + area_of_interest, + authorisation: our_capability_handle, }; - None + let our_aoi_handle = self.us.areas_of_interest.bind(msg.clone()); + self.control_channel.send(msg); + self.our_current_aoi = Some(our_aoi_handle); + + Ok(()) + } + + pub fn our_role(&self) -> Role { + self.our_role } - pub fn process(&mut self, store: &mut S) { + pub fn recv(&mut self, message: Message) { + match message.logical_channel() { + LogicalChannel::ControlChannel => 
self.control_channel.inbox_push_or_drop(message), + LogicalChannel::ReconciliationChannel => { + self.reconciliation_channel.inbox_push_or_drop(message) + } + } + } + + pub fn process(&mut self, store: &mut S) -> Result<(), Error> { while let Some(message) = self.control_channel.inbox_pop() { - self.process_control(message).ok(); + self.process_control(store, message)?; } while let Some(message) = self.reconciliation_channel.inbox_pop() { - self.process_reconciliation(message, store).ok(); + self.process_reconciliation(store, message)?; } + Ok(()) } - pub fn process_control(&mut self, message: ControlMessage) -> anyhow::Result<()> { + pub fn process_control( + &mut self, + store: &mut S, + message: Message, + ) -> Result<(), Error> { match message { - ControlMessage::SetupBindReadCapability(msg) => { + Message::CommitmentReveal(msg) => { + if self.challenge.is_some() { + return Err(Error::InvalidMessageInCurrentState); + } + self.challenge = Some(Challenges::from_nonces( + self.our_role, + self.our_nonce, + msg.nonce, + )); + if let Some(init) = self.init.take() { + self.init(&init)?; + } else { + return Err(Error::InvalidMessageInCurrentState); + } + } + Message::SetupBindReadCapability(msg) => { + let challenge = self + .challenge + .as_ref() + .ok_or(Error::InvalidMessageInCurrentState)?; msg.capability.validate()?; msg.capability .receiver() - .verify(&self.us.challenge, &msg.signature)?; - // todo: validate intersection handle + .verify(&challenge.theirs, &msg.signature)?; + // TODO: verify intersection handle self.them.capabilities.bind(msg.capability); } - ControlMessage::SetupBindStaticToken(msg) => { + Message::SetupBindStaticToken(msg) => { self.them.static_tokens.bind(msg.static_token); } - ControlMessage::SetupBindAreaOfInterest(msg) => { + Message::SetupBindAreaOfInterest(msg) => { let capability = self.them.capabilities.try_get(&msg.authorisation)?; capability.try_granted_area(&msg.area_of_interest.area)?; - let bound_aoi = BoundAreaOfInterest { - 
area_of_interest: msg.area_of_interest, - authorisation: msg.authorisation, - namespace: capability.granted_namespace().into(), - }; - // let namespace = capability.granted_namespace(); - self.them.areas_of_interest.bind(bound_aoi); + let their_aoi_handle = self.them.areas_of_interest.bind(msg); + + if self.our_role == Role::Alfie { + if let Some(our_aoi_handle) = self.our_current_aoi.clone() { + self.init_reconciliation(store, &our_aoi_handle, &their_aoi_handle)?; + } else { + warn!( + "received area of interest from remote, but nothing setup on our side" + ); + } + } } - ControlMessage::FreeHandle(_msg) => { + Message::ControlFreeHandle(_msg) => { // TODO: Free handles } + _ => return Err(Error::UnsupportedMessage), } Ok(()) } @@ -274,28 +370,60 @@ impl Session { if is_new { let msg = SetupBindStaticToken { static_token }; self.control_channel - .send(ControlMessage::SetupBindStaticToken(msg)); + .send(Message::SetupBindStaticToken(msg)); } handle } + /// Uses the blocking [`Store`] and thus may only be called in the worker thread. 
+ pub fn init_reconciliation( + &mut self, + store: &mut S, + our_aoi_handle: &AreaOfInterestHandle, + their_aoi_handle: &AreaOfInterestHandle, + ) -> Result<(), Error> { + let our_aoi = self.us.areas_of_interest.try_get(&our_aoi_handle)?; + let their_aoi = self.us.areas_of_interest.try_get(&their_aoi_handle)?; + + let our_capability = self.us.capabilities.try_get(&our_aoi.authorisation)?; + let namespace = our_capability.granted_namespace(); + + // TODO: intersect with their_aoi first + let area = &our_aoi + .area() + .intersection(&their_aoi.area()) + .ok_or(Error::AreaOfInterestDoesNotOverlap)?; + + let range = area.into_range(); + let fingerprint = store.range_fingerprint(namespace.into(), &range)?; + + let msg = ReconciliationSendFingerprint { + range, + fingerprint, + sender_handle: *our_aoi_handle, + receiver_handle: *their_aoi_handle, + }; + self.reconciliation_channel.send(msg); + Ok(()) + } + /// Uses the blocking [`Store`] and thus may only be called in the worker thread. pub fn process_reconciliation( &mut self, - message: ReconciliationMessage, store: &mut S, + message: Message, ) -> Result<(), Error> { match message { - ReconciliationMessage::SendFingerprint(msg) => { + Message::ReconciliationSendFingerprint(message) => { let ReconciliationSendFingerprint { range, fingerprint, sender_handle, receiver_handle, - } = msg; + } = message; let namespace = self.authorise_range(&range, &receiver_handle, &sender_handle)?; - let our_fingerprint = store.get_fingerprint(namespace, &range)?; + let our_fingerprint = store.range_fingerprint(namespace, &range)?; // case 1: fingerprint match. 
if our_fingerprint == fingerprint { @@ -308,7 +436,7 @@ impl Session { receiver_handle, }; self.reconciliation_channel - .send(ReconciliationMessage::AnnounceEntries(msg)); + .send(Message::ReconciliationAnnounceEntries(msg)); } else { for part in store.split_range(namespace, &range)?.into_iter() { match part { @@ -320,7 +448,7 @@ impl Session { receiver_handle, }; self.reconciliation_channel - .send(ReconciliationMessage::SendFingerprint(msg)); + .send(Message::ReconciliationSendFingerprint(msg)); } RangeSplitPart::SendEntries(range, local_count) => { let msg = ReconciliationAnnounceEntries { @@ -331,7 +459,7 @@ impl Session { sender_handle, receiver_handle, }; - self.reconciliation_channel.send(msg.into()); + self.reconciliation_channel.send(msg); for authorised_entry in store.get_entries_with_authorisation(namespace, &range) { @@ -346,25 +474,29 @@ impl Session { static_token_handle, dynamic_token, }; - self.reconciliation_channel.send(msg.into()); + self.reconciliation_channel.send(msg); } } } } } } - ReconciliationMessage::AnnounceEntries(msg) => { + Message::ReconciliationAnnounceEntries(message) => { if self.them.reconciliation_announce_entries.is_some() { return Err(Error::InvalidMessageInCurrentState); } - self.authorise_range(&msg.range, &msg.receiver_handle, &msg.sender_handle)?; - if msg.count == 0 { + self.authorise_range( + &message.range, + &message.receiver_handle, + &message.sender_handle, + )?; + if message.count == 0 { // todo: what do we need to do here? } else { - self.them.reconciliation_announce_entries = Some(msg) + self.them.reconciliation_announce_entries = Some(message) } } - ReconciliationMessage::SendEntry(msg) => { + Message::ReconciliationSendEntry(message) => { let state = self .them .reconciliation_announce_entries @@ -374,9 +506,9 @@ impl Session { entry, static_token_handle, dynamic_token, - } = msg; + } = message; let static_token = self.them.static_tokens.try_get(&static_token_handle)?; - // TODO: omit clone of static token? 
+ // TODO: avoid clone let authorisation_token = AuthorisationToken::from_parts(static_token.clone(), dynamic_token); let authorised_entry = @@ -388,6 +520,7 @@ impl Session { self.them.reconciliation_announce_entries = None; } } + _ => return Err(Error::UnsupportedMessage), } Ok(()) } @@ -398,17 +531,46 @@ impl Session { receiver_handle: &AreaOfInterestHandle, sender_handle: &AreaOfInterestHandle, ) -> Result { - let ours = self.us.areas_of_interest.try_get(&receiver_handle)?; - let theirs = self.them.areas_of_interest.try_get(&sender_handle)?; - if !ours.includes_range(&range) || !theirs.includes_range(&range) { - return Err(Error::RangeOutsideCapability); - }; - if ours.namespace != theirs.namespace { + let our_namespace = self.handle_to_namespace_id(Scope::Us, receiver_handle)?; + let their_namespace = self.handle_to_namespace_id(Scope::Them, sender_handle)?; + if our_namespace != their_namespace { return Err(Error::AreaOfInterestNamespaceMismatch); } - Ok(ours.namespace) + let our_aoi = self.handle_to_aoi(Scope::Us, receiver_handle)?; + let their_aoi = self.handle_to_aoi(Scope::Them, sender_handle)?; + + if !our_aoi.area().includes_range(&range) || !their_aoi.area().includes_range(&range) { + return Err(Error::RangeOutsideCapability); + } + Ok(our_namespace.into()) + } + + fn handle_to_aoi( + &self, + scope: Scope, + handle: &AreaOfInterestHandle, + ) -> Result<&SetupBindAreaOfInterest, Error> { + match scope { + Scope::Us => self.us.areas_of_interest.try_get(handle), + Scope::Them => self.them.areas_of_interest.try_get(handle), + } + } + + fn handle_to_namespace_id( + &self, + scope: Scope, + handle: &AreaOfInterestHandle, + ) -> Result<&NamespacePublicKey, Error> { + let aoi = self.handle_to_aoi(scope, handle)?; + let capability = match scope { + Scope::Us => self.us.capabilities.try_get(&aoi.authorisation)?, + Scope::Them => self.them.capabilities.try_get(&aoi.authorisation)?, + }; + Ok(capability.granted_namespace()) } } + +#[derive(Copy, Clone, Debug)] 
enum Scope { Us, Them, @@ -420,6 +582,14 @@ pub struct Channel { outbox: VecDeque, // issued_guarantees: usize, } +impl Default for Channel { + fn default() -> Self { + Self { + inbox: Default::default(), + outbox: Default::default(), + } + } +} impl Channel { pub fn with_capacity(cap: usize) -> Self { @@ -429,12 +599,20 @@ impl Channel { } } - pub fn send(&mut self, value: T) -> bool { - self.outbox.push_back(value); - self.has_capacity() + pub fn send(&mut self, value: impl Into) -> bool { + self.outbox.push_back(value.into()); + self.has_inbox_capacity() + } + + fn outbox_drain(&mut self) -> impl Iterator + '_ { + self.outbox.drain(..) } - pub fn inbox_pop(&mut self) -> Option { + // fn inbox_drain(&mut self) -> impl Iterator + '_ { + // self.inbox.drain(..) + // } + + fn inbox_pop(&mut self) -> Option { self.inbox.pop_front() } @@ -444,19 +622,19 @@ impl Channel { } } pub fn inbox_push(&mut self, message: T) -> Option { - if self.has_capacity() { + if self.has_inbox_capacity() { self.inbox.push_back(message); None } else { Some(message) } } - pub fn remaining_capacity(&self) -> usize { + pub fn remaining_inbox_capacity(&self) -> usize { self.inbox.capacity() - self.inbox.len() } - pub fn has_capacity(&self) -> bool { - self.remaining_capacity() > 0 + pub fn has_inbox_capacity(&self) -> bool { + self.remaining_inbox_capacity() > 0 } // pub fn issuable_guarantees(&self) -> usize { @@ -469,3 +647,82 @@ impl Channel { // val // } } + +fn bitwise_xor(a: [u8; N], b: [u8; N]) -> [u8; N] { + let mut res = [0u8; N]; + for (i, (x1, x2)) in a.iter().zip(b.iter()).enumerate() { + res[i] = x1 ^ x2; + } + res +} + +fn bitwise_complement(a: [u8; N]) -> [u8; N] { + let mut res = [0u8; N]; + for (i, x) in a.iter().enumerate() { + res[i] = !x; + } + res +} + +fn bitwise_xor_complement(a: [u8; N], b: [u8; N]) -> [u8; N] { + let mut res = [0u8; N]; + for (i, (x1, x2)) in a.iter().zip(b.iter()).enumerate() { + res[i] = !(x1 ^ x2); + } + res +} + +// #[derive(Debug, 
derive_more::From, derive_more::TryInto)] +// pub enum ScopedMessage { +// Control(ControlMessage), +// Reconciliation(ReconciliationMessage), +// } +// +// impl From for ScopedMessage { +// fn from(value: Message) -> Self { +// match value { +// Message::ReconciliationSendFingerprint(msg) => Self::Reconciliation(msg.into()), +// Message::ReconciliationAnnounceEntries(msg) => Self::Reconciliation(msg.into()), +// Message::ReconciliationSendEntry(msg) => Self::Reconciliation(msg.into()), +// +// Message::CommitmentReveal(msg) => Self::Control(msg.into()), +// Message::SetupBindStaticToken(msg) => Self::Control(msg.into()), +// Message::SetupBindReadCapability(msg) => Self::Control(msg.into()), +// Message::SetupBindAreaOfInterest(msg) => Self::Control(msg.into()), +// +// Message::ControlIssueGuarantee(msg) => Self::Control(msg.into()), +// Message::ControlAbsolve(msg) => Self::Control(msg.into()), +// Message::ControlPlead(msg) => Self::Control(msg.into()), +// Message::ControlAnnounceDropping(msg) => Self::Control(msg.into()), +// Message::ControlApologise(msg) => Self::Control(msg.into()), +// Message::ControlFreeHandle(msg) => Self::Control(msg.into()), +// } +// } +// } +// +// #[derive(Debug, derive_more::From)] +// pub enum ReconciliationMessage { +// SendFingerprint(ReconciliationSendFingerprint), +// AnnounceEntries(ReconciliationAnnounceEntries), +// SendEntry(ReconciliationSendEntry), +// } +// +// #[derive(Debug, derive_more::From)] +// pub enum ControlMessage { +// CommitmentReveal(CommitmentReveal), +// // TODO: move to CapabilityChannel +// SetupBindReadCapability(SetupBindReadCapability), +// // TODO: move to StaticTokenChannel +// SetupBindStaticToken(SetupBindStaticToken), +// // TODO: move to AreaOfInterestChannel +// SetupBindAreaOfInterest(SetupBindAreaOfInterest), +// +// IssueGuarantee(ControlIssueGuarantee), +// Absolve(ControlAbsolve), +// Plead(ControlPlead), +// AnnounceDropping(ControlAnnounceDropping), +// Apologise(ControlApologise), 
+// +// FreeHandle(ControlFreeHandle), +// } +// diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs new file mode 100644 index 0000000000..956d989a82 --- /dev/null +++ b/iroh-willow/src/store.rs @@ -0,0 +1,105 @@ +use std::collections::HashMap; + +use crate::proto::{ + wgps::{Area, Fingerprint, ThreeDRange}, + willow::{AuthorisedEntry, NamespaceId}, +}; + +pub trait Store { + fn range_fingerprint( + &mut self, + namespace: NamespaceId, + range: &ThreeDRange, + ) -> anyhow::Result; + + fn split_range( + &mut self, + namespace: NamespaceId, + range: &ThreeDRange, + ) -> anyhow::Result; + + fn get_entries_with_authorisation<'a>( + &'a mut self, + namespace: NamespaceId, + range: &ThreeDRange, + ) -> impl Iterator> + 'a; + + fn ingest_entry(&mut self, entry: &AuthorisedEntry) -> anyhow::Result<()>; +} + +#[derive(Debug, Default)] +pub struct MemoryStore { + entries: HashMap>, +} + +impl Store for MemoryStore { + fn range_fingerprint( + &mut self, + namespace: NamespaceId, + range: &ThreeDRange, + ) -> anyhow::Result { + let _ = namespace; + let _ = range; + todo!() + } + + fn split_range( + &mut self, + namespace: NamespaceId, + range: &ThreeDRange, + ) -> anyhow::Result { + let _ = namespace; + let _ = range; + todo!() + } + + fn get_entries_with_authorisation<'a>( + &'a mut self, + namespace: NamespaceId, + range: &ThreeDRange, + ) -> impl Iterator> + 'a { + let _ = namespace; + let _ = range; + None.into_iter() + } + + fn ingest_entry(&mut self, entry: &AuthorisedEntry) -> anyhow::Result<()> { + let _ = entry; + todo!() + } +} + +#[derive(Debug)] +pub enum RangeSplit { + SendEntries(ThreeDRange, u64), + SendSplit([RangeSplitPart; 2]), +} + +impl IntoIterator for RangeSplit { + type IntoIter = RangeSplitIterator; + type Item = RangeSplitPart; + fn into_iter(self) -> Self::IntoIter { + RangeSplitIterator(match self { + RangeSplit::SendEntries(range, len) => { + [Some(RangeSplitPart::SendEntries(range, len)), None] + } + RangeSplit::SendSplit(parts) => 
parts.map(Option::Some), + }) + } +} + +#[derive(Debug)] +pub struct RangeSplitIterator([Option; 2]); + +impl Iterator for RangeSplitIterator { + type Item = RangeSplitPart; + fn next(&mut self) -> Option { + self.0.iter_mut().filter_map(Option::take).next() + } +} + +#[derive(Debug)] +pub enum RangeSplitPart { + SendEntries(ThreeDRange, u64), + SendFingerprint(ThreeDRange, Fingerprint), +} From 07c446d1db39fe195af72e2308fff9362bcdaccc Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 23 Apr 2024 12:05:01 +0200 Subject: [PATCH 004/198] progress --- iroh-willow/src/net.rs | 173 ++++++++++++++++++++- iroh-willow/src/proto/keys.rs | 125 +++++++++++++--- iroh-willow/src/proto/meadowcap.rs | 46 +++++- iroh-willow/src/proto/wgps.rs | 67 +++++++-- iroh-willow/src/proto/willow.rs | 51 ++++++- iroh-willow/src/session.rs | 233 +++++++++++++---------------- iroh-willow/src/store.rs | 101 +++++++++++-- 7 files changed, 609 insertions(+), 187 deletions(-) diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 0496d55653..7a0110215a 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -1,9 +1,10 @@ use anyhow::ensure; -use futures::SinkExt; +use futures::{FutureExt, SinkExt, TryFutureExt}; use iroh_base::hash::Hash; use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio_stream::StreamExt; use tokio_util::codec::{FramedRead, FramedWrite}; +use tracing::debug; use crate::{ proto::wgps::{ @@ -11,25 +12,32 @@ use crate::{ MAXIMUM_PAYLOAD_SIZE_POWER, }, session::{Role, Session, SessionInit}, - store::MemoryStore, + store::{MemoryStore, Store}, }; use self::codec::WillowCodec; pub mod codec; -async fn run(conn: quinn::Connection, our_role: Role, init: SessionInit) -> anyhow::Result<()> { - let (mut send, mut recv) = match our_role { +async fn run( + store: &mut S, + conn: quinn::Connection, + role: Role, + init: SessionInit, +) -> anyhow::Result<()> { + let (mut send, mut recv) = match role { Role::Alfie => conn.open_bi().await?, 
Role::Betty => conn.accept_bi().await?, }; let our_nonce: AccessChallenge = rand::random(); + debug!(?role, "start"); let (received_commitment, maximum_payload_size) = exchange_commitments(&mut send, &mut recv, &our_nonce).await?; + debug!(?role, "exchanged comittments"); let mut session = Session::new( - our_role, + role, our_nonce, maximum_payload_size, received_commitment, @@ -39,9 +47,18 @@ async fn run(conn: quinn::Connection, our_role: Role, init: SessionInit) -> anyh let mut reader = FramedRead::new(recv, WillowCodec); let mut writer = FramedWrite::new(send, WillowCodec); - let mut store = MemoryStore::default(); + // move to store thread for this! + session.process(store)?; + + // back in network land: send out everything + // should be in parallel with reading + for message in session.drain_outbox() { + debug!(role=?role, ?message, "send"); + writer.send(message).await?; + } while let Some(message) = reader.try_next().await? { + debug!(?role, ?message, "recv"); // TODO: buffer more than a single message here before handing off to store thread // what we should do here: // * notify store thread that we want to process @@ -51,13 +68,18 @@ async fn run(conn: quinn::Connection, our_role: Role, init: SessionInit) -> anyh session.recv(message.into()); // move to store thread for this! 
- session.process(&mut store)?; + let done = session.process(store)?; // back in network land: send out everything // should be in parallel with reading for message in session.drain_outbox() { + debug!(role=?role, ?message, "send"); writer.send(message).await?; } + + if done { + break; + } } Ok(()) } @@ -82,3 +104,140 @@ async fn exchange_commitments( recv.read_exact(&mut received_commitment).await?; Ok((received_commitment, their_maximum_payload_size)) } + +#[cfg(test)] +mod tests { + use iroh_base::hash::Hash; + use iroh_net::MagicEndpoint; + use rand::SeedableRng; + use tracing::debug; + + use crate::{ + net::run, + proto::{ + keys::{NamespaceId, NamespaceSecretKey, NamespaceType, UserSecretKey}, + meadowcap::{AccessMode, McCapability, OwnedCapability}, + wgps::{AreaOfInterest, ReadCapability}, + willow::{Entry, Path}, + }, + session::{Role, SessionInit}, + store::{MemoryStore, Store}, + }; + + const ALPN: &[u8] = b"iroh-willow/0"; + + #[tokio::test] + async fn smoke() -> anyhow::Result<()> { + iroh_test::logging::setup_multithreaded(); + let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(1); + + let ep_alfie = MagicEndpoint::builder() + .alpns(vec![ALPN.to_vec()]) + .bind(0) + .await?; + let ep_betty = MagicEndpoint::builder() + .alpns(vec![ALPN.to_vec()]) + .bind(0) + .await?; + + let addr_betty = ep_betty.my_addr().await?; + + debug!("start connect"); + let (conn_alfie, conn_betty) = tokio::join!( + async move { ep_alfie.connect(addr_betty, ALPN).await }, + async move { + let connecting = ep_betty.accept().await.unwrap(); + connecting.await + } + ); + let conn_alfie = conn_alfie.unwrap(); + let conn_betty = conn_betty.unwrap(); + debug!("connected"); + + let namespace_secret = NamespaceSecretKey::generate(&mut rng, NamespaceType::Owned); + let namespace_id: NamespaceId = namespace_secret.public_key().into(); + + let mut store_alfie = MemoryStore::default(); + let init_alfie = { + let secret_key = UserSecretKey::generate(&mut rng); + let public_key = 
secret_key.public_key(); + let read_capability = ReadCapability::Owned(OwnedCapability::new( + &namespace_secret, + public_key, + AccessMode::Read, + )); + let area_of_interest = AreaOfInterest::full(); + let write_capability = McCapability::Owned(OwnedCapability::new( + &namespace_secret, + public_key, + AccessMode::Write, + )); + for i in 0..3 { + let p = format!("alfie{i}"); + let entry = Entry { + namespace_id, + subspace_id: public_key.into(), + path: Path::new(&[p.as_bytes()])?, + timestamp: 10, + payload_length: 2, + payload_digest: Hash::new("cool things"), + }; + let entry = entry.attach_authorisation(write_capability.clone(), &secret_key)?; + store_alfie.ingest_entry(&entry)?; + } + SessionInit { + user_secret_key: secret_key, + capability: read_capability, + area_of_interest, + } + }; + + let mut store_betty = MemoryStore::default(); + let init_betty = { + let secret_key = UserSecretKey::generate(&mut rng); + let public_key = secret_key.public_key(); + let read_capability = McCapability::Owned(OwnedCapability::new( + &namespace_secret, + public_key, + AccessMode::Read, + )); + let area_of_interest = AreaOfInterest::full(); + let write_capability = McCapability::Owned(OwnedCapability::new( + &namespace_secret, + public_key, + AccessMode::Write, + )); + for i in 0..3 { + let p = format!("betty{i}"); + let entry = Entry { + namespace_id, + subspace_id: public_key.into(), + path: Path::new(&[p.as_bytes()])?, + timestamp: 10, + payload_length: 2, + payload_digest: Hash::new("cool things"), + }; + let entry = entry.attach_authorisation(write_capability.clone(), &secret_key)?; + store_betty.ingest_entry(&entry)?; + } + SessionInit { + user_secret_key: secret_key, + capability: read_capability, + area_of_interest, + } + }; + + debug!("init constructed"); + + let (res_alfie, res_betty) = tokio::join!( + run(&mut store_alfie, conn_alfie, Role::Alfie, init_alfie), + run(&mut store_betty, conn_betty, Role::Betty, init_betty), + ); + res_alfie.unwrap(); + 
res_betty.unwrap(); + println!("alfie {:?}", store_alfie); + println!("betty {:?}", store_betty); + + Ok(()) + } +} diff --git a/iroh-willow/src/proto/keys.rs b/iroh-willow/src/proto/keys.rs index a5b59db1c6..1016c1f499 100644 --- a/iroh-willow/src/proto/keys.rs +++ b/iroh-willow/src/proto/keys.rs @@ -15,7 +15,65 @@ pub const SIGNATURE_LENGTH: usize = ed25519_dalek::SIGNATURE_LENGTH; pub type SubspaceId = UserId; -pub type Signature = ed25519_dalek::Signature; +#[derive(Serialize, Deserialize, Clone, PartialEq, Eq)] +pub struct NamespaceSignature(ed25519_dalek::Signature); + +impl NamespaceSignature { + /// Convert to a base32 string limited to the first 10 bytes for a friendly string + /// representation of the key. + pub fn fmt_short(&self) -> String { + base32::fmt_short(&self.to_bytes()) + } + +} + +impl fmt::Display for NamespaceSignature { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", base32::fmt(&self.to_bytes())) + } +} +impl fmt::Debug for NamespaceSignature { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "NamespaceSignature({})", self.fmt_short()) + } +} + +#[derive(Serialize, Deserialize, Clone, PartialEq, Eq)] +pub struct UserSignature(ed25519_dalek::Signature); + +impl UserSignature { + /// Convert to a base32 string limited to the first 10 bytes for a friendly string + /// representation of the key. 
+ pub fn fmt_short(&self) -> String { + base32::fmt_short(&self.to_bytes()) + } + +} + +impl fmt::Display for UserSignature { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", base32::fmt(&self.to_bytes())) + } +} +impl fmt::Debug for UserSignature { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "UserSignature({})", self.fmt_short()) + } +} + +impl std::ops::Deref for UserSignature { + type Target = ed25519_dalek::Signature; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl std::ops::Deref for NamespaceSignature { + type Target = ed25519_dalek::Signature; + fn deref(&self) -> &Self::Target { + &self.0 + } +} /// User key to insert entries in a [`crate::Replica`] /// @@ -51,13 +109,19 @@ impl UserSecretKey { } /// Sign a message with this [`UserSecretKey`] key. - pub fn sign(&self, msg: &[u8]) -> Signature { - self.0.sign(msg) + pub fn sign(&self, msg: &[u8]) -> UserSignature { + UserSignature(self.0.sign(msg)) } /// Strictly verify a signature on a message with this [`UserSecretKey`]'s public key. - pub fn verify(&self, msg: &[u8], signature: &Signature) -> Result<(), SignatureError> { - self.0.verify_strict(msg, signature) + pub fn verify(&self, msg: &[u8], signature: &UserSignature) -> Result<(), SignatureError> { + self.0.verify_strict(msg, &signature.0) + } + + /// Convert to a base32 string limited to the first 10 bytes for a friendly string + /// representation of the key. + pub fn fmt_short(&self) -> String { + base32::fmt_short(&self.to_bytes()) } } @@ -71,8 +135,8 @@ pub struct UserPublicKey(VerifyingKey); impl UserPublicKey { /// Verify that a signature matches the `msg` bytes and was created with the [`UserSecretKey`] /// that corresponds to this [`UserId`]. 
- pub fn verify(&self, msg: &[u8], signature: &Signature) -> Result<(), SignatureError> { - self.0.verify_strict(msg, signature) + pub fn verify(&self, msg: &[u8], signature: &UserSignature) -> Result<(), SignatureError> { + self.0.verify_strict(msg, &signature.0) } /// Get the byte representation of this [`UserId`]. @@ -80,6 +144,11 @@ impl UserPublicKey { self.0.as_bytes() } + /// Get the byte representation of this [`UserId`]. + pub fn to_bytes(&self) -> [u8; 32] { + *self.0.as_bytes() + } + /// Create from a slice of bytes. /// /// Will return an error if the input bytes do not represent a valid [`ed25519_dalek`] @@ -88,6 +157,12 @@ impl UserPublicKey { pub fn from_bytes(bytes: &[u8; 32]) -> Result { Ok(UserPublicKey(VerifyingKey::from_bytes(bytes)?)) } + + /// Convert to a base32 string limited to the first 10 bytes for a friendly string + /// representation of the key. + pub fn fmt_short(&self) -> String { + base32::fmt_short(&self.to_bytes()) + } } #[derive(Debug, Eq, PartialEq, Copy, Clone)] @@ -136,13 +211,19 @@ impl NamespaceSecretKey { } /// Sign a message with this [`NamespaceSecretKey] key. - pub fn sign(&self, msg: &[u8]) -> Signature { - self.0.sign(msg) + pub fn sign(&self, msg: &[u8]) -> NamespaceSignature { + NamespaceSignature(self.0.sign(msg)) } /// Strictly verify a signature on a message with this [`NamespaceSecretKey]'s public key. - pub fn verify(&self, msg: &[u8], signature: &Signature) -> Result<(), SignatureError> { - self.0.verify_strict(msg, signature) + pub fn verify(&self, msg: &[u8], signature: &NamespaceSignature) -> Result<(), SignatureError> { + self.0.verify_strict(msg, &signature.0) + } + + /// Convert to a base32 string limited to the first 10 bytes for a friendly string + /// representation of the key. 
+ pub fn fmt_short(&self) -> String { + base32::fmt_short(&self.to_bytes()) } } @@ -166,8 +247,8 @@ impl NamespacePublicKey { /// Verify that a signature matches the `msg` bytes and was created with the [`NamespaceSecretKey] /// that corresponds to this [`NamespaceId`]. - pub fn verify(&self, msg: &[u8], signature: &Signature) -> Result<(), SignatureError> { - self.0.verify_strict(msg, signature) + pub fn verify(&self, msg: &[u8], signature: &NamespaceSignature) -> Result<(), SignatureError> { + self.0.verify_strict(msg, &signature.0) } /// Get the byte representation of this [`NamespaceId`]. @@ -183,6 +264,12 @@ impl NamespacePublicKey { pub fn from_bytes(bytes: &[u8; 32]) -> Result { Ok(NamespacePublicKey(VerifyingKey::from_bytes(bytes)?)) } + + /// Convert to a base32 string limited to the first 10 bytes for a friendly string + /// representation of the key. + pub fn fmt_short(&self) -> String { + base32::fmt_short(self.as_bytes()) + } } impl fmt::Display for UserSecretKey { @@ -223,37 +310,37 @@ impl fmt::Display for NamespaceId { impl fmt::Debug for NamespaceSecretKey { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "Namespace({})", self) + write!(f, "NamespaceSecretKey({})", self.fmt_short()) } } impl fmt::Debug for NamespaceId { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "NamespaceId({})", base32::fmt_short(self.0)) + write!(f, "NamespaceId({})", self.fmt_short()) } } impl fmt::Debug for UserId { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "UserId({})", base32::fmt_short(self.0)) + write!(f, "UserId({})", self.fmt_short()) } } impl fmt::Debug for UserSecretKey { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "User({})", self) + write!(f, "UserSecretKey({})", self.fmt_short()) } } impl fmt::Debug for NamespacePublicKey { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "NamespacePublicKey({})", self) + write!(f, 
"NamespacePublicKey({})", self.fmt_short()) } } impl fmt::Debug for UserPublicKey { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "UserPublicKey({})", self) + write!(f, "UserPublicKey({})", self.fmt_short()) } } diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index c72841c1c8..9ef447717a 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -1,15 +1,16 @@ use serde::{Deserialize, Serialize}; +use tracing::debug; use super::{ - keys::{self, NamespaceSecretKey, PUBLIC_KEY_LENGTH}, + keys::{self, NamespaceSecretKey, UserSecretKey, PUBLIC_KEY_LENGTH}, wgps::Area, - willow::{Entry, Unauthorised}, + willow::{AuthorisedEntry, Entry, Unauthorised}, }; -pub type UserSignature = keys::Signature; +pub type UserSignature = keys::UserSignature; pub type UserPublicKey = keys::UserPublicKey; pub type NamespacePublicKey = keys::NamespacePublicKey; -pub type NamespaceSignature = keys::Signature; +pub type NamespaceSignature = keys::NamespaceSignature; pub fn is_authorised_write(entry: &Entry, token: &MeadowcapAuthorisationToken) -> bool { let (capability, signature) = token.as_parts(); @@ -23,17 +24,46 @@ pub fn is_authorised_write(entry: &Entry, token: &MeadowcapAuthorisationToken) - .is_ok() } +pub fn create_token( + entry: &Entry, + capability: McCapability, + secret_key: &UserSecretKey, +) -> MeadowcapAuthorisationToken { + let signable = entry.encode(); + let signature = secret_key.sign(&signable); + MeadowcapAuthorisationToken::from_parts(capability, signature) +} + +pub fn attach_authorisation( + entry: Entry, + capability: McCapability, + secret_key: &UserSecretKey, +) -> Result { + if capability.access_mode() != AccessMode::Write + || !capability.granted_area().includes_entry(&entry) + || capability.receiver() != &secret_key.public_key() + { + return Err(InvalidParams); + } + let token = create_token(&entry, capability, secret_key); + 
Ok(AuthorisedEntry::from_parts_unchecked(entry, token)) +} + +#[derive(Debug, thiserror::Error)] +#[error("invalid parameters")] +pub struct InvalidParams; + #[derive(Debug, thiserror::Error)] -#[error("unauthorised")] +#[error("invalid capability")] pub struct InvalidCapability; /// To be used as an AuthorisationToken for Willow. -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize, Clone)] pub struct MeadowcapAuthorisationToken { /// Certifies that an Entry may be written. - capability: McCapability, + pub capability: McCapability, /// Proves that the Entry was created by the receiver of the capability. - signature: UserSignature, + pub signature: UserSignature, } // /// To be used as an AuthorisationToken for Willow. diff --git a/iroh-willow/src/proto/wgps.rs b/iroh-willow/src/proto/wgps.rs index 5750c7ff27..5f95207ba4 100644 --- a/iroh-willow/src/proto/wgps.rs +++ b/iroh-willow/src/proto/wgps.rs @@ -2,6 +2,7 @@ use std::cmp::Ordering; use bytes::Bytes; use ed25519_dalek::Signature; +use iroh_base::hash::Hash; use iroh_net::key::PublicKey; use serde::{Deserialize, Serialize}; @@ -130,9 +131,10 @@ impl Handle for IntersectionHandle { } /// Complete the commitment scheme to determine the challenge for read authentication. -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] +#[derive(Serialize, Deserialize, PartialEq, Eq, derive_more::Debug)] pub struct CommitmentReveal { /// The nonce of the sender, encoded as a big-endian unsigned integer. 
+ #[debug("{}..", iroh_base::base32::fmt_short(self.nonce))] pub nonce: AccessChallenge, } @@ -374,29 +376,42 @@ impl SubspaceArea { } } -#[derive(Debug, Serialize, Deserialize, derive_more::From)] +#[derive(Serialize, Deserialize, derive_more::From, derive_more::Debug)] pub enum Message { + #[debug("{:?}", _0)] CommitmentReveal(CommitmentReveal), // PaiReplyFragment // PaiBindFragment // PaiRequestSubspaceCapability // PaiReplySubspaceCapability + #[debug("{:?}", _0)] SetupBindStaticToken(SetupBindStaticToken), + #[debug("{:?}", _0)] SetupBindReadCapability(SetupBindReadCapability), + #[debug("{:?}", _0)] SetupBindAreaOfInterest(SetupBindAreaOfInterest), + #[debug("{:?}", _0)] ReconciliationSendFingerprint(ReconciliationSendFingerprint), + #[debug("{:?}", _0)] ReconciliationAnnounceEntries(ReconciliationAnnounceEntries), + #[debug("{:?}", _0)] ReconciliationSendEntry(ReconciliationSendEntry), // DataSendEntry // DataSendPayload // DataSetMetadata // DataBindPayloadRequest // DataReplyPayload + #[debug("{:?}", _0)] ControlIssueGuarantee(ControlIssueGuarantee), + #[debug("{:?}", _0)] ControlAbsolve(ControlAbsolve), + #[debug("{:?}", _0)] ControlPlead(ControlPlead), + #[debug("{:?}", _0)] ControlAnnounceDropping(ControlAnnounceDropping), + #[debug("{:?}", _0)] ControlApologise(ControlApologise), + #[debug("{:?}", _0)] ControlFreeHandle(ControlFreeHandle), } @@ -473,7 +488,7 @@ pub struct ReconciliationSendFingerprint { } /// Prepare transmission of the LengthyEntries a peer has in a 3dRange as part of 3d range-based set reconciliation. -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize, Clone)] pub struct ReconciliationAnnounceEntries { /// The 3dRange whose LengthyEntries to transmit. 
pub range: ThreeDRange, @@ -525,23 +540,55 @@ impl LengthyEntry { } } -#[derive(Debug, Serialize, Deserialize, Eq, PartialEq)] -pub struct Fingerprint([u8; 32]); +#[derive(Debug, Default, Serialize, Deserialize, Eq, PartialEq, Clone, Copy)] +pub struct Fingerprint(pub [u8; 32]); impl Fingerprint { + pub fn add_entry(&mut self, entry: &Entry) { + let next = Fingerprint(*Hash::new(&entry.encode()).as_bytes()); + *self ^= next; + } + + pub fn add_entries<'a>(&mut self, iter: impl Iterator) { + for entry in iter { + self.add_entry(entry); + } + } + + pub fn from_entries<'a>(iter: impl Iterator) -> Self { + let mut this = Self::default(); + this.add_entries(iter); + this + } +} + +impl std::ops::BitXorAssign for Fingerprint { + fn bitxor_assign(&mut self, rhs: Self) { + for (a, b) in self.0.iter_mut().zip(rhs.0.iter()) { + *a ^= b; + } + } } #[derive(Debug, Serialize, Deserialize, Clone)] pub struct ThreeDRange { - paths: Range, - subspaces: Range, - times: Range, + pub paths: Range, + pub subspaces: Range, + pub times: Range, +} + +impl ThreeDRange { + pub fn includes_entry(&self, entry: &Entry) -> bool { + self.subspaces.includes(&entry.subspace_id) + && self.paths.includes(&entry.path) + && self.times.includes(&entry.timestamp) + } } #[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)] pub struct Range { - start: T, - end: RangeEnd, + pub start: T, + pub end: RangeEnd, } impl Range { diff --git a/iroh-willow/src/proto/willow.rs b/iroh-willow/src/proto/willow.rs index e467c06074..37b699b26e 100644 --- a/iroh-willow/src/proto/willow.rs +++ b/iroh-willow/src/proto/willow.rs @@ -6,8 +6,10 @@ use serde::{Deserialize, Serialize}; use zerocopy::{native_endian::U64, FromBytes, IntoBytes, KnownLayout, NoCell, Unaligned}; use super::{ - keys::{self, PUBLIC_KEY_LENGTH}, - meadowcap::{self, is_authorised_write}, + keys::{self, UserSecretKey, PUBLIC_KEY_LENGTH}, + meadowcap::{ + self, attach_authorisation, create_token, is_authorised_write, InvalidParams, McCapability, 
+ }, }; pub type NamespaceId = keys::NamespaceId; @@ -180,8 +182,29 @@ impl std::ops::Deref for Path { } } +impl PartialOrd for Path { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for Path { + fn cmp(&self, other: &Self) -> Ordering { + for (i, component) in self.iter().enumerate() { + match other.get(i) { + Some(other_component) => match component.cmp(other_component) { + Ordering::Equal => continue, + ordering @ _ => return ordering, + }, + None => return Ordering::Greater, + } + } + Ordering::Equal + } +} + /// The metadata for storing a Payload. -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)] pub struct Entry { /// The identifier of the namespace to which the Entry belongs. pub namespace_id: NamespaceId, @@ -228,6 +251,14 @@ impl Entry { self > other } + pub fn attach_authorisation( + self, + capability: McCapability, + secret_key: &UserSecretKey, + ) -> Result { + attach_authorisation(self, capability, secret_key) + } + /// Convert the entry to a byte slice. /// /// This is invoked to create the signable for signatures over the entry. Thus, any change in @@ -281,7 +312,7 @@ impl TryFrom for AuthorisedEntry { } /// An AuthorisedEntry is a PossiblyAuthorisedEntry for which is_authorised_write returns true. 
-#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize, Clone)] pub struct AuthorisedEntry(Entry, AuthorisationToken); impl AuthorisedEntry { @@ -292,6 +323,14 @@ impl AuthorisedEntry { PossiblyAuthorisedEntry::new(entry, authorisation_token).authorise() } + pub fn entry(&self) -> &Entry { + &self.0 + } + + pub fn into_entry(self) -> Entry { + self.0 + } + pub fn is_authorised(&self) -> bool { true } @@ -304,6 +343,10 @@ impl AuthorisedEntry { pub fn into_parts(self) -> (Entry, AuthorisationToken) { (self.0, self.1) } + + pub fn namespace_id(&self) -> NamespaceId { + self.1.capability.granted_namespace().into() + } } // impl std::ops::Deref for AuthorisedEntry { diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 7333aaeb52..791c747fdd 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -3,11 +3,11 @@ use std::collections::{hash_map, HashMap, VecDeque}; use anyhow::bail; use ed25519_dalek::SignatureError; -use tracing::warn; +use tracing::{debug, warn}; use crate::{ proto::{ - keys::{NamespaceId, NamespacePublicKey, Signature, UserSecretKey}, + keys::{NamespaceId, NamespacePublicKey, UserSecretKey, UserSignature}, meadowcap::{is_authorised_write, InvalidCapability}, wgps::{ AccessChallenge, Area, ChallengeHash, CommitmentReveal, Fingerprint, Handle, @@ -169,6 +169,8 @@ pub struct Session { us: PeerState, them: PeerState, + + done: bool, } #[derive(Debug)] @@ -202,11 +204,11 @@ pub struct PeerState { #[derive(Debug)] pub struct SessionInit { - user_secret_key: UserSecretKey, + pub user_secret_key: UserSecretKey, // TODO: allow multiple capabilities - capability: ReadCapability, + pub capability: ReadCapability, // TODO: allow multiple areas of interest - area_of_interest: AreaOfInterest, + pub area_of_interest: AreaOfInterest, } impl Session { @@ -229,13 +231,14 @@ impl Session { them: Default::default(), our_current_aoi: None, // config init: Some(init), + done: false, }; let msg = 
CommitmentReveal { nonce: our_nonce }; this.control_channel.send(msg); this } - fn sign_challenge(&self, secret_key: &UserSecretKey) -> Result { + fn sign_challenge(&self, secret_key: &UserSecretKey) -> Result { let challenge = self .challenge .as_ref() @@ -254,7 +257,8 @@ impl Session { let area_of_interest = init.area_of_interest.clone(); let capability = init.capability.clone(); - if *capability.receiver() == init.user_secret_key.public_key() { + debug!(role=?self.our_role, ?init, "init"); + if *capability.receiver() != init.user_secret_key.public_key() { return Err(Error::WrongSecretKeyForCapability); } @@ -296,21 +300,17 @@ impl Session { } } - pub fn process(&mut self, store: &mut S) -> Result<(), Error> { + pub fn process(&mut self, store: &mut S) -> Result { while let Some(message) = self.control_channel.inbox_pop() { self.process_control(store, message)?; } while let Some(message) = self.reconciliation_channel.inbox_pop() { self.process_reconciliation(store, message)?; } - Ok(()) + Ok(self.done) } - pub fn process_control( - &mut self, - store: &mut S, - message: Message, - ) -> Result<(), Error> { + fn process_control(&mut self, store: &mut S, message: Message) -> Result<(), Error> { match message { Message::CommitmentReveal(msg) => { if self.challenge.is_some() { @@ -396,7 +396,6 @@ impl Session { let range = area.into_range(); let fingerprint = store.range_fingerprint(namespace.into(), &range)?; - let msg = ReconciliationSendFingerprint { range, fingerprint, @@ -407,8 +406,62 @@ impl Session { Ok(()) } + // fn send_fingerprint(&mut self, store: &mut S, ) + + fn announce_entries( + &mut self, + store: &mut S, + namespace: NamespaceId, + range: &ThreeDRange, + our_handle: AreaOfInterestHandle, + their_handle: AreaOfInterestHandle, + want_response: bool, + ) -> Result<(), Error> { + for part in store.split_range(namespace, &range)?.into_iter() { + match part { + RangeSplitPart::SendFingerprint(range, fingerprint) => { + let msg = 
ReconciliationSendFingerprint { + range, + fingerprint, + sender_handle: our_handle, + receiver_handle: their_handle, + }; + self.reconciliation_channel + .send(Message::ReconciliationSendFingerprint(msg)); + } + RangeSplitPart::SendEntries(range, local_count) => { + let msg = ReconciliationAnnounceEntries { + range: range.clone(), + count: local_count, + want_response, + will_sort: false, // todo: sorted? + sender_handle: our_handle, + receiver_handle: their_handle, + }; + self.reconciliation_channel.send(msg); + for authorised_entry in store.get_entries_with_authorisation(namespace, &range) + { + let authorised_entry = authorised_entry?; + let (entry, token) = authorised_entry.into_parts(); + let (static_token, dynamic_token) = token.into_parts(); + // todo: partial entries + let available = entry.payload_length; + let static_token_handle = self.bind_static_token(static_token); + let msg = ReconciliationSendEntry { + entry: LengthyEntry::new(entry, available), + static_token_handle, + dynamic_token, + }; + self.reconciliation_channel.send(msg); + } + } + } + } + Ok(()) + } + /// Uses the blocking [`Store`] and thus may only be called in the worker thread. - pub fn process_reconciliation( + fn process_reconciliation( &mut self, store: &mut S, message: Message, @@ -422,7 +475,8 @@ impl Session { receiver_handle, } = message; - let namespace = self.authorise_range(&range, &receiver_handle, &sender_handle)?; + let namespace = + self.range_is_authorised(&range, &receiver_handle, &sender_handle)?; let our_fingerprint = store.range_fingerprint(namespace, &range)?; // case 1: fingerprint match. 
@@ -437,63 +491,48 @@ impl Session { }; self.reconciliation_channel .send(Message::ReconciliationAnnounceEntries(msg)); + // TODO: This is likely incorrect + self.done = true; } else { - for part in store.split_range(namespace, &range)?.into_iter() { - match part { - RangeSplitPart::SendFingerprint(range, fingerprint) => { - let msg = ReconciliationSendFingerprint { - range, - fingerprint, - sender_handle, - receiver_handle, - }; - self.reconciliation_channel - .send(Message::ReconciliationSendFingerprint(msg)); - } - RangeSplitPart::SendEntries(range, local_count) => { - let msg = ReconciliationAnnounceEntries { - range: range.clone(), - count: local_count, - want_response: true, - will_sort: false, // todo: sorted? - sender_handle, - receiver_handle, - }; - self.reconciliation_channel.send(msg); - for authorised_entry in - store.get_entries_with_authorisation(namespace, &range) - { - let authorised_entry = authorised_entry?; - let (entry, token) = authorised_entry.into_parts(); - let (static_token, dynamic_token) = token.into_parts(); - // todo: partial entries - let available = entry.payload_length; - let static_token_handle = self.bind_static_token(static_token); - let msg = ReconciliationSendEntry { - entry: LengthyEntry::new(entry, available), - static_token_handle, - dynamic_token, - }; - self.reconciliation_channel.send(msg); - } - } - } - } + self.announce_entries( + store, + namespace, + &range, + receiver_handle, + sender_handle, + true, + )?; } } Message::ReconciliationAnnounceEntries(message) => { + let ReconciliationAnnounceEntries { + range, + count, + want_response, + will_sort: _, + sender_handle, + receiver_handle, + } = &message; if self.them.reconciliation_announce_entries.is_some() { return Err(Error::InvalidMessageInCurrentState); } - self.authorise_range( - &message.range, - &message.receiver_handle, - &message.sender_handle, - )?; - if message.count == 0 { + let namespace = + self.range_is_authorised(&range, &receiver_handle, 
&sender_handle)?; + if *count == 0 && !want_response { // todo: what do we need to do here? + self.done = true; } else { - self.them.reconciliation_announce_entries = Some(message) + self.them.reconciliation_announce_entries = Some(message.clone()); + } + if *want_response { + self.announce_entries( + store, + namespace, + range, + *receiver_handle, + *sender_handle, + false, + )?; } } Message::ReconciliationSendEntry(message) => { @@ -525,7 +564,7 @@ impl Session { Ok(()) } - fn authorise_range( + fn range_is_authorised( &self, range: &ThreeDRange, receiver_handle: &AreaOfInterestHandle, @@ -582,12 +621,9 @@ pub struct Channel { outbox: VecDeque, // issued_guarantees: usize, } -impl Default for Channel { +impl Default for Channel { fn default() -> Self { - Self { - inbox: Default::default(), - outbox: Default::default(), - } + Self::with_capacity(1024) } } @@ -671,58 +707,3 @@ fn bitwise_xor_complement(a: [u8; N], b: [u8; N]) -> [u8; N] { } res } - -// #[derive(Debug, derive_more::From, derive_more::TryInto)] -// pub enum ScopedMessage { -// Control(ControlMessage), -// Reconciliation(ReconciliationMessage), -// } -// -// impl From for ScopedMessage { -// fn from(value: Message) -> Self { -// match value { -// Message::ReconciliationSendFingerprint(msg) => Self::Reconciliation(msg.into()), -// Message::ReconciliationAnnounceEntries(msg) => Self::Reconciliation(msg.into()), -// Message::ReconciliationSendEntry(msg) => Self::Reconciliation(msg.into()), -// -// Message::CommitmentReveal(msg) => Self::Control(msg.into()), -// Message::SetupBindStaticToken(msg) => Self::Control(msg.into()), -// Message::SetupBindReadCapability(msg) => Self::Control(msg.into()), -// Message::SetupBindAreaOfInterest(msg) => Self::Control(msg.into()), -// -// Message::ControlIssueGuarantee(msg) => Self::Control(msg.into()), -// Message::ControlAbsolve(msg) => Self::Control(msg.into()), -// Message::ControlPlead(msg) => Self::Control(msg.into()), -// Message::ControlAnnounceDropping(msg) 
=> Self::Control(msg.into()), -// Message::ControlApologise(msg) => Self::Control(msg.into()), -// Message::ControlFreeHandle(msg) => Self::Control(msg.into()), -// } -// } -// } -// -// #[derive(Debug, derive_more::From)] -// pub enum ReconciliationMessage { -// SendFingerprint(ReconciliationSendFingerprint), -// AnnounceEntries(ReconciliationAnnounceEntries), -// SendEntry(ReconciliationSendEntry), -// } -// -// #[derive(Debug, derive_more::From)] -// pub enum ControlMessage { -// CommitmentReveal(CommitmentReveal), -// // TODO: move to CapabilityChannel -// SetupBindReadCapability(SetupBindReadCapability), -// // TODO: move to StaticTokenChannel -// SetupBindStaticToken(SetupBindStaticToken), -// // TODO: move to AreaOfInterestChannel -// SetupBindAreaOfInterest(SetupBindAreaOfInterest), -// -// IssueGuarantee(ControlIssueGuarantee), -// Absolve(ControlAbsolve), -// Plead(ControlPlead), -// AnnounceDropping(ControlAnnounceDropping), -// Apologise(ControlApologise), -// -// FreeHandle(ControlFreeHandle), -// } -// diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index 956d989a82..dcf545e682 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -1,8 +1,10 @@ use std::collections::HashMap; +use iroh_base::hash::Hash; + use crate::proto::{ - wgps::{Area, Fingerprint, ThreeDRange}, - willow::{AuthorisedEntry, NamespaceId}, + wgps::{Area, Fingerprint, RangeEnd, ThreeDRange}, + willow::{AuthorisedEntry, Entry, NamespaceId}, }; pub trait Store { @@ -18,6 +20,15 @@ pub trait Store { range: &ThreeDRange, ) -> anyhow::Result; + fn get_entries<'a>( + &'a mut self, + namespace: NamespaceId, + range: &ThreeDRange, + ) -> impl Iterator> + 'a { + self.get_entries_with_authorisation(namespace, range) + .map(|e| e.map(|e| e.into_entry())) + } + fn get_entries_with_authorisation<'a>( &'a mut self, namespace: NamespaceId, @@ -38,9 +49,12 @@ impl Store for MemoryStore { namespace: NamespaceId, range: &ThreeDRange, ) -> anyhow::Result { - let _ = 
namespace; - let _ = range; - todo!() + let mut fingerprint = Fingerprint::default(); + for entry in self.get_entries(namespace, range) { + let entry = entry?; + fingerprint.add_entry(&entry); + } + Ok(fingerprint) } fn split_range( @@ -48,9 +62,47 @@ impl Store for MemoryStore { namespace: NamespaceId, range: &ThreeDRange, ) -> anyhow::Result { - let _ = namespace; - let _ = range; - todo!() + let count = self.get_entries(namespace, range).count(); + let split_if_more_than = 2; + let res = if count > split_if_more_than { + let mut entries: Vec<_> = self + .get_entries(namespace, range) + .filter_map(|e| e.ok()) + .collect(); + let pivot_index = count / 2; + let right = entries.split_off(pivot_index); + let left = entries; + + let pivot = right.first().unwrap(); + let mut range_left = range.clone(); + range_left.paths.end = RangeEnd::Closed(pivot.path.clone()); + range_left.times.end = RangeEnd::Closed(pivot.timestamp); + range_left.subspaces.end = RangeEnd::Closed(pivot.subspace_id); + + let mut range_right = range.clone(); + range_right.paths.start = pivot.path.clone(); + range_right.times.start = pivot.timestamp; + range_right.subspaces.start = pivot.subspace_id; + + let left_part = if left.len() > split_if_more_than { + let fp = Fingerprint::from_entries(left.iter()); + RangeSplitPart::SendFingerprint(range_left, fp) + } else { + RangeSplitPart::SendEntries(range_left, left.len() as u64) + }; + + let right_part = if left.len() > split_if_more_than { + let fp = Fingerprint::from_entries(right.iter()); + RangeSplitPart::SendFingerprint(range_right, fp) + } else { + RangeSplitPart::SendEntries(range_right, right.len() as u64) + }; + + RangeSplit::SendSplit([left_part, right_part]) + } else { + RangeSplit::SendEntries(range.clone(), count as u64) + }; + Ok(res) } fn get_entries_with_authorisation<'a>( @@ -58,14 +110,37 @@ impl Store for MemoryStore { namespace: NamespaceId, range: &ThreeDRange, ) -> impl Iterator> + 'a { - let _ = namespace; - let _ = range; - 
None.into_iter() + self.entries + .get(&namespace) + .into_iter() + .flatten() + .filter(|entry| range.includes_entry(entry.entry())) + .map(|e| Result::<_, anyhow::Error>::Ok(e.clone())) + .collect::>() + .into_iter() } fn ingest_entry(&mut self, entry: &AuthorisedEntry) -> anyhow::Result<()> { - let _ = entry; - todo!() + let entries = self.entries.entry(entry.namespace_id()).or_default(); + let new = entry.entry(); + let mut to_remove = vec![]; + for (i, other) in entries.iter().enumerate() { + let old = other.entry(); + if old.subspace_id == new.subspace_id && old.path.is_prefix_of(&new.path) && old >= new + { + // we cannot insert the entry, a newer entry exists + return Ok(()); + } + if new.subspace_id == old.subspace_id && new.path.is_prefix_of(&old.path) && new > old { + to_remove.push(i); + } + } + for i in to_remove { + entries.remove(i); + } + + entries.push(entry.clone()); + Ok(()) } } From bd4db24431a474463afb3834141274b612a63553 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 23 Apr 2024 15:49:26 +0200 Subject: [PATCH 005/198] chore: fmt --- iroh-willow/src/proto/keys.rs | 2 -- iroh-willow/src/session.rs | 19 ++++++++----------- 2 files changed, 8 insertions(+), 13 deletions(-) diff --git a/iroh-willow/src/proto/keys.rs b/iroh-willow/src/proto/keys.rs index 1016c1f499..fd3e39a758 100644 --- a/iroh-willow/src/proto/keys.rs +++ b/iroh-willow/src/proto/keys.rs @@ -24,7 +24,6 @@ impl NamespaceSignature { pub fn fmt_short(&self) -> String { base32::fmt_short(&self.to_bytes()) } - } impl fmt::Display for NamespaceSignature { @@ -47,7 +46,6 @@ impl UserSignature { pub fn fmt_short(&self) -> String { base32::fmt_short(&self.to_bytes()) } - } impl fmt::Display for UserSignature { diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 791c747fdd..a0720b6e43 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -10,9 +10,14 @@ use crate::{ keys::{NamespaceId, NamespacePublicKey, UserSecretKey, 
UserSignature}, meadowcap::{is_authorised_write, InvalidCapability}, wgps::{ - AccessChallenge, Area, ChallengeHash, CommitmentReveal, Fingerprint, Handle, - HandleType, IntersectionHandle, LengthyEntry, LogicalChannel, Message, - StaticTokenHandle, ThreeDRange, CHALLENGE_HASH_LENGTH, + AccessChallenge, Area, AreaOfInterest, AreaOfInterestHandle, CapabilityHandle, + ChallengeHash, CommitmentReveal, ControlAbsolve, ControlAnnounceDropping, + ControlApologise, ControlFreeHandle, ControlIssueGuarantee, ControlPlead, Fingerprint, + Handle, HandleType, IntersectionHandle, LengthyEntry, LogicalChannel, Message, + ReadCapability, ReconciliationAnnounceEntries, ReconciliationSendEntry, + ReconciliationSendFingerprint, SetupBindAreaOfInterest, SetupBindReadCapability, + SetupBindStaticToken, StaticToken, StaticTokenHandle, ThreeDRange, + CHALLENGE_HASH_LENGTH, }, willow::{ AuthorisationToken, AuthorisedEntry, Entry, PossiblyAuthorisedEntry, Unauthorised, @@ -21,14 +26,6 @@ use crate::{ store::{RangeSplitPart, Store}, }; -use super::proto::wgps::{ - AreaOfInterest, AreaOfInterestHandle, CapabilityHandle, ControlAbsolve, - ControlAnnounceDropping, ControlApologise, ControlFreeHandle, ControlIssueGuarantee, - ControlPlead, ReadCapability, ReconciliationAnnounceEntries, ReconciliationSendEntry, - ReconciliationSendFingerprint, SetupBindAreaOfInterest, SetupBindReadCapability, - SetupBindStaticToken, StaticToken, -}; - #[derive(Debug)] struct ResourceMap { next_handle: u64, From b61e29919b3b9e4f3080a71121a3fbef08476709 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 25 Apr 2024 00:53:04 +0200 Subject: [PATCH 006/198] more progress --- iroh-willow/src/net.rs | 157 +++++--- iroh-willow/src/net/codec.rs | 3 +- iroh-willow/src/proto.rs | 1 + iroh-willow/src/proto/keys.rs | 464 ++++++++-------------- iroh-willow/src/proto/meadowcap.rs | 96 +++-- iroh-willow/src/proto/wgps.rs | 373 ++---------------- iroh-willow/src/proto/willow.rs | 236 ++++++------ 
iroh-willow/src/session.rs | 591 ++++++++++++++++++++--------- iroh-willow/src/store.rs | 286 +++++++++----- 9 files changed, 1076 insertions(+), 1131 deletions(-) diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 7a0110215a..e61b091ca1 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -1,15 +1,17 @@ -use anyhow::ensure; -use futures::{FutureExt, SinkExt, TryFutureExt}; +use std::{pin::Pin, task::Poll}; + +use anyhow::{ensure, Context}; +use futures::{FutureExt, SinkExt, Stream, TryFutureExt}; use iroh_base::hash::Hash; use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio_stream::StreamExt; -use tokio_util::codec::{FramedRead, FramedWrite}; -use tracing::debug; +use tokio_util::codec::{Decoder, FramedRead, FramedWrite}; +use tracing::{debug, instrument}; use crate::{ proto::wgps::{ AccessChallenge, ChallengeHash, CHALLENGE_HASH_LENGTH, CHALLENGE_LENGTH, - MAXIMUM_PAYLOAD_SIZE_POWER, + MAX_PAYLOAD_SIZE_POWER, }, session::{Role, Session, SessionInit}, store::{MemoryStore, Store}, @@ -19,6 +21,18 @@ use self::codec::WillowCodec; pub mod codec; +/// Read the next frame from a [`FramedRead`] but only if it is available without waiting on IO. 
+async fn next_if_ready( + mut reader: &mut FramedRead, +) -> Option> { + futures::future::poll_fn(|cx| match Pin::new(&mut reader).poll_next(cx) { + Poll::Ready(r) => Poll::Ready(r), + Poll::Pending => Poll::Ready(None), + }) + .await +} + +#[instrument(skip_all, fields(role=?role))] async fn run( store: &mut S, conn: quinn::Connection, @@ -32,53 +46,49 @@ async fn run( let our_nonce: AccessChallenge = rand::random(); debug!(?role, "start"); - let (received_commitment, maximum_payload_size) = + let (received_commitment, max_payload_size) = exchange_commitments(&mut send, &mut recv, &our_nonce).await?; debug!(?role, "exchanged comittments"); - let mut session = Session::new( - role, - our_nonce, - maximum_payload_size, - received_commitment, - init, - ); + let mut session = Session::new(role, our_nonce, max_payload_size, received_commitment, init); let mut reader = FramedRead::new(recv, WillowCodec); let mut writer = FramedWrite::new(send, WillowCodec); - // move to store thread for this! + // TODO: blocking! session.process(store)?; - // back in network land: send out everything - // should be in parallel with reading + // send out initial messages for message in session.drain_outbox() { debug!(role=?role, ?message, "send"); writer.send(message).await?; } - while let Some(message) = reader.try_next().await? { - debug!(?role, ?message, "recv"); - // TODO: buffer more than a single message here before handing off to store thread - // what we should do here: - // * notify store thread that we want to process - // * keep reading and pushing into session, until session is full - // * once store thread is ready for us: be notified of that, and hand over session to store - // thread + while let Some(message) = reader.next().await { + let message = message.context("error from reader")?; + debug!(%message,awaited=true, "recv"); session.recv(message.into()); - // move to store thread for this! 
+ // keep pushing already buffered messages + while let Some(message) = next_if_ready(&mut reader).await { + let message = message.context("error from reader")?; + debug!(%message,awaited=false, "recv"); + // TODO: stop when session is full + session.recv(message.into()); + } + + // TODO: blocking! let done = session.process(store)?; + debug!(?done, "process done"); - // back in network land: send out everything - // should be in parallel with reading for message in session.drain_outbox() { - debug!(role=?role, ?message, "send"); + debug!(%message, "send"); writer.send(message).await?; } if done { - break; + debug!("close"); + writer.close().await?; } } Ok(()) @@ -90,35 +100,36 @@ async fn exchange_commitments( our_nonce: &AccessChallenge, ) -> anyhow::Result<(ChallengeHash, usize)> { let challenge_hash = Hash::new(&our_nonce); - send.write_u8(MAXIMUM_PAYLOAD_SIZE_POWER).await?; + send.write_u8(MAX_PAYLOAD_SIZE_POWER).await?; send.write_all(challenge_hash.as_bytes()).await?; - let their_maximum_payload_size_power = recv.read_u8().await?; - ensure!( - their_maximum_payload_size_power <= 64, - "maximum payload size too large" - ); - let their_maximum_payload_size = 2usize.pow(their_maximum_payload_size_power as u32); + let their_max_payload_size = { + let power = recv.read_u8().await?; + ensure!(power <= 64, "max payload size too large"); + 2usize.pow(power as u32) + }; let mut received_commitment = [0u8; CHALLENGE_HASH_LENGTH]; recv.read_exact(&mut received_commitment).await?; - Ok((received_commitment, their_maximum_payload_size)) + Ok((received_commitment, their_max_payload_size)) } #[cfg(test)] mod tests { + use std::{collections::HashSet, time::Instant}; + use iroh_base::hash::Hash; use iroh_net::MagicEndpoint; use rand::SeedableRng; - use tracing::debug; + use tracing::{debug, info}; use crate::{ net::run, proto::{ - keys::{NamespaceId, NamespaceSecretKey, NamespaceType, UserSecretKey}, + grouping::{AreaOfInterest, ThreeDRange}, + keys::{NamespaceId, 
NamespaceKind, NamespaceSecretKey, UserSecretKey}, meadowcap::{AccessMode, McCapability, OwnedCapability}, - wgps::{AreaOfInterest, ReadCapability}, - willow::{Entry, Path}, + willow::{AuthorisedEntry, Entry, Path, SubspaceId}, }, session::{Role, SessionInit}, store::{MemoryStore, Store}, @@ -130,6 +141,8 @@ mod tests { async fn smoke() -> anyhow::Result<()> { iroh_test::logging::setup_multithreaded(); let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(1); + let n_betty = 10; + let n_alfie = 20; let ep_alfie = MagicEndpoint::builder() .alpns(vec![ALPN.to_vec()]) @@ -152,27 +165,28 @@ mod tests { ); let conn_alfie = conn_alfie.unwrap(); let conn_betty = conn_betty.unwrap(); - debug!("connected"); + info!("connected! now start reconciliation"); - let namespace_secret = NamespaceSecretKey::generate(&mut rng, NamespaceType::Owned); + let namespace_secret = NamespaceSecretKey::generate(&mut rng, NamespaceKind::Owned); let namespace_id: NamespaceId = namespace_secret.public_key().into(); + let start = Instant::now(); + let mut expected_entries = HashSet::new(); let mut store_alfie = MemoryStore::default(); let init_alfie = { let secret_key = UserSecretKey::generate(&mut rng); let public_key = secret_key.public_key(); - let read_capability = ReadCapability::Owned(OwnedCapability::new( + let read_capability = McCapability::Owned(OwnedCapability::new( &namespace_secret, public_key, AccessMode::Read, )); - let area_of_interest = AreaOfInterest::full(); let write_capability = McCapability::Owned(OwnedCapability::new( &namespace_secret, public_key, AccessMode::Write, )); - for i in 0..3 { + for i in 0..n_alfie { let p = format!("alfie{i}"); let entry = Entry { namespace_id, @@ -182,9 +196,11 @@ mod tests { payload_length: 2, payload_digest: Hash::new("cool things"), }; + expected_entries.insert(entry.clone()); let entry = entry.attach_authorisation(write_capability.clone(), &secret_key)?; store_alfie.ingest_entry(&entry)?; } + let area_of_interest = AreaOfInterest::full(); 
SessionInit { user_secret_key: secret_key, capability: read_capability, @@ -201,13 +217,12 @@ mod tests { public_key, AccessMode::Read, )); - let area_of_interest = AreaOfInterest::full(); let write_capability = McCapability::Owned(OwnedCapability::new( &namespace_secret, public_key, AccessMode::Write, )); - for i in 0..3 { + for i in 0..n_betty { let p = format!("betty{i}"); let entry = Entry { namespace_id, @@ -217,9 +232,11 @@ mod tests { payload_length: 2, payload_digest: Hash::new("cool things"), }; + expected_entries.insert(entry.clone()); let entry = entry.attach_authorisation(write_capability.clone(), &secret_key)?; store_betty.ingest_entry(&entry)?; } + let area_of_interest = AreaOfInterest::full(); SessionInit { user_secret_key: secret_key, capability: read_capability, @@ -233,11 +250,47 @@ mod tests { run(&mut store_alfie, conn_alfie, Role::Alfie, init_alfie), run(&mut store_betty, conn_betty, Role::Betty, init_betty), ); - res_alfie.unwrap(); - res_betty.unwrap(); - println!("alfie {:?}", store_alfie); - println!("betty {:?}", store_betty); + info!(time=?start.elapsed(), "reconciliation finished!"); + + info!("alfie res {:?}", res_alfie); + info!("betty res {:?}", res_betty); + info!( + "alfie store {:?}", + get_entries_debug(&mut store_alfie, namespace_id) + ); + info!( + "betty store {:?}", + get_entries_debug(&mut store_betty, namespace_id) + ); + + assert!(res_alfie.is_ok()); + assert!(res_betty.is_ok()); + assert_eq!( + get_entries(&mut store_alfie, namespace_id), + expected_entries + ); + assert_eq!( + get_entries(&mut store_betty, namespace_id), + expected_entries + ); Ok(()) } + fn get_entries(store: &mut S, namespace: NamespaceId) -> HashSet { + store + .get_entries(namespace, &ThreeDRange::all()) + .filter_map(Result::ok) + .collect() + } + + fn get_entries_debug( + store: &mut S, + namespace: NamespaceId, + ) -> Vec<(SubspaceId, Path)> { + store + .get_entries(namespace, &ThreeDRange::all()) + .filter_map(|r| r.ok()) + .map(|e| 
(e.subspace_id, e.path)) + .collect() + } } diff --git a/iroh-willow/src/net/codec.rs b/iroh-willow/src/net/codec.rs index 288820546d..ba4018c3ec 100644 --- a/iroh-willow/src/net/codec.rs +++ b/iroh-willow/src/net/codec.rs @@ -37,8 +37,7 @@ impl Encoder for WillowCodec { type Error = anyhow::Error; fn encode(&mut self, item: Message, dst: &mut BytesMut) -> Result<(), Self::Error> { - let len = - postcard::serialize_with_flavor(&item, postcard::ser_flavors::Size::default()).unwrap(); + let len = postcard::experimental::serialized_size(&item)?; ensure!( len <= MAX_MESSAGE_SIZE, "attempting to send message that is too large {}", diff --git a/iroh-willow/src/proto.rs b/iroh-willow/src/proto.rs index 204cf69956..cf3e8947ab 100644 --- a/iroh-willow/src/proto.rs +++ b/iroh-willow/src/proto.rs @@ -1,4 +1,5 @@ pub mod keys; pub mod meadowcap; +pub mod grouping; pub mod wgps; pub mod willow; diff --git a/iroh-willow/src/proto/keys.rs b/iroh-willow/src/proto/keys.rs index fd3e39a758..de69a5e163 100644 --- a/iroh-willow/src/proto/keys.rs +++ b/iroh-willow/src/proto/keys.rs @@ -1,184 +1,76 @@ -//! Keys used in iroh-sync +//! Public-key crypto types for willow +//! +//! This modules defines types which are wrappers around [`ed25519_dalek`] public-key crypto types. +//! +//! We also define an Id type which is a public key represented as [u8; 32], which is smaller than +//! 
the expanded PublicKey representation needed for signature verification use std::{cmp::Ordering, fmt, str::FromStr}; +use derive_more::{AsRef, Deref, From, Into}; use ed25519_dalek::{SignatureError, Signer, SigningKey, VerifyingKey}; use iroh_base::base32; use rand_core::CryptoRngCore; use serde::{Deserialize, Serialize}; -// use crate::store::PublicKeyStore; - pub const PUBLIC_KEY_LENGTH: usize = ed25519_dalek::PUBLIC_KEY_LENGTH; pub const SECRET_KEY_LENGTH: usize = ed25519_dalek::SECRET_KEY_LENGTH; pub const SIGNATURE_LENGTH: usize = ed25519_dalek::SIGNATURE_LENGTH; -pub type SubspaceId = UserId; - -#[derive(Serialize, Deserialize, Clone, PartialEq, Eq)] -pub struct NamespaceSignature(ed25519_dalek::Signature); - -impl NamespaceSignature { - /// Convert to a base32 string limited to the first 10 bytes for a friendly string - /// representation of the key. - pub fn fmt_short(&self) -> String { - base32::fmt_short(&self.to_bytes()) - } -} - -impl fmt::Display for NamespaceSignature { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", base32::fmt(&self.to_bytes())) - } -} -impl fmt::Debug for NamespaceSignature { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "NamespaceSignature({})", self.fmt_short()) - } -} - -#[derive(Serialize, Deserialize, Clone, PartialEq, Eq)] -pub struct UserSignature(ed25519_dalek::Signature); - -impl UserSignature { - /// Convert to a base32 string limited to the first 10 bytes for a friendly string - /// representation of the key. 
- pub fn fmt_short(&self) -> String { - base32::fmt_short(&self.to_bytes()) - } -} - -impl fmt::Display for UserSignature { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", base32::fmt(&self.to_bytes())) - } -} -impl fmt::Debug for UserSignature { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "UserSignature({})", self.fmt_short()) - } -} +/// Helper macro to implement formatting traits for bytestring like types +macro_rules! bytestring { + ($ty:ty, $n:ident) => { + impl $ty { + /// Convert to a base32 string limited to the first 10 bytes for a friendly string + /// representation of the key. + pub fn fmt_short(&self) -> String { + base32::fmt_short(&self.to_bytes()) + } + } -impl std::ops::Deref for UserSignature { - type Target = ed25519_dalek::Signature; - fn deref(&self) -> &Self::Target { - &self.0 - } -} + impl fmt::Display for $ty { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", base32::fmt(&self.to_bytes())) + } + } -impl std::ops::Deref for NamespaceSignature { - type Target = ed25519_dalek::Signature; - fn deref(&self) -> &Self::Target { - &self.0 - } + impl fmt::Debug for $ty { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}({})", stringify!($ty), self.fmt_short()) + } + } + }; } -/// User key to insert entries in a [`crate::Replica`] -/// -/// Internally, an author is a [`SigningKey`] which is used to sign entries. -#[derive(Clone, Serialize, Deserialize)] -pub struct UserSecretKey(SigningKey); - -impl UserSecretKey { - /// Create a new [`UserSecretKey`] with a random key. - pub fn generate(rng: &mut R) -> Self { - let signing_key = SigningKey::generate(rng); - UserSecretKey(signing_key) - } - - /// Create an [`UserSecretKey`] from a byte array. - pub fn from_bytes(bytes: &[u8; 32]) -> Self { - SigningKey::from_bytes(bytes).into() - } - - /// Returns the [`UserSecretKey`] byte representation. 
- pub fn to_bytes(&self) -> [u8; 32] { - self.0.to_bytes() - } - - /// Get the [`UserPublicKey`] for this author. - pub fn public_key(&self) -> UserPublicKey { - UserPublicKey(self.0.verifying_key()) - } - - /// Get the [`UserId`] for this author. - pub fn id(&self) -> UserId { - UserId::from(self.public_key()) - } - - /// Sign a message with this [`UserSecretKey`] key. - pub fn sign(&self, msg: &[u8]) -> UserSignature { - UserSignature(self.0.sign(msg)) - } - - /// Strictly verify a signature on a message with this [`UserSecretKey`]'s public key. - pub fn verify(&self, msg: &[u8], signature: &UserSignature) -> Result<(), SignatureError> { - self.0.verify_strict(msg, &signature.0) - } - - /// Convert to a base32 string limited to the first 10 bytes for a friendly string - /// representation of the key. - pub fn fmt_short(&self) -> String { - base32::fmt_short(&self.to_bytes()) - } +/// Returns `true` if the last bit of a byte slice is 1, which defines a communal namespace in this +/// willow implementation. +fn is_communal(pubkey_bytes: &[u8; 32]) -> bool { + let last = pubkey_bytes.last().expect("pubkey is not empty"); + // Check if last bit is 1. + (*last & 0x1) == 0x1 } -/// Identifier for an [`UserSecretKey`] +/// The type of the namespace, either communal or owned. /// -/// This is the corresponding [`VerifyingKey`] for an author. It is used as an identifier, and can -/// be used to verify [`Signature`]s. -#[derive(Default, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Hash, derive_more::From)] -pub struct UserPublicKey(VerifyingKey); - -impl UserPublicKey { - /// Verify that a signature matches the `msg` bytes and was created with the [`UserSecretKey`] - /// that corresponds to this [`UserId`]. - pub fn verify(&self, msg: &[u8], signature: &UserSignature) -> Result<(), SignatureError> { - self.0.verify_strict(msg, &signature.0) - } - - /// Get the byte representation of this [`UserId`]. 
- pub fn as_bytes(&self) -> &[u8; 32] { - self.0.as_bytes() - } - - /// Get the byte representation of this [`UserId`]. - pub fn to_bytes(&self) -> [u8; 32] { - *self.0.as_bytes() - } - - /// Create from a slice of bytes. - /// - /// Will return an error if the input bytes do not represent a valid [`ed25519_dalek`] - /// curve point. Will never fail for a byte array returned from [`Self::as_bytes`]. - /// See [`VerifyingKey::from_bytes`] for details. - pub fn from_bytes(bytes: &[u8; 32]) -> Result { - Ok(UserPublicKey(VerifyingKey::from_bytes(bytes)?)) - } - - /// Convert to a base32 string limited to the first 10 bytes for a friendly string - /// representation of the key. - pub fn fmt_short(&self) -> String { - base32::fmt_short(&self.to_bytes()) - } -} - +/// A [`NamespacePublicKey`] whose last bit is 1 is defined to be a communal namespace, +/// and if the last bit is zero it is an owned namespace. #[derive(Debug, Eq, PartialEq, Copy, Clone)] -pub enum NamespaceType { +pub enum NamespaceKind { + /// Communal namespace, needs [`super::meadowcap::CommunalCapability`] to authorizse. Communal, + /// Owned namespace, neads [`super::meadowcap::OwnedCapability`] to authorize. Owned, } -/// Namespace key of a [`crate::Replica`]. -/// -/// Holders of this key can insert new entries into a [`crate::Replica`]. -/// Internally, a [`NamespaceSecretKey] is a [`SigningKey`] which is used to sign entries. +/// Namespace secret key. #[derive(Clone, Serialize, Deserialize)] pub struct NamespaceSecretKey(SigningKey); +bytestring!(NamespaceSecretKey, PUBLIC_KEY_LENGTH); + impl NamespaceSecretKey { - /// Create a new [`NamespaceSecretKey] with a random key. - pub fn generate(rng: &mut R, typ: NamespaceType) -> Self { + /// Create a new, random [`NamespaceSecretKey] with an encoded [`NamespaceKind`]. 
+ pub fn generate(rng: &mut R, typ: NamespaceKind) -> Self { loop { let signing_key = SigningKey::generate(rng); let secret_key = NamespaceSecretKey(signing_key); @@ -193,7 +85,7 @@ impl NamespaceSecretKey { SigningKey::from_bytes(bytes).into() } - /// Returns the [`NamespaceSecretKey] byte representation. + /// Convert into a byte array. pub fn to_bytes(&self) -> [u8; 32] { self.0.to_bytes() } @@ -217,29 +109,25 @@ impl NamespaceSecretKey { pub fn verify(&self, msg: &[u8], signature: &NamespaceSignature) -> Result<(), SignatureError> { self.0.verify_strict(msg, &signature.0) } - - /// Convert to a base32 string limited to the first 10 bytes for a friendly string - /// representation of the key. - pub fn fmt_short(&self) -> String { - base32::fmt_short(&self.to_bytes()) - } } -/// The corresponding [`VerifyingKey`] for a [`NamespaceSecretKey]. -/// It is used as an identifier, and can be used to verify [`Signature`]s. +/// The corresponding public key for a [`NamespaceSecretKey]. #[derive(Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Hash, derive_more::From)] pub struct NamespacePublicKey(VerifyingKey); +bytestring!(NamespacePublicKey, PUBLIC_KEY_LENGTH); + impl NamespacePublicKey { /// Whether this is the key for a communal namespace. pub fn is_communal(&self) -> bool { is_communal(self.as_bytes()) } - pub fn namespace_type(&self) -> NamespaceType { - match self.is_communal() { - true => NamespaceType::Communal, - false => NamespaceType::Owned, + pub fn namespace_type(&self) -> NamespaceKind { + if self.is_communal() { + NamespaceKind::Communal + } else { + NamespaceKind::Owned } } @@ -249,11 +137,16 @@ impl NamespacePublicKey { self.0.verify_strict(msg, &signature.0) } - /// Get the byte representation of this [`NamespaceId`]. + /// Get this [`NamespaceId`] as a byte slice. pub fn as_bytes(&self) -> &[u8; 32] { self.0.as_bytes() } + /// Convert into a byte array. 
+ pub fn to_bytes(&self) -> [u8; 32] { + self.0.to_bytes() + } + /// Create from a slice of bytes. /// /// Will return an error if the input bytes do not represent a valid [`ed25519_dalek`] @@ -262,83 +155,82 @@ impl NamespacePublicKey { pub fn from_bytes(bytes: &[u8; 32]) -> Result { Ok(NamespacePublicKey(VerifyingKey::from_bytes(bytes)?)) } - - /// Convert to a base32 string limited to the first 10 bytes for a friendly string - /// representation of the key. - pub fn fmt_short(&self) -> String { - base32::fmt_short(self.as_bytes()) - } } -impl fmt::Display for UserSecretKey { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", base32::fmt(self.to_bytes())) - } -} +/// User secret key. +#[derive(Clone, Serialize, Deserialize)] +pub struct UserSecretKey(SigningKey); + +bytestring!(UserSecretKey, SECRET_KEY_LENGTH); -impl fmt::Display for NamespaceSecretKey { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", base32::fmt(self.to_bytes())) +impl UserSecretKey { + /// Create a new [`UserSecretKey`] with a random key. + pub fn generate(rng: &mut R) -> Self { + let signing_key = SigningKey::generate(rng); + UserSecretKey(signing_key) } -} -impl fmt::Display for UserPublicKey { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", base32::fmt(self.as_bytes())) + /// Create from a byte slice. + pub fn from_bytes(bytes: &[u8; 32]) -> Self { + SigningKey::from_bytes(bytes).into() } -} -impl fmt::Display for NamespacePublicKey { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", base32::fmt(self.as_bytes())) + /// Convert into a byte array. + pub fn to_bytes(&self) -> [u8; 32] { + self.0.to_bytes() } -} -impl fmt::Display for UserId { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", base32::fmt(self.as_bytes())) + /// Get the [`UserPublicKey`] for this author. 
+ pub fn public_key(&self) -> UserPublicKey { + UserPublicKey(self.0.verifying_key()) } -} -impl fmt::Display for NamespaceId { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", base32::fmt(self.as_bytes())) + /// Get the [`UserId`] for this author. + pub fn id(&self) -> UserId { + UserId::from(self.public_key()) } -} -impl fmt::Debug for NamespaceSecretKey { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "NamespaceSecretKey({})", self.fmt_short()) + /// Sign a message with this [`UserSecretKey`] key. + pub fn sign(&self, msg: &[u8]) -> UserSignature { + UserSignature(self.0.sign(msg)) } -} -impl fmt::Debug for NamespaceId { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "NamespaceId({})", self.fmt_short()) + /// Strictly verify a signature on a message with this [`UserSecretKey`]'s public key. + pub fn verify(&self, msg: &[u8], signature: &UserSignature) -> Result<(), SignatureError> { + self.0.verify_strict(msg, &signature.0) } } -impl fmt::Debug for UserId { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "UserId({})", self.fmt_short()) +/// The corresponding public key for a [`UserSecretKey]. +#[derive(Default, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Hash, derive_more::From)] +pub struct UserPublicKey(VerifyingKey); + +bytestring!(UserPublicKey, PUBLIC_KEY_LENGTH); + +impl UserPublicKey { + /// Verify that a signature matches the `msg` bytes and was created with the [`UserSecretKey`] + /// that corresponds to this [`UserId`]. + pub fn verify(&self, msg: &[u8], signature: &UserSignature) -> Result<(), SignatureError> { + self.0.verify_strict(msg, &signature.0) } -} -impl fmt::Debug for UserSecretKey { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "UserSecretKey({})", self.fmt_short()) + /// Get this [`UserId`] as a byte slice. 
+ pub fn as_bytes(&self) -> &[u8; 32] { + self.0.as_bytes() } -} -impl fmt::Debug for NamespacePublicKey { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "NamespacePublicKey({})", self.fmt_short()) + /// Convert into a byte array. + pub fn to_bytes(&self) -> [u8; 32] { + self.0.to_bytes() } -} -impl fmt::Debug for UserPublicKey { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "UserPublicKey({})", self.fmt_short()) + /// Create from a slice of bytes. + /// + /// Will return an error if the input bytes do not represent a valid [`ed25519_dalek`] + /// curve point. Will never fail for a byte array returned from [`Self::as_bytes`]. + /// See [`VerifyingKey::from_bytes`] for details. + pub fn from_bytes(bytes: &[u8; 32]) -> Result { + Ok(UserPublicKey(VerifyingKey::from_bytes(bytes)?)) } } @@ -434,23 +326,17 @@ impl From<&UserSecretKey> for UserPublicKey { } } -/// [`NamespacePublicKey`] in bytes -#[derive( - Default, - Clone, - Copy, - PartialOrd, - Ord, - Eq, - PartialEq, - Hash, - derive_more::From, - derive_more::Into, - derive_more::AsRef, - Serialize, - Deserialize, -)] -pub struct NamespaceId([u8; 32]); +/// The signature obtained by signing a message with a [`NamespaceSecretKey`]. +#[derive(Serialize, Deserialize, Clone, PartialEq, Eq, Deref)] +pub struct NamespaceSignature(ed25519_dalek::Signature); + +bytestring!(NamespaceSignature, SIGNATURE_LENGTH); + +/// The signature obtained by signing a message with a [`UserSecretKey`]. +#[derive(Serialize, Deserialize, Clone, PartialEq, Eq, Deref)] +pub struct UserSignature(ed25519_dalek::Signature); + +bytestring!(UserSignature, SIGNATURE_LENGTH); /// [`UserPublicKey`] in bytes #[derive( @@ -462,34 +348,26 @@ pub struct NamespaceId([u8; 32]); Eq, PartialEq, Hash, - derive_more::From, - derive_more::Into, - derive_more::AsRef, + From, + Into, + AsRef, Serialize, Deserialize, )] pub struct UserId([u8; 32]); -impl UserId { - /// Convert to byte array. 
- pub fn to_bytes(&self) -> [u8; 32] { - self.0 - } +bytestring!(UserId, PUBLIC_KEY_LENGTH); +impl UserId { /// Convert to byte slice. pub fn as_bytes(&self) -> &[u8; 32] { &self.0 } - // /// Convert into [`UserPublicKey`] by fetching from a [`PublicKeyStore`]. - // /// - // /// Fails if the bytes of this [`UserId`] are not a valid [`ed25519_dalek`] curve point. - // pub fn public_key( - // &self, - // store: &S, - // ) -> Result { - // store.author_key(self) - // } + /// Convert into a byte array. + pub fn to_bytes(&self) -> [u8; 32] { + self.0 + } /// Convert into [`UserPublicKey`]. /// @@ -498,41 +376,44 @@ impl UserId { UserPublicKey::from_bytes(&self.0) } - /// Convert to a base32 string limited to the first 10 bytes for a friendly string - /// representation of the key. - pub fn fmt_short(&self) -> String { - base32::fmt_short(self.0) - } - - pub fn zero() -> Self { - Self([0u8; 32]) - } - + /// Create from a byte array. + /// + /// Does not check if the byte array are a valid [`UserPublicKey`] pub fn from_bytes_unchecked(bytes: [u8; 32]) -> Self { Self(bytes) } } -impl NamespaceId { - /// Convert to byte array. - pub fn to_bytes(&self) -> [u8; 32] { - self.0 - } +/// [`NamespacePublicKey`] in bytes +#[derive( + Default, + Clone, + Copy, + PartialOrd, + Ord, + Eq, + PartialEq, + Hash, + From, + Into, + AsRef, + Serialize, + Deserialize, +)] +pub struct NamespaceId([u8; 32]); +bytestring!(NamespaceId, PUBLIC_KEY_LENGTH); + +impl NamespaceId { /// Convert to byte slice. pub fn as_bytes(&self) -> &[u8; 32] { &self.0 } - // /// Convert into [`NamespacePublicKey`] by fetching from a [`PublicKeyStore`]. - // /// - // /// Fails if the bytes of this [`NamespaceId`] are not a valid [`ed25519_dalek`] curve point. - // pub fn public_key( - // &self, - // store: &S, - // ) -> Result { - // store.namespace_key(self) - // } + /// Convert into a byte array. + pub fn to_bytes(&self) -> [u8; 32] { + self.0 + } /// Convert into [`NamespacePublicKey`]. 
/// @@ -541,22 +422,11 @@ impl NamespaceId { NamespacePublicKey::from_bytes(&self.0) } - /// Convert to a base32 string limited to the first 10 bytes for a friendly string - /// representation of the key. - pub fn fmt_short(&self) -> String { - base32::fmt_short(self.0) - } -} - -impl From<&[u8; 32]> for NamespaceId { - fn from(value: &[u8; 32]) -> Self { - Self(*value) - } -} - -impl From<&[u8; 32]> for UserId { - fn from(value: &[u8; 32]) -> Self { - Self(*value) + /// Create from a byte array. + /// + /// Does not check if the byte array are a valid [`NamespacePublicKey`] + pub fn from_bytes_unchecked(bytes: [u8; 32]) -> Self { + Self(bytes) } } @@ -634,9 +504,3 @@ impl FromStr for NamespaceId { NamespacePublicKey::from_str(s).map(|x| x.into()) } } - -pub fn is_communal(pubkey_bytes: &[u8]) -> bool { - let last = pubkey_bytes.last().expect("pubkey is not empty"); - // Check if last bit is 1. - (*last & 0x1) == 0x1 -} diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index 9ef447717a..592ad563eb 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -2,8 +2,8 @@ use serde::{Deserialize, Serialize}; use tracing::debug; use super::{ + grouping::Area, keys::{self, NamespaceSecretKey, UserSecretKey, PUBLIC_KEY_LENGTH}, - wgps::Area, willow::{AuthorisedEntry, Entry, Unauthorised}, }; @@ -58,7 +58,7 @@ pub struct InvalidParams; pub struct InvalidCapability; /// To be used as an AuthorisationToken for Willow. -#[derive(Debug, Serialize, Deserialize, Clone)] +#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)] pub struct MeadowcapAuthorisationToken { /// Certifies that an Entry may be written. pub capability: McCapability, @@ -66,22 +66,15 @@ pub struct MeadowcapAuthorisationToken { pub signature: UserSignature, } -// /// To be used as an AuthorisationToken for Willow. 
-// #[derive(Debug, Serialize, Deserialize)] +// TODO: We clone these a bunch where it wouldn't be needed if we could create a reference type to +// which the [`MeadowcapAuthorisationToken`] would deref to, but I couldn't make it work nice +// enough. +// #[derive(Debug, Clone, Eq, PartialEq)] // pub struct MeadowcapAuthorisationTokenRef<'a> { // /// Certifies that an Entry may be written. -// capability: &'a McCapability, +// pub capability: &'a McCapability, // /// Proves that the Entry was created by the receiver of the capability. -// signature: &'a UserSignature, -// } -// -// impl<'a> AsRef> for MeadowcapAuthorisationToken { -// fn as_ref(&self) -> &MeadowcapAuthorisationTokenRef { -// &MeadowcapAuthorisationTokenRef { -// capability: &self.capability, -// signature: &self.signature, -// } -// } +// pub signature: &'a UserSignature, // } impl MeadowcapAuthorisationToken { @@ -94,6 +87,7 @@ impl MeadowcapAuthorisationToken { pub fn as_parts(&self) -> (&McCapability, &UserSignature) { (&self.capability, &self.signature) } + pub fn into_parts(self) -> (McCapability, UserSignature) { (self.capability, self.signature) } @@ -105,7 +99,7 @@ impl From<(McCapability, UserSignature)> for MeadowcapAuthorisationToken { } } -#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)] +#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, derive_more::From)] pub enum McCapability { Communal(CommunalCapability), Owned(OwnedCapability), @@ -167,7 +161,7 @@ pub enum AccessMode { Write, } -/// A capability that implements communal namespaces. +/// A capability that authorizes reads or writes in communal namespaces. #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] pub struct CommunalCapability { /// The kind of access this grants. @@ -179,14 +173,7 @@ pub struct CommunalCapability { /// Remember that we assume SubspaceId and UserPublicKey to be the same types. 
user_key: UserPublicKey, /// Successive authorisations of new UserPublicKeys, each restricted to a particular Area. - delegations: Vec, -} - -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] -struct Delegation { - area: Area, - user_key: UserPublicKey, - signature: UserSignature, + delegations: Vec<(Area, UserPublicKey, UserSignature)>, } impl CommunalCapability { @@ -205,16 +192,17 @@ impl CommunalCapability { } pub fn is_valid(&self) -> bool { - // TODO: support delegations - if !self.delegations.is_empty() { - return false; + if self.delegations.is_empty() { + // communal capabilities without delegations are always valid + true + } else { + // TODO: support delegations + false } - // communal capabilities without delegations are always valid - true } } -/// A capability that implements owned namespaces. +/// A capability that authorizes reads or writes in owned namespaces. #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] pub struct OwnedCapability { /// The kind of access this grants. @@ -230,20 +218,6 @@ pub struct OwnedCapability { } impl OwnedCapability { - // TODO: zerocopy? 
- pub fn signable( - access_mode: AccessMode, - user_key: &UserPublicKey, - ) -> [u8; PUBLIC_KEY_LENGTH + 1] { - let mut signable = [0u8; PUBLIC_KEY_LENGTH + 1]; - signable[0] = match access_mode { - AccessMode::Read => 0x02, - AccessMode::Write => 0x03, - }; - signable[1..].copy_from_slice(user_key.as_bytes()); - signable - } - pub fn new( namespace_secret_key: &NamespaceSecretKey, user_key: UserPublicKey, @@ -263,7 +237,6 @@ impl OwnedCapability { pub fn receiver(&self) -> &UserPublicKey { // TODO: support delegations - // self.delegations.last().map(|d| &d.user_key).unwrap_or(&self.user_key) &self.user_key } @@ -277,13 +250,30 @@ impl OwnedCapability { } pub fn is_valid(&self) -> bool { - // TODO: support delegations - if !self.delegations.is_empty() { - return false; + if self.delegations.is_empty() { + let signable = Self::signable(self.access_mode, &self.user_key); + self.namespace_key + .verify(&signable, &self.initial_authorisation) + .is_ok() + } else { + // TODO: support delegations + false } - let signable = Self::signable(self.access_mode, &self.user_key); - self.namespace_key - .verify(&signable, &self.initial_authorisation) - .is_ok() + } + + fn signable(access_mode: AccessMode, user_key: &UserPublicKey) -> [u8; PUBLIC_KEY_LENGTH + 1] { + let mut signable = [0u8; PUBLIC_KEY_LENGTH + 1]; + // https://willowprotocol.org/specs/meadowcap/index.html#owned_cap_valid + // An OwnedCapability with zero delegations is valid if initial_authorisation + // is a NamespaceSignature issued by the namespace_key over + // either the byte 0x02 (if access_mode is read) + // or the byte 0x03 (if access_mode is write), + // followed by the user_key (encoded via encode_user_pk). 
+ signable[0] = match access_mode { + AccessMode::Read => 0x02, + AccessMode::Write => 0x03, + }; + signable[1..].copy_from_slice(user_key.as_bytes()); + signable } } diff --git a/iroh-willow/src/proto/wgps.rs b/iroh-willow/src/proto/wgps.rs index 5f95207ba4..4a97f8ec60 100644 --- a/iroh-willow/src/proto/wgps.rs +++ b/iroh-willow/src/proto/wgps.rs @@ -1,4 +1,4 @@ -use std::cmp::Ordering; +use std::{cmp::Ordering, fmt}; use bytes::Bytes; use ed25519_dalek::Signature; @@ -7,6 +7,7 @@ use iroh_net::key::PublicKey; use serde::{Deserialize, Serialize}; use super::{ + grouping::{Area, AreaOfInterest, SubspaceArea, ThreeDRange}, keys, meadowcap, willow::{ AuthorisationToken, AuthorisedEntry, Entry, Path, PossiblyAuthorisedEntry, SubspaceId, @@ -14,13 +15,14 @@ use super::{ }, }; -pub const MAXIMUM_PAYLOAD_SIZE_POWER: u8 = 12; +pub const MAX_PAYLOAD_SIZE_POWER: u8 = 12; + /// The maximum payload size limits when the other peer may include Payloads directly when transmitting Entries: /// when an Entry’s payload_length is strictly greater than the maximum payload size, /// its Payload may only be transmitted when explicitly requested. /// /// The value is 4096. -pub const MAXIMUM_PAYLOAD_SIZE: usize = 2usize.pow(MAXIMUM_PAYLOAD_SIZE_POWER as u32); +pub const MAX_PAYLOAD_SIZE: usize = 2usize.pow(MAX_PAYLOAD_SIZE_POWER as u32); pub const CHALLENGE_LENGTH: usize = 32; pub const CHALLENGE_HASH_LENGTH: usize = DIGEST_LENGTH; @@ -73,7 +75,9 @@ pub enum LogicalChannel { ControlChannel, /// Logical channel for performing 3d range-based set reconciliation. ReconciliationChannel, - // TODO: actually use more channels + // TODO: use all the channels + // right now everything but reconciliation goes into the control channel + // // /// Logical channel for transmitting Entries and Payloads outside of 3d range-based set reconciliation. 
// DataChannel, // @@ -138,245 +142,7 @@ pub struct CommitmentReveal { pub nonce: AccessChallenge, } -// skip: Private Area Intersection - -/// A grouping of Entries that are among the newest in some store. -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)] -pub struct AreaOfInterest { - /// To be included in this AreaOfInterest, an Entry must be included in the area. - pub area: Area, - /// To be included in this AreaOfInterest, an Entry’s timestamp must be among the max_count greatest Timestamps, unless max_count is zero. - pub max_count: u64, - /// The total payload_lengths of all included Entries is at most max_size, unless max_size is zero. - pub max_size: u64, -} - -impl AreaOfInterest { - pub fn full() -> Self { - Self { - area: Area::full(), - max_count: 0, - max_size: 0, - } - } -} - -/// A grouping of Entries. -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] -pub struct Area { - /// To be included in this Area, an Entry’s subspace_id must be equal to the subspace_id, unless it is any. - pub subspace_id: SubspaceArea, - /// To be included in this Area, an Entry’s path must be prefixed by the path. - pub path: Path, - /// To be included in this Area, an Entry’s timestamp must be included in the times. 
- pub times: Range, -} - -impl Area { - pub const fn new(subspace_id: SubspaceArea, path: Path, times: Range) -> Self { - Self { - subspace_id, - path, - times, - } - } - - pub fn full() -> Self { - Self::new(SubspaceArea::Any, Path::empty(), Range::::FULL) - } - - pub fn empty() -> Self { - Self::new(SubspaceArea::Any, Path::empty(), Range::::EMPTY) - } - - pub fn subspace(subspace_id: SubspaceId) -> Self { - Self::new( - SubspaceArea::Id(subspace_id), - Path::empty(), - Range::::FULL, - ) - } - - pub fn includes_entry(&self, entry: &Entry) -> bool { - self.subspace_id.includes_subspace(&entry.subspace_id) - && self.path.is_prefix_of(&entry.path) - && self.times.includes(&entry.timestamp) - } - - pub fn includes_area(&self, other: &Area) -> bool { - self.subspace_id.includes(&other.subspace_id) - && self.path.is_prefix_of(&other.path) - && self.times.includes_range(&other.times) - } - - pub fn includes_range(&self, range: &ThreeDRange) -> bool { - let path_start = self.path.is_prefix_of(&range.paths.start); - let path_end = match &range.paths.end { - RangeEnd::Open => true, - RangeEnd::Closed(path) => self.path.is_prefix_of(path), - }; - let subspace_start = self.subspace_id.includes_subspace(&range.subspaces.start); - let subspace_end = match range.subspaces.end { - RangeEnd::Open => true, - RangeEnd::Closed(subspace) => self.subspace_id.includes_subspace(&subspace), - }; - subspace_start - && subspace_end - && path_start - && path_end - && self.times.includes_range(&range.times) - } - - pub fn into_range(&self) -> ThreeDRange { - let subspace_start = match self.subspace_id { - SubspaceArea::Any => SubspaceId::zero(), - SubspaceArea::Id(id) => id, - }; - let subspace_end = match self.subspace_id { - SubspaceArea::Any => RangeEnd::Open, - SubspaceArea::Id(id) => subspace_range_end(id), - }; - let path_start = self.path.clone(); - let path_end = path_range_end(&self.path); - ThreeDRange { - subspaces: Range::new(subspace_start, subspace_end), - paths: 
Range::new(path_start, path_end), - times: self.times.clone(), - } - } - - pub fn intersection(&self, other: &Area) -> Option { - let subspace_id = self.subspace_id.intersection(&other.subspace_id)?; - let path = self.path.intersection(&other.path)?; - let times = self.times.intersection(&other.times)?; - Some(Self { - subspace_id, - times, - path, - }) - } -} - -fn path_range_end(path: &Path) -> RangeEnd { - if path.is_empty() { - RangeEnd::Open - } else { - let mut out = vec![]; - for component in path.iter().rev() { - // component can be incremented - if out.is_empty() && component.iter().any(|x| *x != 0xff) { - let mut bytes = Vec::with_capacity(component.len()); - bytes.copy_from_slice(&component); - let incremented = increment_by_one(&mut bytes); - debug_assert!(incremented, "checked above"); - out.push(Bytes::from(bytes)); - break; - // component cannot be incremented - } else if out.is_empty() { - continue; - } else { - out.push(component.clone()) - } - } - if out.is_empty() { - RangeEnd::Open - } else { - out.reverse(); - RangeEnd::Closed(Path::from_bytes_unchecked(out)) - } - } - // let mut bytes = id.to_bytes(); - // if increment_by_one(&mut bytes) { - // RangeEnd::Closed(SubspaceId::from_bytes_unchecked(bytes)) - // } else { - // RangeEnd::Open - // } -} - -fn subspace_range_end(id: SubspaceId) -> RangeEnd { - let mut bytes = id.to_bytes(); - if increment_by_one(&mut bytes) { - RangeEnd::Closed(SubspaceId::from_bytes_unchecked(bytes)) - } else { - RangeEnd::Open - } -} - -/// Increment a byte string by one, by incrementing the last byte that is not 255 by one. -/// -/// Returns false if all bytes are 255. 
-fn increment_by_one(value: &mut [u8]) -> bool { - for char in value.iter_mut().rev() { - if *char != 255 { - *char += 1; - return true; - } else { - *char = 0; - } - } - false -} - -impl Range { - pub const FULL: Self = Self { - start: 0, - end: RangeEnd::Open, - }; - - pub const EMPTY: Self = Self { - start: 0, - end: RangeEnd::Closed(0), - }; - - fn intersection(&self, other: &Self) -> Option { - let start = self.start.max(other.start); - let end = match (&self.end, &other.end) { - (RangeEnd::Open, RangeEnd::Closed(b)) => RangeEnd::Closed(*b), - (RangeEnd::Closed(a), RangeEnd::Closed(b)) => RangeEnd::Closed(*a.min(b)), - (RangeEnd::Closed(a), RangeEnd::Open) => RangeEnd::Closed(*a), - (RangeEnd::Open, RangeEnd::Open) => RangeEnd::Open, - }; - match end { - RangeEnd::Open => Some(Self::new(start, end)), - RangeEnd::Closed(t) if t >= start => Some(Self::new(start, end)), - RangeEnd::Closed(_) => Some(Self::new(start, end)), - } - } -} - -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] -pub enum SubspaceArea { - Any, - Id(SubspaceId), -} - -impl SubspaceArea { - fn includes(&self, other: &SubspaceArea) -> bool { - match (self, other) { - (SubspaceArea::Any, SubspaceArea::Any) => true, - (SubspaceArea::Id(_), SubspaceArea::Any) => false, - (_, SubspaceArea::Id(id)) => self.includes_subspace(id), - } - } - fn includes_subspace(&self, subspace_id: &SubspaceId) -> bool { - match self { - Self::Any => true, - Self::Id(id) => id == subspace_id, - } - } - - fn intersection(&self, other: &Self) -> Option { - match (self, other) { - (Self::Any, Self::Any) => Some(Self::Any), - (Self::Id(a), Self::Any) => Some(Self::Id(*a)), - (Self::Any, Self::Id(b)) => Some(Self::Id(*b)), - (Self::Id(a), Self::Id(b)) if a == b => Some(Self::Id(*a)), - (Self::Id(_a), Self::Id(_b)) => None, - } - } -} - -#[derive(Serialize, Deserialize, derive_more::From, derive_more::Debug)] +#[derive(Serialize, Deserialize, derive_more::From, derive_more::Debug, strum::Display)] pub enum 
Message { #[debug("{:?}", _0)] CommitmentReveal(CommitmentReveal), @@ -485,6 +251,11 @@ pub struct ReconciliationSendFingerprint { pub sender_handle: AreaOfInterestHandle, /// An AreaOfInterestHandle, bound by the receiver of this message, that fully contains the range. pub receiver_handle: AreaOfInterestHandle, + /// If this is this the last reply to range received via [`ReconciliationSendFingerprint`] or [`ReconciliationAnnounceEntries`] + /// from the other peer, set to that range to indicate to the other peer that no further replies for that range will be sent + /// + /// TODO: This is a spec deviation, discuss further and remove or upstream + pub is_final_reply_for_range: Option, } /// Prepare transmission of the LengthyEntries a peer has in a 3dRange as part of 3d range-based set reconciliation. @@ -502,6 +273,11 @@ pub struct ReconciliationAnnounceEntries { pub sender_handle: AreaOfInterestHandle, /// An AreaOfInterestHandle, bound by the receiver of this message, that fully contains the range. pub receiver_handle: AreaOfInterestHandle, + /// If this is this the last reply to range received via [`ReconciliationSendFingerprint`] or [`ReconciliationAnnounceEntries`] + /// from the other peer, set to that range to indicate to the other peer that no further replies for that range will be sent + /// + /// TODO: This is a spec deviation, discuss further and remove or upstream + pub is_final_reply_for_range: Option, } /// Transmit a LengthyEntry as part of 3d range-based set reconciliation. 
@@ -540,9 +316,15 @@ impl LengthyEntry { } } -#[derive(Debug, Default, Serialize, Deserialize, Eq, PartialEq, Clone, Copy)] +#[derive(Default, Serialize, Deserialize, Eq, PartialEq, Clone, Copy)] pub struct Fingerprint(pub [u8; 32]); +impl fmt::Debug for Fingerprint { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Fingerprint({})", iroh_base::base32::fmt_short(&self.0)) + } +} + impl Fingerprint { pub fn add_entry(&mut self, entry: &Entry) { let next = Fingerprint(*Hash::new(&entry.encode()).as_bytes()); @@ -560,6 +342,10 @@ impl Fingerprint { this.add_entries(iter); this } + + pub fn is_empty(&self) -> bool { + *self == Self::default() + } } impl std::ops::BitXorAssign for Fingerprint { @@ -570,109 +356,26 @@ impl std::ops::BitXorAssign for Fingerprint { } } -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct ThreeDRange { - pub paths: Range, - pub subspaces: Range, - pub times: Range, -} - -impl ThreeDRange { - pub fn includes_entry(&self, entry: &Entry) -> bool { - self.subspaces.includes(&entry.subspace_id) - && self.paths.includes(&entry.path) - && self.times.includes(&entry.timestamp) - } -} - -#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)] -pub struct Range { - pub start: T, - pub end: RangeEnd, -} - -impl Range { - pub fn new(start: T, end: RangeEnd) -> Self { - Self { start, end } - } - pub fn is_closed(&self) -> bool { - matches!(self.end, RangeEnd::Closed(_)) - } - pub fn is_open(&self) -> bool { - matches!(self.end, RangeEnd::Open) - } -} - -impl Range { - pub fn includes(&self, value: &T) -> bool { - value >= &self.start && self.end.includes(value) - } - - pub fn includes_range(&self, other: &Range) -> bool { - self.start <= other.start && self.end >= other.end - } -} - -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)] -pub enum RangeEnd { - Closed(T), - Open, -} - -impl PartialOrd for RangeEnd { - fn partial_cmp(&self, other: &Self) -> Option { - match (self, other) { - (RangeEnd::Open, 
RangeEnd::Closed(_)) => Some(Ordering::Greater), - (RangeEnd::Closed(_), RangeEnd::Open) => Some(Ordering::Less), - (RangeEnd::Closed(a), RangeEnd::Closed(b)) => a.partial_cmp(b), - (RangeEnd::Open, RangeEnd::Open) => Some(Ordering::Equal), - } - } -} - -// impl PartialOrd for RangeEnd { -// fn partial_cmp(&self, other: &T) -> Option { -// // match (self, other) { -// // (RangeEnd::Open, RangeEnd::Closed(_)) => Some(Ordering::Greater), -// // (RangeEnd::Closed(_), RangeEnd::Open) => Some(Ordering::Less), -// // (RangeEnd::Closed(a), RangeEnd::Closed(b)) => a.partial_cmp(b), -// // (RangeEnd::Open, RangeEnd::Open) => Some(Ordering::Equal), -// // } -// } -// } - -impl RangeEnd { - pub fn includes(&self, value: &T) -> bool { - match self { - Self::Open => true, - Self::Closed(end) => value < end, - } - } -} - -#[derive(Debug, Serialize, Deserialize)] -pub enum ControlMessage {} - /// Make a binding promise of available buffer capacity to the other peer #[derive(Debug, Serialize, Deserialize)] pub struct ControlIssueGuarantee { - amount: u64, - channel: LogicalChannel, + pub amount: u64, + pub channel: LogicalChannel, } /// Allow the other peer to reduce its total buffer capacity by amount. #[derive(Debug, Serialize, Deserialize)] pub struct ControlAbsolve { - amount: u64, - channel: LogicalChannel, + pub amount: u64, + pub channel: LogicalChannel, } /// Ask the other peer to send an ControlAbsolve message /// such that the receiver remaining guarantees will be target. #[derive(Debug, Serialize, Deserialize)] pub struct ControlPlead { - target: u64, - channel: LogicalChannel, + pub target: u64, + pub channel: LogicalChannel, } /// The server notifies the client that it has started dropping messages and will continue @@ -680,13 +383,13 @@ pub struct ControlPlead { /// guarantees of the logical channel before sending a AnnounceDropping message. 
#[derive(Debug, Serialize, Deserialize)] pub struct ControlAnnounceDropping { - channel: LogicalChannel, + pub channel: LogicalChannel, } /// The client notifies the server that it can stop dropping messages on this logical channel. #[derive(Debug, Serialize, Deserialize)] pub struct ControlApologise { - channel: LogicalChannel, + pub channel: LogicalChannel, } /// Ask the other peer to free a resource handle. diff --git a/iroh-willow/src/proto/willow.rs b/iroh-willow/src/proto/willow.rs index 37b699b26e..41ff71e208 100644 --- a/iroh-willow/src/proto/willow.rs +++ b/iroh-willow/src/proto/willow.rs @@ -12,35 +12,37 @@ use super::{ }, }; +/// A type for identifying namespaces. pub type NamespaceId = keys::NamespaceId; + +/// A type for identifying subspaces. pub type SubspaceId = keys::UserId; + +/// A Timestamp is a 64-bit unsigned integer, that is, a natural number between zero (inclusive) and 2^64 - 1 (exclusive). +/// Timestamps are to be interpreted as a time in microseconds since the Unix epoch. pub type Timestamp = u64; + +/// A totally ordered type for content-addressing the data that Willow stores. pub type PayloadDigest = Hash; + +/// The type of components of a [`Path`]. pub type Component = Bytes; +// A for proving write permission. pub type AuthorisationToken = meadowcap::MeadowcapAuthorisationToken; -// pub type AuthorisationTokenRef<'a> = meadowcap::MeadowcapAuthorisationTokenRef; /// A natural number for limiting the length of path components. pub const MAX_COMPONENT_LENGTH: usize = 4096; + /// A natural number for limiting the number of path components. pub const MAX_COMPONENT_COUNT: usize = 1024; + /// A natural number max_path_length for limiting the overall size of paths. pub const MAX_PATH_LENGTH: usize = 4096; +/// The byte length of a [`PayloadDigest`]. pub const DIGEST_LENGTH: usize = 32; -/// `PATH_LENGTH_POWER` is the least natural number such that `256 ^ PATH_LENGTH_POWER ≥ MAX_COMPONENT_LENGTH`. 
-/// We can represent the length of any Component in path_length_power bytes. -/// UPathLengthPower denotes the type of numbers between zero (inclusive) and 256path_length_power (exclusive). -/// -/// The value `2` means that we can encode paths up to 64KiB long. -const PATH_LENGTH_POWER: usize = 2; -const PATH_COUNT_POWER: usize = PATH_LENGTH_POWER; - -type UPathLengthPower = u16; -type UPathCountPower = u16; - /// Error returned for entries that are not authorised. /// /// See [`is_authorised_write`] for details. @@ -60,26 +62,14 @@ pub enum InvalidPath { TooManyComponents, } -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash)] pub struct Path(Arc<[Component]>); -// TODO: zerocopy support for path -// #[allow(missing_debug_implementations)] -// #[derive(KnownLayout, FromBytes, NoCell, Unaligned, IntoBytes)] -// #[repr(C, packed)] -// pub struct ComponentRef([u8]); -// -// #[allow(missing_debug_implementations)] -// #[derive(KnownLayout, FromBytes, NoCell, Unaligned, IntoBytes)] -// #[repr(C, packed)] -// pub struct PathRef([ComponentRef]); -// pub struct PathRef<'a>(&'a [&'a [u8]]); -// impl<'a> AsRef> for Path { -// fn as_ref(&'a self) -> &'a PathRef<'a> { -// todo!() -// } -// } - +impl Default for Path { + fn default() -> Self { + Path::empty() + } +} impl Path { pub fn new(components: &[&[u8]]) -> Result { Self::validate(components)?; @@ -123,29 +113,6 @@ impl Path { Self(Arc::new([])) } - pub fn encoded_len(&self) -> usize { - let lengths_len = PATH_COUNT_POWER + self.len() * PATH_LENGTH_POWER; - let data_len = self.iter().map(Bytes::len).sum::(); - lengths_len + data_len - } - - /// Encode in the format for signatures into a mutable vector. 
- pub fn encode_into(&self, out: &mut Vec) { - let component_count = self.len() as UPathCountPower; - out.extend_from_slice(&component_count.to_be_bytes()); - for component in self.iter() { - let len = component.len() as UPathLengthPower; - out.extend_from_slice(&len.to_be_bytes()); - out.extend_from_slice(&component); - } - } - - pub fn encode(&self) -> Vec { - let mut out = Vec::with_capacity(self.encoded_len()); - self.encode_into(&mut out); - out - } - pub fn intersection(&self, other: &Path) -> Option { if self.is_prefix_of(other) { Some(self.clone()) @@ -154,24 +121,6 @@ impl Path { } else { None } - // if self == other { - // Some(self.clone()) - // } else { - // let mut out = Vec::new(); - // for (a, b) in self.iter().zip(other.iter()) { - // if a == b { - // out.push(a.clone()); - // } else { - // break; - // } - // } - // if out.is_empty() { - // None - // } else { - // Some(Path::from_bytes_unchecked(out)) - // } - // } - // if self.is_prefix_of(&other) } } @@ -204,7 +153,7 @@ impl Ord for Path { } /// The metadata for storing a Payload. -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)] +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Hash)] pub struct Entry { /// The identifier of the namespace to which the Entry belongs. 
pub namespace_id: NamespaceId, @@ -223,32 +172,17 @@ pub struct Entry { pub payload_digest: PayloadDigest, } -impl PartialOrd for Entry { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for Entry { - fn cmp(&self, other: &Self) -> Ordering { - if other.timestamp < self.timestamp +impl Entry { + pub fn is_newer_than(&self, other: &Entry) -> bool { + other.timestamp < self.timestamp || (other.timestamp == self.timestamp && other.payload_digest < self.payload_digest) || (other.timestamp == self.timestamp && other.payload_digest == self.payload_digest && other.payload_length < self.payload_length) - { - Ordering::Greater - } else if self == other { - Ordering::Equal - } else { - Ordering::Less - } } -} -impl Entry { - pub fn is_newer_than(&self, other: &Entry) -> bool { - self > other + pub fn as_set_sort_tuple(&self) -> (&NamespaceId, &SubspaceId, &Path) { + (&self.namespace_id, &self.subspace_id, &self.path) } pub fn attach_authorisation( @@ -258,26 +192,6 @@ impl Entry { ) -> Result { attach_authorisation(self, capability, secret_key) } - - /// Convert the entry to a byte slice. - /// - /// This is invoked to create the signable for signatures over the entry. Thus, any change in - /// the encoding format here will make existing signatures invalid. - /// - /// The encoding follows the [`Willow spec for encoding`](https://willowprotocol.org/specs/encodings/index.html#enc_entry). 
- // TODO: make sure that the encoding fits the spec - pub fn encode(&self) -> Vec { - let path_len = self.path.encoded_len(); - let len = PUBLIC_KEY_LENGTH + PUBLIC_KEY_LENGTH + path_len + 8 + 8 + DIGEST_LENGTH; - let mut out = Vec::with_capacity(len); - out.extend_from_slice(self.namespace_id.as_bytes()); - out.extend_from_slice(self.subspace_id.as_bytes()); - self.path.encode_into(&mut out); - out.extend_from_slice(&self.timestamp.to_be_bytes()); - out.extend_from_slice(&self.payload_length.to_be_bytes()); - out.extend_from_slice(self.payload_digest.as_bytes()); - out - } } /// A PossiblyAuthorisedEntry is a pair of an Entry and an AuthorisationToken. @@ -335,7 +249,7 @@ impl AuthorisedEntry { true } - /// Warning: Use only if you can assure that the authorisation was previously checked! + /// Use only if you can assure that the authorisation was previously checked! pub fn from_parts_unchecked(entry: Entry, authorisation_token: AuthorisationToken) -> Self { Self(entry, authorisation_token) } @@ -349,9 +263,97 @@ impl AuthorisedEntry { } } -// impl std::ops::Deref for AuthorisedEntry { -// type Target = PossiblyAuthorisedEntry; -// fn deref(&self) -> &Self::Target { -// &self.0 +// TODO: zerocopy support for path +// #[allow(missing_debug_implementations)] +// #[derive(KnownLayout, FromBytes, NoCell, Unaligned, IntoBytes)] +// #[repr(C, packed)] +// pub struct ComponentRef([u8]); +// +// #[allow(missing_debug_implementations)] +// #[derive(KnownLayout, FromBytes, NoCell, Unaligned, IntoBytes)] +// #[repr(C, packed)] +// pub struct PathRef([ComponentRef]); +// pub struct PathRef<'a>(&'a [&'a [u8]]); +// impl<'a> AsRef> for Path { +// fn as_ref(&'a self) -> &'a PathRef<'a> { +// todo!() // } // } + +pub mod encodings { + //! Encoding for Willow entries + //! + //! TODO: Verify that these are correct accoring to the spec! These encodings are the message + //! bytes for authorisation signatures, so we better not need to change them again. 
+ + use bytes::Bytes; + + use crate::proto::keys::PUBLIC_KEY_LENGTH; + + use super::{Entry, Path, DIGEST_LENGTH}; + + /// `PATH_LENGTH_POWER` is the least natural number such that `256 ^ PATH_LENGTH_POWER ≥ MAX_COMPONENT_LENGTH`. + /// We can represent the length of any Component in path_length_power bytes. + /// UPathLengthPower denotes the type of numbers between zero (inclusive) and 256path_length_power (exclusive). + /// + /// The value `2` means that we can encode paths up to 64KiB long. + const PATH_LENGTH_POWER: usize = 2; + const PATH_COUNT_POWER: usize = PATH_LENGTH_POWER; + type UPathLengthPower = u16; + type UPathCountPower = u16; + + impl Path { + pub fn encoded_len(&self) -> usize { + let lengths_len = PATH_COUNT_POWER + self.len() * PATH_LENGTH_POWER; + let data_len = self.iter().map(Bytes::len).sum::(); + lengths_len + data_len + } + + /// Encode in the format for signatures into a mutable vector. + pub fn encode_into(&self, out: &mut Vec) { + let component_count = self.len() as UPathCountPower; + out.extend_from_slice(&component_count.to_be_bytes()); + for component in self.iter() { + let len = component.len() as UPathLengthPower; + out.extend_from_slice(&len.to_be_bytes()); + out.extend_from_slice(&component); + } + } + + pub fn encode(&self) -> Vec { + let mut out = Vec::with_capacity(self.encoded_len()); + self.encode_into(&mut out); + out + } + } + + impl Entry { + /// Convert the entry to a byte slice. + /// + /// This is invoked to create the signable for signatures over the entry. Thus, any change in + /// the encoding format here will make existing signatures invalid. + /// + /// The encoding follows the [`Willow spec for encoding`](https://willowprotocol.org/specs/encodings/index.html#enc_entry). 
+ // TODO: make sure that the encoding fits the spec + pub fn encode(&self) -> Vec { + let len = self.encoded_len(); + let mut out = Vec::with_capacity(len); + self.encode_into(&mut out); + out + } + + pub fn encode_into(&self, out: &mut Vec) { + out.extend_from_slice(self.namespace_id.as_bytes()); + out.extend_from_slice(self.subspace_id.as_bytes()); + self.path.encode_into(out); + out.extend_from_slice(&self.timestamp.to_be_bytes()); + out.extend_from_slice(&self.payload_length.to_be_bytes()); + out.extend_from_slice(self.payload_digest.as_bytes()); + } + + pub fn encoded_len(&self) -> usize { + let path_len = self.path.encoded_len(); + PUBLIC_KEY_LENGTH + PUBLIC_KEY_LENGTH + path_len + 8 + 8 + DIGEST_LENGTH + } + } +} diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index a0720b6e43..09c51c1157 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -1,31 +1,38 @@ -use core::fmt; -use std::collections::{hash_map, HashMap, VecDeque}; +use std::{ + collections::{hash_map, HashMap, HashSet, VecDeque}, + fmt, +}; use anyhow::bail; use ed25519_dalek::SignatureError; -use tracing::{debug, warn}; +use futures::future::BoxFuture; +use genawaiter::{sync::Gen, GeneratorState}; +use iroh_base::hash::Hash; +use tracing::{debug, info, trace, warn}; use crate::{ proto::{ + grouping::{Area, AreaOfInterest, ThreeDRange}, keys::{NamespaceId, NamespacePublicKey, UserSecretKey, UserSignature}, meadowcap::{is_authorised_write, InvalidCapability}, wgps::{ - AccessChallenge, Area, AreaOfInterest, AreaOfInterestHandle, CapabilityHandle, - ChallengeHash, CommitmentReveal, ControlAbsolve, ControlAnnounceDropping, - ControlApologise, ControlFreeHandle, ControlIssueGuarantee, ControlPlead, Fingerprint, - Handle, HandleType, IntersectionHandle, LengthyEntry, LogicalChannel, Message, - ReadCapability, ReconciliationAnnounceEntries, ReconciliationSendEntry, - ReconciliationSendFingerprint, SetupBindAreaOfInterest, SetupBindReadCapability, - 
SetupBindStaticToken, StaticToken, StaticTokenHandle, ThreeDRange, - CHALLENGE_HASH_LENGTH, + AccessChallenge, AreaOfInterestHandle, CapabilityHandle, ChallengeHash, + CommitmentReveal, ControlAbsolve, ControlAnnounceDropping, ControlApologise, + ControlFreeHandle, ControlIssueGuarantee, ControlPlead, Fingerprint, Handle, + HandleType, IntersectionHandle, LengthyEntry, LogicalChannel, Message, ReadCapability, + ReconciliationAnnounceEntries, ReconciliationSendEntry, ReconciliationSendFingerprint, + SetupBindAreaOfInterest, SetupBindReadCapability, SetupBindStaticToken, StaticToken, + StaticTokenHandle, CHALLENGE_HASH_LENGTH, }, willow::{ AuthorisationToken, AuthorisedEntry, Entry, PossiblyAuthorisedEntry, Unauthorised, }, }, - store::{RangeSplitPart, Store}, + store::{RangeSplit, SplitAction, Store, SyncConfig}, }; +const LOGICAL_CHANNEL_CAP: usize = 128; + #[derive(Debug)] struct ResourceMap { next_handle: u64, @@ -108,6 +115,8 @@ pub enum Error { UnauthorisedEntryReceived, #[error("received an unsupported message type")] UnsupportedMessage, + #[error("the received nonce does not match the received committment")] + BrokenCommittement, } impl From for Error { @@ -151,23 +160,24 @@ pub enum Role { #[derive(Debug)] pub struct Session { - our_role: Role, + role: Role, our_nonce: AccessChallenge, + received_commitment: ChallengeHash, init: Option, challenge: Option, their_maximum_payload_size: usize, - received_commitment: ChallengeHash, control_channel: Channel, reconciliation_channel: Channel, our_current_aoi: Option, - us: PeerState, - them: PeerState, - - done: bool, + our_resources: Resources, + their_resources: Resources, + pending_ranges: HashSet<(AreaOfInterestHandle, ThreeDRange)>, + pending_entries: Option, + reconciliation_started: bool, } #[derive(Debug)] @@ -192,11 +202,10 @@ impl Challenges { } #[derive(Debug, Default)] -pub struct PeerState { +pub struct Resources { capabilities: ResourceMap, areas_of_interest: ResourceMap, static_tokens: ResourceMap, - 
reconciliation_announce_entries: Option, // intersections: ResourceMap, } #[derive(Debug)] @@ -208,6 +217,13 @@ pub struct SessionInit { pub area_of_interest: AreaOfInterest, } +#[derive(Debug)] +pub enum Yield { + InboxEmpty, + OutboxFull, + Reconciled, +} + impl Session { pub fn new( our_role: Role, @@ -217,44 +233,65 @@ impl Session { init: SessionInit, ) -> Self { let mut this = Self { - our_role, + role: our_role, our_nonce, challenge: None, their_maximum_payload_size, received_commitment, control_channel: Default::default(), reconciliation_channel: Default::default(), - us: Default::default(), - them: Default::default(), + our_resources: Default::default(), + their_resources: Default::default(), our_current_aoi: None, // config init: Some(init), - done: false, + pending_ranges: Default::default(), + pending_entries: Default::default(), + reconciliation_started: false, }; let msg = CommitmentReveal { nonce: our_nonce }; this.control_channel.send(msg); this } - fn sign_challenge(&self, secret_key: &UserSecretKey) -> Result { - let challenge = self - .challenge - .as_ref() - .ok_or(Error::InvalidMessageInCurrentState)?; - let signature = secret_key.sign(&challenge.ours); - Ok(signature) - } - pub fn drain_outbox(&mut self) -> impl Iterator + '_ { self.control_channel .outbox_drain() .chain(self.reconciliation_channel.outbox_drain()) } - pub fn init(&mut self, init: &SessionInit) -> Result<(), Error> { + pub fn our_role(&self) -> Role { + self.role + } + + pub fn recv(&mut self, message: Message) { + match message.logical_channel() { + LogicalChannel::ControlChannel => self.control_channel.inbox_push_or_drop(message), + LogicalChannel::ReconciliationChannel => { + self.reconciliation_channel.inbox_push_or_drop(message) + } + } + } + + pub fn process(&mut self, store: &mut S) -> Result { + trace!(pending = self.pending_ranges.len(), "process start!"); + // always process control messages first + while let Some(message) = self.control_channel.inbox_pop() { + 
self.process_control(store, message)?; + } + while let Some(message) = self.reconciliation_channel.inbox_pop() { + self.process_reconciliation(store, message)?; + } + trace!(pending = self.pending_ranges.len(), "process done!"); + Ok(self.reconciliation_started + && self.pending_ranges.is_empty() + && self.pending_entries.is_none()) + } + + fn init(&mut self, init: &SessionInit) -> Result<(), Error> { let area_of_interest = init.area_of_interest.clone(); let capability = init.capability.clone(); - debug!(role=?self.our_role, ?init, "init"); + debug!(?init, "init"); if *capability.receiver() != init.user_secret_key.public_key() { return Err(Error::WrongSecretKeyForCapability); } @@ -264,7 +301,7 @@ impl Session { // register read capability let signature = self.sign_challenge(&init.user_secret_key)?; - let our_capability_handle = self.us.capabilities.bind(capability.clone()); + let our_capability_handle = self.our_resources.capabilities.bind(capability.clone()); let msg = SetupBindReadCapability { capability, handle: intersection_handle, @@ -277,34 +314,20 @@ impl Session { area_of_interest, authorisation: our_capability_handle, }; - let our_aoi_handle = self.us.areas_of_interest.bind(msg.clone()); + let our_aoi_handle = self.our_resources.areas_of_interest.bind(msg.clone()); self.control_channel.send(msg); self.our_current_aoi = Some(our_aoi_handle); Ok(()) } - pub fn our_role(&self) -> Role { - self.our_role - } - - pub fn recv(&mut self, message: Message) { - match message.logical_channel() { - LogicalChannel::ControlChannel => self.control_channel.inbox_push_or_drop(message), - LogicalChannel::ReconciliationChannel => { - self.reconciliation_channel.inbox_push_or_drop(message) - } - } - } - - pub fn process(&mut self, store: &mut S) -> Result { - while let Some(message) = self.control_channel.inbox_pop() { - self.process_control(store, message)?; - } - while let Some(message) = self.reconciliation_channel.inbox_pop() { - self.process_reconciliation(store, 
message)?; - } - Ok(self.done) + fn sign_challenge(&self, secret_key: &UserSecretKey) -> Result { + let challenge = self + .challenge + .as_ref() + .ok_or(Error::InvalidMessageInCurrentState)?; + let signature = secret_key.sign(&challenge.ours); + Ok(signature) } fn process_control(&mut self, store: &mut S, message: Message) -> Result<(), Error> { @@ -313,15 +336,18 @@ impl Session { if self.challenge.is_some() { return Err(Error::InvalidMessageInCurrentState); } + if Hash::new(&msg.nonce).as_bytes() != &self.received_commitment { + return Err(Error::BrokenCommittement); + } self.challenge = Some(Challenges::from_nonces( - self.our_role, + self.role, self.our_nonce, msg.nonce, )); if let Some(init) = self.init.take() { self.init(&init)?; } else { - return Err(Error::InvalidMessageInCurrentState); + unreachable!("checked above with self.challeng") } } Message::SetupBindReadCapability(msg) => { @@ -334,22 +360,25 @@ impl Session { .receiver() .verify(&challenge.theirs, &msg.signature)?; // TODO: verify intersection handle - self.them.capabilities.bind(msg.capability); + self.their_resources.capabilities.bind(msg.capability); } Message::SetupBindStaticToken(msg) => { - self.them.static_tokens.bind(msg.static_token); + self.their_resources.static_tokens.bind(msg.static_token); } Message::SetupBindAreaOfInterest(msg) => { - let capability = self.them.capabilities.try_get(&msg.authorisation)?; + let capability = self + .their_resources + .capabilities + .try_get(&msg.authorisation)?; capability.try_granted_area(&msg.area_of_interest.area)?; - let their_aoi_handle = self.them.areas_of_interest.bind(msg); + let their_handle = self.their_resources.areas_of_interest.bind(msg); - if self.our_role == Role::Alfie { - if let Some(our_aoi_handle) = self.our_current_aoi.clone() { - self.init_reconciliation(store, &our_aoi_handle, &their_aoi_handle)?; + if self.role == Role::Alfie { + if let Some(our_handle) = self.our_current_aoi.clone() { + self.init_reconciliation(store, 
&our_handle, &their_handle)?; } else { warn!( - "received area of interest from remote, but nothing setup on our side" + "received area of interest from remote, but no area of interest set on our side" ); } } @@ -362,8 +391,11 @@ impl Session { Ok(()) } - pub fn bind_static_token(&mut self, static_token: StaticToken) -> StaticTokenHandle { - let (handle, is_new) = self.us.static_tokens.bind_if_new(static_token.clone()); + fn bind_static_token(&mut self, static_token: StaticToken) -> StaticTokenHandle { + let (handle, is_new) = self + .our_resources + .static_tokens + .bind_if_new(static_token.clone()); if is_new { let msg = SetupBindStaticToken { static_token }; self.control_channel @@ -373,39 +405,82 @@ impl Session { } /// Uses the blocking [`Store`] and thus may only be called in the worker thread. - pub fn init_reconciliation( + fn init_reconciliation( &mut self, store: &mut S, - our_aoi_handle: &AreaOfInterestHandle, - their_aoi_handle: &AreaOfInterestHandle, + our_handle: &AreaOfInterestHandle, + their_handle: &AreaOfInterestHandle, ) -> Result<(), Error> { - let our_aoi = self.us.areas_of_interest.try_get(&our_aoi_handle)?; - let their_aoi = self.us.areas_of_interest.try_get(&their_aoi_handle)?; - - let our_capability = self.us.capabilities.try_get(&our_aoi.authorisation)?; + let our_aoi = self.our_resources.areas_of_interest.try_get(&our_handle)?; + let their_aoi = self + .our_resources + .areas_of_interest + .try_get(&their_handle)?; + + let our_capability = self + .our_resources + .capabilities + .try_get(&our_aoi.authorisation)?; let namespace = our_capability.granted_namespace(); // TODO: intersect with their_aoi first - let area = &our_aoi + let common_aoi = &our_aoi .area() .intersection(&their_aoi.area()) .ok_or(Error::AreaOfInterestDoesNotOverlap)?; - let range = area.into_range(); + let range = common_aoi.into_range(); let fingerprint = store.range_fingerprint(namespace.into(), &range)?; + self.send_fingerprint(range, fingerprint, *our_handle, 
*their_handle, None); + self.reconciliation_started = true; + Ok(()) + } + + fn send_fingerprint( + &mut self, + range: ThreeDRange, + fingerprint: Fingerprint, + our_handle: AreaOfInterestHandle, + their_handle: AreaOfInterestHandle, + is_final_reply_for_range: Option, + ) { + self.pending_ranges.insert((our_handle, range.clone())); let msg = ReconciliationSendFingerprint { range, fingerprint, - sender_handle: *our_aoi_handle, - receiver_handle: *their_aoi_handle, + sender_handle: our_handle, + receiver_handle: their_handle, + is_final_reply_for_range, }; self.reconciliation_channel.send(msg); - Ok(()) } - // fn send_fingerprint(&mut self, store: &mut S, ) + fn announce_empty( + &mut self, + range: ThreeDRange, + our_handle: AreaOfInterestHandle, + their_handle: AreaOfInterestHandle, + want_response: bool, + is_final_reply_for_range: Option, + ) -> Result<(), Error> { + if want_response { + self.pending_ranges.insert((our_handle, range.clone())); + } + let msg = ReconciliationAnnounceEntries { + range, + count: 0, + want_response, + will_sort: false, + sender_handle: our_handle, + receiver_handle: their_handle, + is_final_reply_for_range, + }; + self.reconciliation_channel + .send(Message::ReconciliationAnnounceEntries(msg)); + Ok(()) + } - fn announce_entries( + fn announce_then_send_entries( &mut self, store: &mut S, namespace: NamespaceId, @@ -413,47 +488,104 @@ impl Session { our_handle: AreaOfInterestHandle, their_handle: AreaOfInterestHandle, want_response: bool, + is_final_reply_for_range: Option, + local_count: Option, ) -> Result<(), Error> { - for part in store.split_range(namespace, &range)?.into_iter() { - match part { - RangeSplitPart::SendFingerprint(range, fingerprint) => { - let msg = ReconciliationSendFingerprint { - range, - fingerprint, - sender_handle: our_handle, - receiver_handle: their_handle, - }; - self.reconciliation_channel - .send(Message::ReconciliationSendFingerprint(msg)); - } - RangeSplitPart::SendEntries(range, local_count) => { - 
let msg = ReconciliationAnnounceEntries { - range: range.clone(), - count: local_count, - want_response, - will_sort: false, // todo: sorted? - sender_handle: our_handle, - receiver_handle: their_handle, - }; - self.reconciliation_channel.send(msg); - for authorised_entry in store.get_entries_with_authorisation(namespace, &range) - { - let authorised_entry = authorised_entry?; - let (entry, token) = authorised_entry.into_parts(); - let (static_token, dynamic_token) = token.into_parts(); - // todo: partial entries - let available = entry.payload_length; - let static_token_handle = self.bind_static_token(static_token); - let msg = ReconciliationSendEntry { - entry: LengthyEntry::new(entry, available), - static_token_handle, - dynamic_token, - }; - self.reconciliation_channel.send(msg); + if want_response { + self.pending_ranges.insert((our_handle, range.clone())); + } + let local_count = match local_count { + Some(count) => count, + None => store.count_range(namespace, &range)?, + }; + let msg = ReconciliationAnnounceEntries { + range: range.clone(), + count: local_count, + want_response, + will_sort: false, // todo: sorted? 
+ sender_handle: our_handle, + receiver_handle: their_handle, + is_final_reply_for_range, + }; + self.reconciliation_channel.send(msg); + for authorised_entry in store.get_entries_with_authorisation(namespace, &range) { + let authorised_entry = authorised_entry?; + let (entry, token) = authorised_entry.into_parts(); + let (static_token, dynamic_token) = token.into_parts(); + // TODO: partial entries + let available = entry.payload_length; + let static_token_handle = self.bind_static_token(static_token); + let msg = ReconciliationSendEntry { + entry: LengthyEntry::new(entry, available), + static_token_handle, + dynamic_token, + }; + self.reconciliation_channel.send(msg); + } + Ok(()) + } + + fn split_range_and_send_parts( + &mut self, + store: &mut S, + namespace: NamespaceId, + range: &ThreeDRange, + our_handle: AreaOfInterestHandle, + their_handle: AreaOfInterestHandle, + ) -> Result<(), Error> { + // TODO: expose this config + let config = SyncConfig::default(); + let mut announce_entries = vec![]; + { + let iter = store.split_range(namespace, &range, &config)?; + let mut iter = iter.peekable(); + while let Some(res) = iter.next() { + let (subrange, action) = res?; + let is_last = iter.peek().is_none(); + let is_final_reply = is_last.then(|| range.clone()); + match action { + SplitAction::SendEntries(count) => { + announce_entries.push((subrange, count, is_final_reply)); + } + SplitAction::SendFingerprint(fingerprint) => { + self.send_fingerprint( + subrange, + fingerprint, + our_handle, + their_handle, + is_final_reply, + ); } } } } + // drop(iter); + for (subrange, count, is_final_reply) in announce_entries.into_iter() { + self.announce_then_send_entries( + store, + namespace, + &subrange, + our_handle, + their_handle, + true, + is_final_reply, + Some(count), + )?; + } + Ok(()) + } + + fn clear_pending_range_if_some( + &mut self, + our_handle: AreaOfInterestHandle, + pending_range: Option, + ) -> Result<(), Error> { + if let Some(range) = pending_range { + if 
!self.pending_ranges.remove(&(our_handle, range.clone())) { + warn!("received duplicate final reply for range marker"); + return Err(Error::InvalidMessageInCurrentState); + } + } Ok(()) } @@ -465,39 +597,52 @@ impl Session { ) -> Result<(), Error> { match message { Message::ReconciliationSendFingerprint(message) => { + self.reconciliation_started = true; let ReconciliationSendFingerprint { range, - fingerprint, - sender_handle, - receiver_handle, + fingerprint: their_fingerprint, + sender_handle: their_handle, + receiver_handle: our_handle, + is_final_reply_for_range, } = message; - let namespace = - self.range_is_authorised(&range, &receiver_handle, &sender_handle)?; + self.clear_pending_range_if_some(our_handle, is_final_reply_for_range)?; + + let namespace = self.range_is_authorised(&range, &our_handle, &their_handle)?; let our_fingerprint = store.range_fingerprint(namespace, &range)?; // case 1: fingerprint match. - if our_fingerprint == fingerprint { - let msg = ReconciliationAnnounceEntries { - range, - count: 0, - want_response: false, - will_sort: false, - sender_handle, - receiver_handle, - }; - self.reconciliation_channel - .send(Message::ReconciliationAnnounceEntries(msg)); - // TODO: This is likely incorrect - self.done = true; - } else { - self.announce_entries( + if our_fingerprint == their_fingerprint { + self.announce_empty( + range.clone(), + our_handle, + their_handle, + false, + Some(range.clone()), + )?; + } + // case 2: fingerprint is empty + else if their_fingerprint.is_empty() { + self.announce_then_send_entries( store, namespace, &range, - receiver_handle, - sender_handle, + our_handle, + their_handle, true, + Some(range.clone()), + None, + )?; + } + // case 3: fingerprint doesn't match and is non-empty + else { + // reply by splitting the range into parts unless it is very short + self.split_range_and_send_parts( + store, + namespace, + &range, + our_handle, + their_handle, )?; } } @@ -507,35 +652,34 @@ impl Session { count, want_response, 
will_sort: _, - sender_handle, - receiver_handle, - } = &message; - if self.them.reconciliation_announce_entries.is_some() { + sender_handle: their_handle, + receiver_handle: our_handle, + is_final_reply_for_range, + } = message; + self.clear_pending_range_if_some(our_handle, is_final_reply_for_range)?; + if self.pending_entries.is_some() { return Err(Error::InvalidMessageInCurrentState); } - let namespace = - self.range_is_authorised(&range, &receiver_handle, &sender_handle)?; - if *count == 0 && !want_response { - // todo: what do we need to do here? - self.done = true; - } else { - self.them.reconciliation_announce_entries = Some(message.clone()); - } - if *want_response { - self.announce_entries( + let namespace = self.range_is_authorised(&range, &our_handle, &their_handle)?; + if want_response { + self.announce_then_send_entries( store, namespace, - range, - *receiver_handle, - *sender_handle, + &range, + our_handle, + their_handle, false, + Some(range.clone()), + None, )?; } + if count != 0 { + self.pending_entries = Some(count); + } } Message::ReconciliationSendEntry(message) => { - let state = self - .them - .reconciliation_announce_entries + let remaining = self + .pending_entries .as_mut() .ok_or(Error::InvalidMessageInCurrentState)?; let ReconciliationSendEntry { @@ -543,17 +687,20 @@ impl Session { static_token_handle, dynamic_token, } = message; - let static_token = self.them.static_tokens.try_get(&static_token_handle)?; - // TODO: avoid clone + let static_token = self + .their_resources + .static_tokens + .try_get(&static_token_handle)?; + // TODO: avoid clone of static token? 
let authorisation_token = AuthorisationToken::from_parts(static_token.clone(), dynamic_token); let authorised_entry = AuthorisedEntry::try_from_parts(entry.entry, authorisation_token)?; store.ingest_entry(&authorised_entry)?; - state.count -= 1; - if state.count == 0 { - self.them.reconciliation_announce_entries = None; + *remaining -= 1; + if *remaining == 0 { + self.pending_entries = None; } } _ => return Err(Error::UnsupportedMessage), @@ -587,8 +734,8 @@ impl Session { handle: &AreaOfInterestHandle, ) -> Result<&SetupBindAreaOfInterest, Error> { match scope { - Scope::Us => self.us.areas_of_interest.try_get(handle), - Scope::Them => self.them.areas_of_interest.try_get(handle), + Scope::Us => self.our_resources.areas_of_interest.try_get(handle), + Scope::Them => self.their_resources.areas_of_interest.try_get(handle), } } @@ -599,8 +746,14 @@ impl Session { ) -> Result<&NamespacePublicKey, Error> { let aoi = self.handle_to_aoi(scope, handle)?; let capability = match scope { - Scope::Us => self.us.capabilities.try_get(&aoi.authorisation)?, - Scope::Them => self.them.capabilities.try_get(&aoi.authorisation)?, + Scope::Us => self + .our_resources + .capabilities + .try_get(&aoi.authorisation)?, + Scope::Them => self + .their_resources + .capabilities + .try_get(&aoi.authorisation)?, }; Ok(capability.granted_namespace()) } @@ -616,11 +769,12 @@ enum Scope { pub struct Channel { inbox: VecDeque, outbox: VecDeque, - // issued_guarantees: usize, + // issued_guarantees: u64, + // available_guarantees: u64, } impl Default for Channel { fn default() -> Self { - Self::with_capacity(1024) + Self::with_capacity(LOGICAL_CHANNEL_CAP) } } @@ -629,22 +783,28 @@ impl Channel { Self { inbox: VecDeque::with_capacity(cap), outbox: VecDeque::with_capacity(cap), + // issued_guarantees: 0, + // available_guarantees: 0, } } - pub fn send(&mut self, value: impl Into) -> bool { + // pub fn recv_guarantees(&mut self, count: u64) { + // self.available_guarantees += count; + // } + // + pub 
fn can_send(&self) -> bool { + self.outbox.len() < self.outbox.capacity() + } + + pub fn send(&mut self, value: impl Into) { self.outbox.push_back(value.into()); - self.has_inbox_capacity() + // self.available_guarantees -= 1; } fn outbox_drain(&mut self) -> impl Iterator + '_ { self.outbox.drain(..) } - // fn inbox_drain(&mut self) -> impl Iterator + '_ { - // self.inbox.drain(..) - // } - fn inbox_pop(&mut self) -> Option { self.inbox.pop_front() } @@ -663,6 +823,7 @@ impl Channel { } } pub fn remaining_inbox_capacity(&self) -> usize { + // self.inbox.capacity() - self.inbox.len() - self.issued_guarantees as usize self.inbox.capacity() - self.inbox.len() } @@ -670,11 +831,11 @@ impl Channel { self.remaining_inbox_capacity() > 0 } - // pub fn issuable_guarantees(&self) -> usize { - // self.remaining_capacity() - self.issued_guarantees + // pub fn issuable_guarantees(&self) -> u64 { + // self.remaining_inbox_capacity() as u64 - self.issued_guarantees // } // - // pub fn offer_guarantees(&mut self) -> usize { + // pub fn issue_all_guarantees(&mut self) -> u64 { // let val = self.issuable_guarantees(); // self.issued_guarantees += val; // val @@ -704,3 +865,69 @@ fn bitwise_xor_complement(a: [u8; N], b: [u8; N]) -> [u8; N] { } res } + +// struct WorkerHandle { +// tx: flume::Sender<()>, +// join_handle: std::thread::JoinHandle<()>, +// } +// +// struct Worker { +// store: S, +// rx: flume::Receiver<()>, +// sessions: Vec>, +// } +// +// impl Worker { +// pub fn spawn(store: S) -> WorkerHandle { +// let (tx, rx) = flume::bounded(1024); +// let worker = Worker { +// store, +// rx, +// sessions: Default::default(), +// }; +// let join_handle = std::thread::spawn(move || worker.run()); +// WorkerHandle { tx, join_handle } +// } +// +// pub fn run(mut self) { +// let session = &mut self.sessions[0]; +// self.sessions[0] = Some(match session.take().unwrap() { +// SessionState::Waiting(mut session) => { +// let co: CoSession = Gen::new_boxed(|co| async move { +// let res = 
session.process(&mut self.store); +// co.yield_(Yield::Done(res)).await; +// session +// }); +// SessionState::Running(co) +// } +// SessionState::Running(mut co) => { +// let yielded = co.resume(); +// match yielded { +// GeneratorState::Yielded(y) => { +// match y { +// Yield::Done(_) => {} +// } +// SessionState::Running(co) +// } +// GeneratorState::Complete(session) => SessionState::Done(session), +// } +// } +// SessionState::Done(session) => SessionState::Done(session), +// }) +// } +// } +// +// pub type CoSession<'a> = Gen>; +// +// #[derive(derive_more::Debug)] +// pub enum SessionState { +// Waiting(Session), +// #[debug("Running")] +// Running(CoSession<'static>), +// Done(Session), +// } +// +// #[derive(Debug)] +// pub enum Yield { +// Done(Result), +// } diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index dcf545e682..4aca570c89 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -1,43 +1,85 @@ use std::collections::HashMap; +use anyhow::Result; use iroh_base::hash::Hash; use crate::proto::{ - wgps::{Area, Fingerprint, RangeEnd, ThreeDRange}, + grouping::{path_range_end, subspace_range_end, Area, Range, RangeEnd, ThreeDRange}, + wgps::Fingerprint, willow::{AuthorisedEntry, Entry, NamespaceId}, }; -pub trait Store { +#[derive(Debug, Clone, Copy)] +pub struct SyncConfig { + /// Up to how many values to send immediately, before sending only a fingerprint. + max_set_size: usize, + /// `k` in the protocol, how many splits to generate. 
at least 2 + split_factor: usize, +} + +impl Default for SyncConfig { + fn default() -> Self { + SyncConfig { + max_set_size: 1, + split_factor: 2, + } + } +} + +pub trait Store: Send + 'static { fn range_fingerprint( &mut self, namespace: NamespaceId, range: &ThreeDRange, - ) -> anyhow::Result; + ) -> Result; fn split_range( &mut self, namespace: NamespaceId, range: &ThreeDRange, - ) -> anyhow::Result; + config: &SyncConfig, + ) -> Result>>; + + fn count_range(&mut self, namespace: NamespaceId, range: &ThreeDRange) -> Result; + + fn get_entries_with_authorisation<'a>( + &'a mut self, + namespace: NamespaceId, + range: &ThreeDRange, + ) -> impl Iterator> + 'a; + + fn ingest_entry(&mut self, entry: &AuthorisedEntry) -> Result<()>; fn get_entries<'a>( &'a mut self, namespace: NamespaceId, range: &ThreeDRange, - ) -> impl Iterator> + 'a { + ) -> impl Iterator> + 'a { self.get_entries_with_authorisation(namespace, range) .map(|e| e.map(|e| e.into_entry())) } +} - fn get_entries_with_authorisation<'a>( - &'a mut self, - namespace: NamespaceId, - range: &ThreeDRange, - ) -> impl Iterator> + 'a; +// pub struct StoreHandle { +// pub fn +// } - fn ingest_entry(&mut self, entry: &AuthorisedEntry) -> anyhow::Result<()>; -} +// pub enum Op { +// +// } +// #[derive(Debug, Clone)] +// pub struct StoreHandle { +// } +// impl StoreHandle { +// async fn run_op(&self, op: Op) { +// +// } +// } +// /// Extension methods for [`Store`]. 
+// pub trait StoreExt: Store {} +// impl StoreExt for T {} +/// A very inefficient in-memory store, for testing purposes only #[derive(Debug, Default)] pub struct MemoryStore { entries: HashMap>, @@ -48,7 +90,7 @@ impl Store for MemoryStore { &mut self, namespace: NamespaceId, range: &ThreeDRange, - ) -> anyhow::Result { + ) -> Result { let mut fingerprint = Fingerprint::default(); for entry in self.get_entries(namespace, range) { let entry = entry?; @@ -61,55 +103,82 @@ impl Store for MemoryStore { &mut self, namespace: NamespaceId, range: &ThreeDRange, - ) -> anyhow::Result { + config: &SyncConfig, + ) -> Result>> { let count = self.get_entries(namespace, range).count(); - let split_if_more_than = 2; - let res = if count > split_if_more_than { - let mut entries: Vec<_> = self - .get_entries(namespace, range) - .filter_map(|e| e.ok()) - .collect(); - let pivot_index = count / 2; - let right = entries.split_off(pivot_index); - let left = entries; - - let pivot = right.first().unwrap(); - let mut range_left = range.clone(); - range_left.paths.end = RangeEnd::Closed(pivot.path.clone()); - range_left.times.end = RangeEnd::Closed(pivot.timestamp); - range_left.subspaces.end = RangeEnd::Closed(pivot.subspace_id); - - let mut range_right = range.clone(); - range_right.paths.start = pivot.path.clone(); - range_right.times.start = pivot.timestamp; - range_right.subspaces.start = pivot.subspace_id; - - let left_part = if left.len() > split_if_more_than { - let fp = Fingerprint::from_entries(left.iter()); - RangeSplitPart::SendFingerprint(range_left, fp) - } else { - RangeSplitPart::SendEntries(range_left, left.len() as u64) - }; - - let right_part = if left.len() > split_if_more_than { - let fp = Fingerprint::from_entries(right.iter()); - RangeSplitPart::SendFingerprint(range_right, fp) - } else { - RangeSplitPart::SendEntries(range_right, right.len() as u64) - }; - - RangeSplit::SendSplit([left_part, right_part]) + if count <= config.max_set_size { + return Ok( + 
vec![Ok((range.clone(), SplitAction::SendEntries(count as u64)))].into_iter(), + ); + } + let mut entries: Vec = self + .get_entries(namespace, range) + .filter_map(|e| e.ok()) + .collect(); + + entries.sort_by(|e1, e2| e1.as_set_sort_tuple().cmp(&e2.as_set_sort_tuple())); + + let split_index = count / 2; + let mid = entries.get(split_index).expect("not empty"); + let mut ranges = vec![]; + // split in two halves by subspace + if mid.subspace_id != range.subspaces.start { + ranges.push(ThreeDRange::new( + Range::new(range.subspaces.start, RangeEnd::Closed(mid.subspace_id)), + range.paths.clone(), + range.times.clone(), + )); + ranges.push(ThreeDRange::new( + Range::new(mid.subspace_id, range.subspaces.end), + range.paths.clone(), + range.times.clone(), + )); + } + // split by path + else if mid.path != range.paths.start { + ranges.push(ThreeDRange::new( + range.subspaces.clone(), + Range::new( + range.paths.start.clone(), + RangeEnd::Closed(mid.path.clone()), + ), + range.times.clone(), + )); + ranges.push(ThreeDRange::new( + range.subspaces.clone(), + Range::new(mid.path.clone(), range.paths.end.clone()), + range.times.clone(), + )); + // split by time } else { - RangeSplit::SendEntries(range.clone(), count as u64) - }; - Ok(res) + ranges.push(ThreeDRange::new( + range.subspaces.clone(), + range.paths.clone(), + Range::new(range.times.start, RangeEnd::Closed(mid.timestamp)), + )); + ranges.push(ThreeDRange::new( + range.subspaces.clone(), + range.paths.clone(), + Range::new(mid.timestamp, range.times.end), + )); + } + let mut out = vec![]; + for range in ranges { + let fingerprint = self.range_fingerprint(namespace, &range)?; + out.push(Ok((range, SplitAction::SendFingerprint(fingerprint)))); + } + Ok(out.into_iter()) + } + + fn count_range(&mut self, namespace: NamespaceId, range: &ThreeDRange) -> Result { + Ok(self.get_entries(namespace, range).count() as u64) } fn get_entries_with_authorisation<'a>( &'a mut self, namespace: NamespaceId, range: &ThreeDRange, - ) 
-> impl Iterator> + 'a { + ) -> impl Iterator> + 'a { self.entries .get(&namespace) .into_iter() @@ -120,61 +189,98 @@ impl Store for MemoryStore { .into_iter() } - fn ingest_entry(&mut self, entry: &AuthorisedEntry) -> anyhow::Result<()> { + fn ingest_entry(&mut self, entry: &AuthorisedEntry) -> Result<()> { let entries = self.entries.entry(entry.namespace_id()).or_default(); let new = entry.entry(); let mut to_remove = vec![]; - for (i, other) in entries.iter().enumerate() { - let old = other.entry(); - if old.subspace_id == new.subspace_id && old.path.is_prefix_of(&new.path) && old >= new + for (i, existing) in entries.iter().enumerate() { + let existing = existing.entry(); + if existing.subspace_id == new.subspace_id + && existing.path.is_prefix_of(&new.path) + && existing.is_newer_than(new) { // we cannot insert the entry, a newer entry exists return Ok(()); } - if new.subspace_id == old.subspace_id && new.path.is_prefix_of(&old.path) && new > old { + if new.subspace_id == existing.subspace_id + && new.path.is_prefix_of(&existing.path) + && new.is_newer_than(existing) + { to_remove.push(i); } } for i in to_remove { entries.remove(i); } - entries.push(entry.clone()); Ok(()) } } -#[derive(Debug)] -pub enum RangeSplit { - SendEntries(ThreeDRange, u64), - SendSplit([RangeSplitPart; 2]), -} - -impl IntoIterator for RangeSplit { - type IntoIter = RangeSplitIterator; - type Item = RangeSplitPart; - fn into_iter(self) -> Self::IntoIter { - RangeSplitIterator(match self { - RangeSplit::SendEntries(range, len) => { - [Some(RangeSplitPart::SendEntries(range, len)), None] - } - RangeSplit::SendSplit(parts) => parts.map(Option::Some), - }) - } -} +pub type RangeSplit = (ThreeDRange, SplitAction); #[derive(Debug)] -pub struct RangeSplitIterator([Option; 2]); - -impl Iterator for RangeSplitIterator { - type Item = RangeSplitPart; - fn next(&mut self) -> Option { - self.0.iter_mut().filter_map(Option::take).next() - } +pub enum SplitAction { + SendFingerprint(Fingerprint), + 
SendEntries(u64), } +// #[derive(Debug)] +// pub enum RangeSplit { +// SendEntries(u64), +// Fingerprint() +// Split(Vec<(ThreeDRange, Fingerprint)>), +// } -#[derive(Debug)] -pub enum RangeSplitPart { - SendEntries(ThreeDRange, u64), - SendFingerprint(ThreeDRange, Fingerprint), -} +// pub enum SplitAction { +// SendFingerprint(Fingerprint), +// SendEntries(u64) +// } + +// #[derive(Debug)] +// pub struct RangeFingerprint { +// range: ThreeDRange, +// fingerprint: Fingerprint, +// } +// +// impl RangeFingerprint { +// pub fn new(range: ThreeDRange, fingerprint: Fingerprint) -> Self { +// Self { range, fingerprint } +// } +// } +// pub struct RangePart { +// range: ThreeDRange, +// proceed: Proceed +// } +// +// pub enum Proceed { +// Fingerprint(Fingerprint), +// SendEntries(u64) +// } +// +// impl IntoIterator for RangeSplit { +// type IntoIter = RangeSplitIterator; +// type Item = RangeSplitPart; +// fn into_iter(self) -> Self::IntoIter { +// RangeSplitIterator(match self { +// RangeSplit::SendEntries(range, len) => { +// [Some(RangeSplitPart::SendEntries(range, len)), None] +// } +// RangeSplit::SendSplit(parts) => parts.map(Option::Some), +// }) +// } +// } +// +// #[derive(Debug)] +// pub struct RangeSplitIterator([Option; 2]); +// +// impl Iterator for RangeSplitIterator { +// type Item = RangeSplitPart; +// fn next(&mut self) -> Option { +// self.0.iter_mut().filter_map(Option::take).next() +// } +// } +// #[derive(Debug)] +// pub enum RangeSplitPart { +// SendEntries(ThreeDRange, u64), +// SendFingerprint(ThreeDRange, Fingerprint), +// } From 6acbfcb578d1799301eaf1587c27ca57dc673bb6 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 25 Apr 2024 10:58:58 +0200 Subject: [PATCH 007/198] more progress --- iroh-willow/src/proto/grouping.rs | 380 ++++++++++++++++++++++++++ iroh-willow/src/session.rs | 123 ++------- iroh-willow/src/session/reconciler.rs | 161 +++++++++++ iroh-willow/src/store.rs | 79 ------ iroh-willow/src/worker.rs | 0 5 files 
changed, 564 insertions(+), 179 deletions(-) create mode 100644 iroh-willow/src/proto/grouping.rs create mode 100644 iroh-willow/src/session/reconciler.rs create mode 100644 iroh-willow/src/worker.rs diff --git a/iroh-willow/src/proto/grouping.rs b/iroh-willow/src/proto/grouping.rs new file mode 100644 index 0000000000..2aba784a14 --- /dev/null +++ b/iroh-willow/src/proto/grouping.rs @@ -0,0 +1,380 @@ +use std::{cmp::Ordering, fmt}; + +use bytes::Bytes; +use ed25519_dalek::Signature; +use iroh_base::hash::Hash; +use iroh_net::key::PublicKey; +use serde::{Deserialize, Serialize}; + +use super::willow::{Entry, Path, SubspaceId, Timestamp}; + +/// A three-dimensional range that includes every [`Entry`] included in all three of its ranges. +#[derive(Debug, Serialize, Deserialize, Clone, Hash, Eq, PartialEq)] +pub struct ThreeDRange { + /// Range of [`SubspaceId`] + pub subspaces: Range, + pub paths: Range, + pub times: Range, +} + +impl ThreeDRange { + pub fn new(subspaces: Range, paths: Range, times: Range) -> Self { + Self { + subspaces, + paths, + times, + } + } + pub fn includes_entry(&self, entry: &Entry) -> bool { + self.subspaces.includes(&entry.subspace_id) + && self.paths.includes(&entry.path) + && self.times.includes(&entry.timestamp) + } + + pub fn is_empty(&self) -> bool { + self.subspaces.is_empty() || self.paths.is_empty() || self.times.is_empty() + } + + pub fn all() -> Self { + Self::new(Default::default(), Default::default(), Default::default()) + } + + pub fn intersection(&self, other: &ThreeDRange) -> Option { + let paths = self.paths.intersection(&other.paths)?; + let times = self.times.intersection(&other.times)?; + let subspaces = self.subspaces.intersection(&other.subspaces)?; + Some(Self { + paths, + times, + subspaces, + }) + } +} + +#[derive(Debug, Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Hash)] +pub struct Range { + pub start: T, + pub end: RangeEnd, +} + +impl From<(T, RangeEnd)> for Range { + fn from((start, end): (T, RangeEnd)) 
-> Self { + Range { start, end } + } +} + +impl Range { + pub fn new(start: T, end: RangeEnd) -> Self { + Self { start, end } + } + pub fn is_closed(&self) -> bool { + matches!(self.end, RangeEnd::Closed(_)) + } + pub fn is_open(&self) -> bool { + matches!(self.end, RangeEnd::Open) + } +} +impl Range { + fn all() -> Self { + Self::new(T::default(), RangeEnd::Open) + } +} +impl Default for Range { + fn default() -> Self { + Self::all() + } +} +impl Range { + fn intersection(&self, other: &Self) -> Option { + let start = (&self.start).max(&other.start); + let end = match (&self.end, &other.end) { + (RangeEnd::Open, RangeEnd::Closed(b)) => RangeEnd::Closed(b), + (RangeEnd::Closed(a), RangeEnd::Closed(b)) => RangeEnd::Closed(a.min(&b)), + (RangeEnd::Closed(a), RangeEnd::Open) => RangeEnd::Closed(a), + (RangeEnd::Open, RangeEnd::Open) => RangeEnd::Open, + }; + match end { + RangeEnd::Open => Some(Self::new(start.clone(), RangeEnd::Open)), + RangeEnd::Closed(t) if t >= start => { + Some(Self::new(start.clone(), RangeEnd::Closed(t.clone()))) + } + RangeEnd::Closed(_) => None, + } + } +} +impl Range { + pub fn is_empty(&self) -> bool { + match &self.end { + RangeEnd::Open => false, + RangeEnd::Closed(t) => t <= &self.start, + } + } +} + +impl Range { + pub fn includes(&self, value: &T) -> bool { + value >= &self.start && self.end.includes(value) + } + + pub fn includes_range(&self, other: &Range) -> bool { + self.start <= other.start && self.end >= other.end + } +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Copy, Hash)] +pub enum RangeEnd { + Closed(T), + Open, +} + +impl PartialOrd for RangeEnd { + fn partial_cmp(&self, other: &Self) -> Option { + match (self, other) { + (RangeEnd::Open, RangeEnd::Closed(_)) => Some(Ordering::Greater), + (RangeEnd::Closed(_), RangeEnd::Open) => Some(Ordering::Less), + (RangeEnd::Closed(a), RangeEnd::Closed(b)) => a.partial_cmp(b), + (RangeEnd::Open, RangeEnd::Open) => Some(Ordering::Equal), + } + } +} + +// impl 
PartialOrd for RangeEnd { +// fn partial_cmp(&self, other: &T) -> Option { +// // match (self, other) { +// // (RangeEnd::Open, RangeEnd::Closed(_)) => Some(Ordering::Greater), +// // (RangeEnd::Closed(_), RangeEnd::Open) => Some(Ordering::Less), +// // (RangeEnd::Closed(a), RangeEnd::Closed(b)) => a.partial_cmp(b), +// // (RangeEnd::Open, RangeEnd::Open) => Some(Ordering::Equal), +// // } +// } +// } + +impl RangeEnd { + pub fn includes(&self, value: &T) -> bool { + match self { + Self::Open => true, + Self::Closed(end) => value < end, + } + } +} + +/// A grouping of Entries that are among the newest in some store. +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)] +pub struct AreaOfInterest { + /// To be included in this AreaOfInterest, an Entry must be included in the area. + pub area: Area, + /// To be included in this AreaOfInterest, an Entry’s timestamp must be among the max_count greatest Timestamps, unless max_count is zero. + pub max_count: u64, + /// The total payload_lengths of all included Entries is at most max_size, unless max_size is zero. + pub max_size: u64, +} + +impl AreaOfInterest { + pub fn full() -> Self { + Self { + area: Area::full(), + max_count: 0, + max_size: 0, + } + } +} + +/// A grouping of Entries. +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash)] +pub struct Area { + /// To be included in this Area, an Entry’s subspace_id must be equal to the subspace_id, unless it is any. + pub subspace_id: SubspaceArea, + /// To be included in this Area, an Entry’s path must be prefixed by the path. + pub path: Path, + /// To be included in this Area, an Entry’s timestamp must be included in the times. 
+ pub times: Range, +} + +impl Area { + pub const fn new(subspace_id: SubspaceArea, path: Path, times: Range) -> Self { + Self { + subspace_id, + path, + times, + } + } + + pub fn full() -> Self { + Self::new(SubspaceArea::Any, Path::empty(), Range::::FULL) + } + + pub fn empty() -> Self { + Self::new(SubspaceArea::Any, Path::empty(), Range::::EMPTY) + } + + pub fn subspace(subspace_id: SubspaceId) -> Self { + Self::new( + SubspaceArea::Id(subspace_id), + Path::empty(), + Range::::FULL, + ) + } + + pub fn includes_entry(&self, entry: &Entry) -> bool { + self.subspace_id.includes_subspace(&entry.subspace_id) + && self.path.is_prefix_of(&entry.path) + && self.times.includes(&entry.timestamp) + } + + pub fn includes_area(&self, other: &Area) -> bool { + self.subspace_id.includes(&other.subspace_id) + && self.path.is_prefix_of(&other.path) + && self.times.includes_range(&other.times) + } + + pub fn includes_range(&self, range: &ThreeDRange) -> bool { + let path_start = self.path.is_prefix_of(&range.paths.start); + let path_end = match &range.paths.end { + RangeEnd::Open => true, + RangeEnd::Closed(path) => self.path.is_prefix_of(path), + }; + let subspace_start = self.subspace_id.includes_subspace(&range.subspaces.start); + let subspace_end = match range.subspaces.end { + RangeEnd::Open => true, + RangeEnd::Closed(subspace) => self.subspace_id.includes_subspace(&subspace), + }; + subspace_start + && subspace_end + && path_start + && path_end + && self.times.includes_range(&range.times) + } + + pub fn into_range(&self) -> ThreeDRange { + let subspace_start = match self.subspace_id { + SubspaceArea::Any => SubspaceId::default(), + SubspaceArea::Id(id) => id, + }; + let subspace_end = match self.subspace_id { + SubspaceArea::Any => RangeEnd::Open, + SubspaceArea::Id(id) => subspace_range_end(id), + }; + let path_start = self.path.clone(); + let path_end = path_range_end(&self.path); + ThreeDRange { + subspaces: Range::new(subspace_start, subspace_end), + paths: 
Range::new(path_start, path_end), + times: self.times.clone(), + } + } + + pub fn intersection(&self, other: &Area) -> Option { + let subspace_id = self.subspace_id.intersection(&other.subspace_id)?; + let path = self.path.intersection(&other.path)?; + let times = self.times.intersection(&other.times)?; + Some(Self { + subspace_id, + times, + path, + }) + } +} + +pub fn path_range_end(path: &Path) -> RangeEnd { + if path.is_empty() { + RangeEnd::Open + } else { + let mut out = vec![]; + for component in path.iter().rev() { + // component can be incremented + if out.is_empty() && component.iter().any(|x| *x != 0xff) { + let mut bytes = Vec::with_capacity(component.len()); + bytes.copy_from_slice(&component); + let incremented = increment_by_one(&mut bytes); + debug_assert!(incremented, "checked above"); + out.push(Bytes::from(bytes)); + break; + // component cannot be incremented + } else if out.is_empty() { + continue; + } else { + out.push(component.clone()) + } + } + if out.is_empty() { + RangeEnd::Open + } else { + out.reverse(); + RangeEnd::Closed(Path::from_bytes_unchecked(out)) + } + } + // let mut bytes = id.to_bytes(); + // if increment_by_one(&mut bytes) { + // RangeEnd::Closed(SubspaceId::from_bytes_unchecked(bytes)) + // } else { + // RangeEnd::Open + // } +} + +pub fn subspace_range_end(id: SubspaceId) -> RangeEnd { + let mut bytes = id.to_bytes(); + if increment_by_one(&mut bytes) { + RangeEnd::Closed(SubspaceId::from_bytes_unchecked(bytes)) + } else { + RangeEnd::Open + } +} + +/// Increment a byte string by one, by incrementing the last byte that is not 255 by one. +/// +/// Returns false if all bytes are 255. 
+fn increment_by_one(value: &mut [u8]) -> bool { + for char in value.iter_mut().rev() { + if *char != 255 { + *char += 1; + return true; + } else { + *char = 0; + } + } + false +} + +impl Range { + pub const FULL: Self = Self { + start: 0, + end: RangeEnd::Open, + }; + + pub const EMPTY: Self = Self { + start: 0, + end: RangeEnd::Closed(0), + }; +} + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash)] +pub enum SubspaceArea { + Any, + Id(SubspaceId), +} + +impl SubspaceArea { + fn includes(&self, other: &SubspaceArea) -> bool { + match (self, other) { + (SubspaceArea::Any, SubspaceArea::Any) => true, + (SubspaceArea::Id(_), SubspaceArea::Any) => false, + (_, SubspaceArea::Id(id)) => self.includes_subspace(id), + } + } + fn includes_subspace(&self, subspace_id: &SubspaceId) -> bool { + match self { + Self::Any => true, + Self::Id(id) => id == subspace_id, + } + } + + fn intersection(&self, other: &Self) -> Option { + match (self, other) { + (Self::Any, Self::Any) => Some(Self::Any), + (Self::Id(a), Self::Any) => Some(Self::Id(*a)), + (Self::Any, Self::Id(b)) => Some(Self::Id(*b)), + (Self::Id(a), Self::Id(b)) if a == b => Some(Self::Id(*a)), + (Self::Id(_a), Self::Id(_b)) => None, + } + } +} diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 09c51c1157..4808b8d659 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -157,29 +157,6 @@ pub enum Role { Betty, Alfie, } - -#[derive(Debug)] -pub struct Session { - role: Role, - our_nonce: AccessChallenge, - received_commitment: ChallengeHash, - init: Option, - challenge: Option, - - their_maximum_payload_size: usize, - - control_channel: Channel, - reconciliation_channel: Channel, - - our_current_aoi: Option, - - our_resources: Resources, - their_resources: Resources, - pending_ranges: HashSet<(AreaOfInterestHandle, ThreeDRange)>, - pending_entries: Option, - reconciliation_started: bool, -} - #[derive(Debug)] pub struct Challenges { ours: AccessChallenge, 
@@ -211,17 +188,32 @@ pub struct Resources { #[derive(Debug)] pub struct SessionInit { pub user_secret_key: UserSecretKey, - // TODO: allow multiple capabilities + // TODO: allow multiple capabilities? pub capability: ReadCapability, - // TODO: allow multiple areas of interest + // TODO: allow multiple areas of interest? pub area_of_interest: AreaOfInterest, } #[derive(Debug)] -pub enum Yield { - InboxEmpty, - OutboxFull, - Reconciled, +pub struct Session { + role: Role, + our_nonce: AccessChallenge, + received_commitment: ChallengeHash, + init: Option, + challenge: Option, + // init_state: SessionInit, + their_maximum_payload_size: usize, + + control_channel: Channel, + reconciliation_channel: Channel, + + our_current_aoi: Option, + + our_resources: Resources, + their_resources: Resources, + pending_ranges: HashSet<(AreaOfInterestHandle, ThreeDRange)>, + pending_entries: Option, + reconciliation_started: bool, } impl Session { @@ -344,11 +336,8 @@ impl Session { self.our_nonce, msg.nonce, )); - if let Some(init) = self.init.take() { - self.init(&init)?; - } else { - unreachable!("checked above with self.challeng") - } + let init = self.init.take().expect("unreachable"); + self.init(&init)?; } Message::SetupBindReadCapability(msg) => { let challenge = self @@ -865,69 +854,3 @@ fn bitwise_xor_complement(a: [u8; N], b: [u8; N]) -> [u8; N] { } res } - -// struct WorkerHandle { -// tx: flume::Sender<()>, -// join_handle: std::thread::JoinHandle<()>, -// } -// -// struct Worker { -// store: S, -// rx: flume::Receiver<()>, -// sessions: Vec>, -// } -// -// impl Worker { -// pub fn spawn(store: S) -> WorkerHandle { -// let (tx, rx) = flume::bounded(1024); -// let worker = Worker { -// store, -// rx, -// sessions: Default::default(), -// }; -// let join_handle = std::thread::spawn(move || worker.run()); -// WorkerHandle { tx, join_handle } -// } -// -// pub fn run(mut self) { -// let session = &mut self.sessions[0]; -// self.sessions[0] = Some(match session.take().unwrap() 
{ -// SessionState::Waiting(mut session) => { -// let co: CoSession = Gen::new_boxed(|co| async move { -// let res = session.process(&mut self.store); -// co.yield_(Yield::Done(res)).await; -// session -// }); -// SessionState::Running(co) -// } -// SessionState::Running(mut co) => { -// let yielded = co.resume(); -// match yielded { -// GeneratorState::Yielded(y) => { -// match y { -// Yield::Done(_) => {} -// } -// SessionState::Running(co) -// } -// GeneratorState::Complete(session) => SessionState::Done(session), -// } -// } -// SessionState::Done(session) => SessionState::Done(session), -// }) -// } -// } -// -// pub type CoSession<'a> = Gen>; -// -// #[derive(derive_more::Debug)] -// pub enum SessionState { -// Waiting(Session), -// #[debug("Running")] -// Running(CoSession<'static>), -// Done(Session), -// } -// -// #[derive(Debug)] -// pub enum Yield { -// Done(Result), -// } diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs new file mode 100644 index 0000000000..fd48d3baea --- /dev/null +++ b/iroh-willow/src/session/reconciler.rs @@ -0,0 +1,161 @@ +use std::collections::VecDeque; + +use super::Error; +use genawaiter::{ + sync::{Co, Gen}, + GeneratorState, +}; +use tracing::info; + +#[derive(Debug)] +pub enum Yield { + Init, + OutboxFull, + InboxEmpty, + AllDone(usize), +} + +// pub type Coroutine = genawaiter::Coroutine + +#[derive(Debug)] +pub struct Reconciler { + // gen: Option>, // co: genawaiter::sync::Co>, + state: State, +} + +#[derive(Debug)] +pub struct State { + outbox: VecDeque, + inbox: VecDeque, + count: usize, +} + +pub struct WorkerState { + inbox: flume::Receiver, + outbox: flume::Sender, + sum: usize, +} + +pub struct NetState { + outbox: flume::Receiver, + inbox: flume::Sender, +} + +fn create_state(cap: usize) -> (NetState, WorkerState) { + let (outbox_send, outbox_recv) = flume::bounded(cap); + let (inbox_send, inbox_recv) = flume::bounded(cap); + let ws = WorkerState { + inbox: inbox_recv, + 
outbox: outbox_send, + sum: 0, + }; + let ns = NetState { + inbox: inbox_send, + outbox: outbox_recv, + }; + (ns, ws) +} + +enum WorkerToNet { + MayResume, + Yield, + Finished, + Out(i32), +} + +async fn run_net( + ns: NetState, + recv: flume::Receiver, + send: flume::Sender, +) -> anyhow::Result<()> { + loop { + let mut pending_message = None; + // let mut yieled = true; + tokio::select! { + next = recv.recv_async(), if pending_message.is_none( )=> { + let msg = next?; + // if yielded { + // yielded = false; + // notify_worker(); + // } + if let Err(msg) = ns.inbox.try_send(msg) { + pending_message.insert(msg.into_inner()); + } + } + out = ns.outbox.recv_async() => { + let out = out?; + match out { + WorkerToNet::MayResume => { + if let Some(msg) = pending_message.take() { + ns.inbox.send_async(msg).await?; + } + } + WorkerToNet::Out(msg) => { + send.send_async(msg).await?; + } + WorkerToNet::Finished => break, + WorkerToNet::Yield => { + // yielded = true; + } + } + } + } + } + Ok(()) +} + +// struct SharedState + +impl Reconciler { + pub fn run_worker(&mut self) { + let mut gen = Gen::new(|co| Self::producer(co)); + loop { + match gen.resume_with(&mut self.state) { + GeneratorState::Yielded(val) => { + info!("Yielded: {val:?}") + } + GeneratorState::Complete(res) => { + info!("Complete: {res:?}") + } + } + } + } + + pub fn push_inbox(&mut self, msg: i32) -> bool { + self.state.inbox.push_back(msg); + if self.state.inbox.len() == 2 { + false + } else { + true + } + } + + pub fn drain_outbox(&mut self) -> impl Iterator + '_ { + self.state.outbox.drain(..) 
+ } + + async fn producer(co: Co) -> Result<(), Error> { + loop { + let state = co.yield_(Yield::Init).await; + // exit condition + if state.count > 6 { + co.yield_(Yield::AllDone(state.count)).await; + return Ok(()); + } + + let next = state.inbox.pop_front(); + match next { + None => { + co.yield_(Yield::InboxEmpty).await; + continue; + } + Some(msg) => { + state.outbox.push_back(msg * 17); + if state.outbox.len() == 3 { + co.yield_(Yield::OutboxFull).await; + } + } + } + } + } +} diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index 4aca570c89..f5a0ca4082 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -60,25 +60,6 @@ pub trait Store: Send + 'static { } } -// pub struct StoreHandle { -// pub fn -// } - -// pub enum Op { -// -// } -// #[derive(Debug, Clone)] -// pub struct StoreHandle { -// } -// impl StoreHandle { -// async fn run_op(&self, op: Op) { -// -// } -// } -// /// Extension methods for [`Store`]. -// pub trait StoreExt: Store {} -// impl StoreExt for T {} - /// A very inefficient in-memory store, for testing purposes only #[derive(Debug, Default)] pub struct MemoryStore { @@ -224,63 +205,3 @@ pub enum SplitAction { SendFingerprint(Fingerprint), SendEntries(u64), } -// #[derive(Debug)] -// pub enum RangeSplit { -// SendEntries(u64), -// Fingerprint() -// Split(Vec<(ThreeDRange, Fingerprint)>), -// } - -// pub enum SplitAction { -// SendFingerprint(Fingerprint), -// SendEntries(u64) -// } - -// #[derive(Debug)] -// pub struct RangeFingerprint { -// range: ThreeDRange, -// fingerprint: Fingerprint, -// } -// -// impl RangeFingerprint { -// pub fn new(range: ThreeDRange, fingerprint: Fingerprint) -> Self { -// Self { range, fingerprint } -// } -// } -// pub struct RangePart { -// range: ThreeDRange, -// proceed: Proceed -// } -// -// pub enum Proceed { -// Fingerprint(Fingerprint), -// SendEntries(u64) -// } -// -// impl IntoIterator for RangeSplit { -// type IntoIter = RangeSplitIterator; -// type Item = 
RangeSplitPart; -// fn into_iter(self) -> Self::IntoIter { -// RangeSplitIterator(match self { -// RangeSplit::SendEntries(range, len) => { -// [Some(RangeSplitPart::SendEntries(range, len)), None] -// } -// RangeSplit::SendSplit(parts) => parts.map(Option::Some), -// }) -// } -// } -// -// #[derive(Debug)] -// pub struct RangeSplitIterator([Option; 2]); -// -// impl Iterator for RangeSplitIterator { -// type Item = RangeSplitPart; -// fn next(&mut self) -> Option { -// self.0.iter_mut().filter_map(Option::take).next() -// } -// } -// #[derive(Debug)] -// pub enum RangeSplitPart { -// SendEntries(ThreeDRange, u64), -// SendFingerprint(ThreeDRange, Fingerprint), -// } diff --git a/iroh-willow/src/worker.rs b/iroh-willow/src/worker.rs new file mode 100644 index 0000000000..e69de29bb2 From aff8c44438aba8e53802699b9354101ae00710f6 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 25 Apr 2024 11:55:24 +0200 Subject: [PATCH 008/198] cleanups --- iroh-willow/src/lib.rs | 2 +- iroh-willow/src/net.rs | 21 ++- iroh-willow/src/proto.rs | 2 +- iroh-willow/src/proto/grouping.rs | 6 +- iroh-willow/src/proto/meadowcap.rs | 1 - iroh-willow/src/proto/wgps.rs | 16 +-- iroh-willow/src/proto/willow.rs | 7 +- iroh-willow/src/session.rs | 223 +++++++++++++++-------------- iroh-willow/src/store.rs | 7 +- 9 files changed, 142 insertions(+), 143 deletions(-) diff --git a/iroh-willow/src/lib.rs b/iroh-willow/src/lib.rs index 06567a4353..7d308497a5 100644 --- a/iroh-willow/src/lib.rs +++ b/iroh-willow/src/lib.rs @@ -1,6 +1,6 @@ //! 
Implementation of willow -#![allow(missing_docs, unused_imports, dead_code)] +#![allow(missing_docs)] pub mod net; pub mod proto; diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index e61b091ca1..2e7048c16d 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -1,7 +1,7 @@ use std::{pin::Pin, task::Poll}; use anyhow::{ensure, Context}; -use futures::{FutureExt, SinkExt, Stream, TryFutureExt}; +use futures::{SinkExt, Stream}; use iroh_base::hash::Hash; use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio_stream::StreamExt; @@ -9,12 +9,9 @@ use tokio_util::codec::{Decoder, FramedRead, FramedWrite}; use tracing::{debug, instrument}; use crate::{ - proto::wgps::{ - AccessChallenge, ChallengeHash, CHALLENGE_HASH_LENGTH, CHALLENGE_LENGTH, - MAX_PAYLOAD_SIZE_POWER, - }, + proto::wgps::{AccessChallenge, ChallengeHash, CHALLENGE_HASH_LENGTH, MAX_PAYLOAD_SIZE_POWER}, session::{Role, Session, SessionInit}, - store::{MemoryStore, Store}, + store::Store, }; use self::codec::WillowCodec; @@ -33,7 +30,7 @@ async fn next_if_ready( } #[instrument(skip_all, fields(role=?role))] -async fn run( +pub async fn run( store: &mut S, conn: quinn::Connection, role: Role, @@ -72,7 +69,7 @@ async fn run( // keep pushing already buffered messages while let Some(message) = next_if_ready(&mut reader).await { let message = message.context("error from reader")?; - debug!(%message,awaited=false, "recv"); + debug!(%message,awaited=false, "recv"); // TODO: stop when session is full session.recv(message.into()); } @@ -129,7 +126,7 @@ mod tests { grouping::{AreaOfInterest, ThreeDRange}, keys::{NamespaceId, NamespaceKind, NamespaceSecretKey, UserSecretKey}, meadowcap::{AccessMode, McCapability, OwnedCapability}, - willow::{AuthorisedEntry, Entry, Path, SubspaceId}, + willow::{Entry, Path, SubspaceId}, }, session::{Role, SessionInit}, store::{MemoryStore, Store}, @@ -287,10 +284,12 @@ mod tests { store: &mut S, namespace: NamespaceId, ) -> Vec<(SubspaceId, Path)> { - store + 
let mut entries: Vec<_> = store .get_entries(namespace, &ThreeDRange::all()) .filter_map(|r| r.ok()) .map(|e| (e.subspace_id, e.path)) - .collect() + .collect(); + entries.sort(); + entries } } diff --git a/iroh-willow/src/proto.rs b/iroh-willow/src/proto.rs index cf3e8947ab..d3c5e179e2 100644 --- a/iroh-willow/src/proto.rs +++ b/iroh-willow/src/proto.rs @@ -1,5 +1,5 @@ +pub mod grouping; pub mod keys; pub mod meadowcap; -pub mod grouping; pub mod wgps; pub mod willow; diff --git a/iroh-willow/src/proto/grouping.rs b/iroh-willow/src/proto/grouping.rs index 2aba784a14..c1a127288a 100644 --- a/iroh-willow/src/proto/grouping.rs +++ b/iroh-willow/src/proto/grouping.rs @@ -1,9 +1,7 @@ -use std::{cmp::Ordering, fmt}; +use std::cmp::Ordering; use bytes::Bytes; -use ed25519_dalek::Signature; -use iroh_base::hash::Hash; -use iroh_net::key::PublicKey; + use serde::{Deserialize, Serialize}; use super::willow::{Entry, Path, SubspaceId, Timestamp}; diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index 592ad563eb..fe6168e074 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -1,5 +1,4 @@ use serde::{Deserialize, Serialize}; -use tracing::debug; use super::{ grouping::Area, diff --git a/iroh-willow/src/proto/wgps.rs b/iroh-willow/src/proto/wgps.rs index 4a97f8ec60..587f5cf6a6 100644 --- a/iroh-willow/src/proto/wgps.rs +++ b/iroh-willow/src/proto/wgps.rs @@ -1,17 +1,15 @@ -use std::{cmp::Ordering, fmt}; +use std::fmt; -use bytes::Bytes; -use ed25519_dalek::Signature; use iroh_base::hash::Hash; -use iroh_net::key::PublicKey; + use serde::{Deserialize, Serialize}; use super::{ - grouping::{Area, AreaOfInterest, SubspaceArea, ThreeDRange}, - keys, meadowcap, + grouping::{Area, AreaOfInterest, ThreeDRange}, + meadowcap, willow::{ - AuthorisationToken, AuthorisedEntry, Entry, Path, PossiblyAuthorisedEntry, SubspaceId, - Timestamp, Unauthorised, DIGEST_LENGTH, + AuthorisationToken, AuthorisedEntry, Entry, 
PossiblyAuthorisedEntry, Unauthorised, + DIGEST_LENGTH, }, }; @@ -275,7 +273,7 @@ pub struct ReconciliationAnnounceEntries { pub receiver_handle: AreaOfInterestHandle, /// If this is this the last reply to range received via [`ReconciliationSendFingerprint`] or [`ReconciliationAnnounceEntries`] /// from the other peer, set to that range to indicate to the other peer that no further replies for that range will be sent - /// + /// /// TODO: This is a spec deviation, discuss further and remove or upstream pub is_final_reply_for_range: Option, } diff --git a/iroh-willow/src/proto/willow.rs b/iroh-willow/src/proto/willow.rs index 41ff71e208..b5c5cf414d 100644 --- a/iroh-willow/src/proto/willow.rs +++ b/iroh-willow/src/proto/willow.rs @@ -3,13 +3,10 @@ use std::{cmp::Ordering, sync::Arc}; use bytes::Bytes; use iroh_base::hash::Hash; use serde::{Deserialize, Serialize}; -use zerocopy::{native_endian::U64, FromBytes, IntoBytes, KnownLayout, NoCell, Unaligned}; use super::{ - keys::{self, UserSecretKey, PUBLIC_KEY_LENGTH}, - meadowcap::{ - self, attach_authorisation, create_token, is_authorised_write, InvalidParams, McCapability, - }, + keys::{self, UserSecretKey}, + meadowcap::{self, attach_authorisation, is_authorised_write, InvalidParams, McCapability}, }; /// A type for identifying namespaces. 
diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 4808b8d659..e12743dbc0 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -1,34 +1,28 @@ use std::{ - collections::{hash_map, HashMap, HashSet, VecDeque}, + collections::{HashMap, HashSet, VecDeque}, fmt, }; -use anyhow::bail; use ed25519_dalek::SignatureError; -use futures::future::BoxFuture; -use genawaiter::{sync::Gen, GeneratorState}; + use iroh_base::hash::Hash; -use tracing::{debug, info, trace, warn}; +use tracing::{debug, trace, warn}; use crate::{ proto::{ - grouping::{Area, AreaOfInterest, ThreeDRange}, - keys::{NamespaceId, NamespacePublicKey, UserSecretKey, UserSignature}, - meadowcap::{is_authorised_write, InvalidCapability}, + grouping::{AreaOfInterest, ThreeDRange}, + keys::{NamespaceId, NamespacePublicKey, UserPublicKey, UserSecretKey, UserSignature}, + meadowcap::InvalidCapability, wgps::{ AccessChallenge, AreaOfInterestHandle, CapabilityHandle, ChallengeHash, - CommitmentReveal, ControlAbsolve, ControlAnnounceDropping, ControlApologise, - ControlFreeHandle, ControlIssueGuarantee, ControlPlead, Fingerprint, Handle, - HandleType, IntersectionHandle, LengthyEntry, LogicalChannel, Message, ReadCapability, - ReconciliationAnnounceEntries, ReconciliationSendEntry, ReconciliationSendFingerprint, - SetupBindAreaOfInterest, SetupBindReadCapability, SetupBindStaticToken, StaticToken, - StaticTokenHandle, CHALLENGE_HASH_LENGTH, - }, - willow::{ - AuthorisationToken, AuthorisedEntry, Entry, PossiblyAuthorisedEntry, Unauthorised, + CommitmentReveal, Fingerprint, Handle, LengthyEntry, LogicalChannel, Message, + ReadCapability, ReconciliationAnnounceEntries, ReconciliationSendEntry, + ReconciliationSendFingerprint, SetupBindAreaOfInterest, SetupBindReadCapability, + SetupBindStaticToken, StaticToken, StaticTokenHandle, }, + willow::{AuthorisationToken, AuthorisedEntry, Unauthorised}, }, - store::{RangeSplit, SplitAction, Store, SyncConfig}, + store::{SplitAction, 
Store, SyncConfig}, }; const LOGICAL_CHANNEL_CAP: usize = 128; @@ -48,13 +42,6 @@ impl Default for ResourceMap { } } -#[derive(Debug)] -enum ResourceState { - Active, - WeProposedFree, - ToBeDeleted, -} - impl ResourceMap where H: Handle, @@ -91,6 +78,29 @@ where } } +// #[derive(Debug)] +// enum ResourceState { +// Active, +// WeProposedFree, +// ToBeDeleted, +// } + +#[derive(Debug)] +struct Resource { + value: V, + // state: ResourceState, + // unprocessed_messages: usize, +} +impl Resource { + pub fn new(value: V) -> Self { + Self { + value, + // state: ResourceState::Active, + // unprocessed_messages: 0, + } + } +} + #[derive(Debug, thiserror::Error)] pub enum Error { #[error("local store failed")] @@ -136,47 +146,11 @@ impl From for Error { } } -#[derive(Debug)] -struct Resource { - value: V, - state: ResourceState, - unprocessed_messages: usize, -} -impl Resource { - pub fn new(value: V) -> Self { - Self { - value, - state: ResourceState::Active, - unprocessed_messages: 0, - } - } -} - #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum Role { Betty, Alfie, } -#[derive(Debug)] -pub struct Challenges { - ours: AccessChallenge, - theirs: AccessChallenge, -} - -impl Challenges { - pub fn from_nonces( - our_role: Role, - our_nonce: AccessChallenge, - their_nonce: AccessChallenge, - ) -> Self { - let ours = match our_role { - Role::Alfie => bitwise_xor(our_nonce, their_nonce), - Role::Betty => bitwise_xor_complement(our_nonce, their_nonce), - }; - let theirs = bitwise_complement(ours); - Self { ours, theirs } - } -} #[derive(Debug, Default)] pub struct Resources { @@ -194,26 +168,85 @@ pub struct SessionInit { pub area_of_interest: AreaOfInterest, } +#[derive(Debug)] +enum ChallengeState { + Committed { + our_nonce: AccessChallenge, + received_commitment: ChallengeHash, + }, + Revealed { + ours: AccessChallenge, + theirs: AccessChallenge, + }, +} + +impl ChallengeState { + pub fn reveal(&mut self, our_role: Role, their_nonce: AccessChallenge) -> Result<(), 
Error> { + match self { + Self::Committed { + our_nonce, + received_commitment, + } => { + if Hash::new(&their_nonce).as_bytes() != received_commitment { + return Err(Error::BrokenCommittement); + } + let ours = match our_role { + Role::Alfie => bitwise_xor(*our_nonce, their_nonce), + Role::Betty => bitwise_xor_complement(*our_nonce, their_nonce), + }; + let theirs = bitwise_complement(ours); + *self = Self::Revealed { ours, theirs }; + Ok(()) + } + _ => Err(Error::InvalidMessageInCurrentState), + } + } + + pub fn sign_ours(&self, secret_key: &UserSecretKey) -> Result { + let challenge = self.get_ours()?; + let signature = secret_key.sign(challenge); + Ok(signature) + } + + pub fn verify(&self, user_key: &UserPublicKey, signature: &UserSignature) -> Result<(), Error> { + let their_challenge = self.get_theirs()?; + user_key.verify(their_challenge, &signature)?; + Ok(()) + } + + fn get_ours(&self) -> Result<&AccessChallenge, Error> { + match self { + Self::Revealed { ours, .. } => Ok(&ours), + _ => Err(Error::InvalidMessageInCurrentState), + } + } + + fn get_theirs(&self) -> Result<&AccessChallenge, Error> { + match self { + Self::Revealed { theirs, .. 
} => Ok(&theirs), + _ => Err(Error::InvalidMessageInCurrentState), + } + } +} + #[derive(Debug)] pub struct Session { role: Role, - our_nonce: AccessChallenge, - received_commitment: ChallengeHash, - init: Option, - challenge: Option, - // init_state: SessionInit, - their_maximum_payload_size: usize, + _their_maximum_payload_size: usize, + + init: SessionInit, + challenge_state: ChallengeState, control_channel: Channel, reconciliation_channel: Channel, - our_current_aoi: Option, - our_resources: Resources, their_resources: Resources, pending_ranges: HashSet<(AreaOfInterestHandle, ThreeDRange)>, pending_entries: Option, + reconciliation_started: bool, + our_current_aoi: Option, } impl Session { @@ -224,18 +257,20 @@ impl Session { received_commitment: ChallengeHash, init: SessionInit, ) -> Self { - let mut this = Self { - role: our_role, + let challenge_state = ChallengeState::Committed { our_nonce, - challenge: None, - their_maximum_payload_size, received_commitment, + }; + let mut this = Self { + role: our_role, + _their_maximum_payload_size: their_maximum_payload_size, + challenge_state, control_channel: Default::default(), reconciliation_channel: Default::default(), our_resources: Default::default(), their_resources: Default::default(), our_current_aoi: None, // config - init: Some(init), + init, pending_ranges: Default::default(), pending_entries: Default::default(), reconciliation_started: false, @@ -279,7 +314,8 @@ impl Session { && self.pending_entries.is_none()) } - fn init(&mut self, init: &SessionInit) -> Result<(), Error> { + fn setup(&mut self) -> Result<(), Error> { + let init = &self.init; let area_of_interest = init.area_of_interest.clone(); let capability = init.capability.clone(); @@ -292,7 +328,7 @@ impl Session { let intersection_handle = 0.into(); // register read capability - let signature = self.sign_challenge(&init.user_secret_key)?; + let signature = self.challenge_state.sign_ours(&init.user_secret_key)?; let our_capability_handle = 
self.our_resources.capabilities.bind(capability.clone()); let msg = SetupBindReadCapability { capability, @@ -313,41 +349,16 @@ impl Session { Ok(()) } - fn sign_challenge(&self, secret_key: &UserSecretKey) -> Result { - let challenge = self - .challenge - .as_ref() - .ok_or(Error::InvalidMessageInCurrentState)?; - let signature = secret_key.sign(&challenge.ours); - Ok(signature) - } - fn process_control(&mut self, store: &mut S, message: Message) -> Result<(), Error> { match message { Message::CommitmentReveal(msg) => { - if self.challenge.is_some() { - return Err(Error::InvalidMessageInCurrentState); - } - if Hash::new(&msg.nonce).as_bytes() != &self.received_commitment { - return Err(Error::BrokenCommittement); - } - self.challenge = Some(Challenges::from_nonces( - self.role, - self.our_nonce, - msg.nonce, - )); - let init = self.init.take().expect("unreachable"); - self.init(&init)?; + self.challenge_state.reveal(self.role, msg.nonce)?; + self.setup()?; } Message::SetupBindReadCapability(msg) => { - let challenge = self - .challenge - .as_ref() - .ok_or(Error::InvalidMessageInCurrentState)?; msg.capability.validate()?; - msg.capability - .receiver() - .verify(&challenge.theirs, &msg.signature)?; + self.challenge_state + .verify(msg.capability.receiver(), &msg.signature)?; // TODO: verify intersection handle self.their_resources.capabilities.bind(msg.capability); } @@ -393,7 +404,6 @@ impl Session { handle } - /// Uses the blocking [`Store`] and thus may only be called in the worker thread. 
fn init_reconciliation( &mut self, store: &mut S, @@ -548,7 +558,6 @@ impl Session { } } } - // drop(iter); for (subrange, count, is_final_reply) in announce_entries.into_iter() { self.announce_then_send_entries( store, diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index f5a0ca4082..94bb0cb22e 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -1,10 +1,9 @@ use std::collections::HashMap; use anyhow::Result; -use iroh_base::hash::Hash; use crate::proto::{ - grouping::{path_range_end, subspace_range_end, Area, Range, RangeEnd, ThreeDRange}, + grouping::{Range, RangeEnd, ThreeDRange}, wgps::Fingerprint, willow::{AuthorisedEntry, Entry, NamespaceId}, }; @@ -12,9 +11,9 @@ use crate::proto::{ #[derive(Debug, Clone, Copy)] pub struct SyncConfig { /// Up to how many values to send immediately, before sending only a fingerprint. - max_set_size: usize, + pub max_set_size: usize, /// `k` in the protocol, how many splits to generate. at least 2 - split_factor: usize, + pub split_factor: usize, } impl Default for SyncConfig { From f5a6724e9a0e1b565934d0f644a9f4f48c627352 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 25 Apr 2024 12:12:33 +0200 Subject: [PATCH 009/198] cleanup --- iroh-willow/src/net.rs | 4 +- iroh-willow/src/proto/grouping.rs | 66 ++++++++++++++++++++++++------- 2 files changed, 54 insertions(+), 16 deletions(-) diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 2e7048c16d..cf8d1b3f85 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -275,7 +275,7 @@ mod tests { } fn get_entries(store: &mut S, namespace: NamespaceId) -> HashSet { store - .get_entries(namespace, &ThreeDRange::all()) + .get_entries(namespace, &ThreeDRange::full()) .filter_map(Result::ok) .collect() } @@ -285,7 +285,7 @@ mod tests { namespace: NamespaceId, ) -> Vec<(SubspaceId, Path)> { let mut entries: Vec<_> = store - .get_entries(namespace, &ThreeDRange::all()) + .get_entries(namespace, 
&ThreeDRange::full()) .filter_map(|r| r.ok()) .map(|e| (e.subspace_id, e.path)) .collect(); diff --git a/iroh-willow/src/proto/grouping.rs b/iroh-willow/src/proto/grouping.rs index c1a127288a..ab0588fcf2 100644 --- a/iroh-willow/src/proto/grouping.rs +++ b/iroh-willow/src/proto/grouping.rs @@ -1,7 +1,6 @@ use std::cmp::Ordering; use bytes::Bytes; - use serde::{Deserialize, Serialize}; use super::willow::{Entry, Path, SubspaceId, Timestamp}; @@ -11,11 +10,14 @@ use super::willow::{Entry, Path, SubspaceId, Timestamp}; pub struct ThreeDRange { /// Range of [`SubspaceId`] pub subspaces: Range, + /// Range of [`Path`] pub paths: Range, + /// Range of [`Timestamp`] pub times: Range, } impl ThreeDRange { + /// Create a new range from its parts. pub fn new(subspaces: Range, paths: Range, times: Range) -> Self { Self { subspaces, @@ -23,20 +25,34 @@ impl ThreeDRange { times, } } + + /// Create a new range that covers everything. + pub fn full() -> Self { + Self::new(Default::default(), Default::default(), Default::default()) + } + + /// Create a new empty range. + pub fn empty() -> Self { + Self::new( + Default::default(), + Default::default(), + Range::new(0, RangeEnd::Closed(0)), + ) + } + + /// Returns `true` if `entry` is included in this range. pub fn includes_entry(&self, entry: &Entry) -> bool { self.subspaces.includes(&entry.subspace_id) && self.paths.includes(&entry.path) && self.times.includes(&entry.timestamp) } + /// Returns `true` if this range is completely empty. pub fn is_empty(&self) -> bool { self.subspaces.is_empty() || self.paths.is_empty() || self.times.is_empty() } - pub fn all() -> Self { - Self::new(Default::default(), Default::default(), Default::default()) - } - + /// Get the intersection between this and another range. 
pub fn intersection(&self, other: &ThreeDRange) -> Option { let paths = self.paths.intersection(&other.paths)?; let times = self.times.intersection(&other.times)?; @@ -49,9 +65,18 @@ impl ThreeDRange { } } +/// Ranges are simple, one-dimensional ways of grouping Entries. +/// +/// They can express groupings such as “last week’s Entries”. A range is either a closed range or an open range. +/// A closed range consists of a start value and an end value, an open range consists only of a start value. +/// A range includes all values greater than or equal to its start value and strictly less than its end value +/// (if it is has one). A range is empty if it includes no values. #[derive(Debug, Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Hash)] pub struct Range { + /// A value must be equal or greater than the `start` value to be included in the range. pub start: T, + /// If [`RangeEnd::Open`], this is an open range. Otherwise, a value must be strictly less than + /// the `end` value to be included in the range. pub end: RangeEnd, } @@ -62,28 +87,38 @@ impl From<(T, RangeEnd)> for Range { } impl Range { + /// Create a new range. pub fn new(start: T, end: RangeEnd) -> Self { Self { start, end } } + + /// Returns `true` if this range is closed. pub fn is_closed(&self) -> bool { matches!(self.end, RangeEnd::Closed(_)) } + + /// Returns `true` if this range is open. pub fn is_open(&self) -> bool { matches!(self.end, RangeEnd::Open) } } + impl Range { - fn all() -> Self { + /// Create a new range that covers everything. + pub fn full() -> Self { Self::new(T::default(), RangeEnd::Open) } } + impl Default for Range { fn default() -> Self { - Self::all() + Self::full() } } + impl Range { - fn intersection(&self, other: &Self) -> Option { + /// Create the intersection between this range and another range. 
+ pub fn intersection(&self, other: &Self) -> Option { let start = (&self.start).max(&other.start); let end = match (&self.end, &other.end) { (RangeEnd::Open, RangeEnd::Closed(b)) => RangeEnd::Closed(b), @@ -100,7 +135,9 @@ impl Range { } } } + impl Range { + /// Returns `true` if this range includes nothing. pub fn is_empty(&self) -> bool { match &self.end { RangeEnd::Open => false, @@ -110,18 +147,23 @@ impl Range { } impl Range { + /// Returns `true` if `value` is included in this range. pub fn includes(&self, value: &T) -> bool { value >= &self.start && self.end.includes(value) } + /// Returns `true` if `other` range is fully included in this range. pub fn includes_range(&self, other: &Range) -> bool { self.start <= other.start && self.end >= other.end } } +/// The end of a range, either open or closed. #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Copy, Hash)] pub enum RangeEnd { + /// Closed end: a value has to be strictly less than the close value to be included. Closed(T), + /// Open range (no end value) Open, } @@ -148,6 +190,7 @@ impl PartialOrd for RangeEnd { // } impl RangeEnd { + /// Returns `true` if the range end is open, or if `value` is strictly less than the range end. pub fn includes(&self, value: &T) -> bool { match self { Self::Open => true, @@ -168,6 +211,7 @@ pub struct AreaOfInterest { } impl AreaOfInterest { + /// Create a new [`AreaOfInterest`] that covers everything. 
pub fn full() -> Self { Self { area: Area::full(), @@ -301,12 +345,6 @@ pub fn path_range_end(path: &Path) -> RangeEnd { RangeEnd::Closed(Path::from_bytes_unchecked(out)) } } - // let mut bytes = id.to_bytes(); - // if increment_by_one(&mut bytes) { - // RangeEnd::Closed(SubspaceId::from_bytes_unchecked(bytes)) - // } else { - // RangeEnd::Open - // } } pub fn subspace_range_end(id: SubspaceId) -> RangeEnd { From ab16c40f6d4b46b52468f0ff6d1855437759f64b Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 25 Apr 2024 14:24:51 +0200 Subject: [PATCH 010/198] remove unused code --- iroh-willow/src/session/reconciler.rs | 161 -------------------------- 1 file changed, 161 deletions(-) delete mode 100644 iroh-willow/src/session/reconciler.rs diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs deleted file mode 100644 index fd48d3baea..0000000000 --- a/iroh-willow/src/session/reconciler.rs +++ /dev/null @@ -1,161 +0,0 @@ -use std::collections::VecDeque; - -use super::Error; -use genawaiter::{ - sync::{Co, Gen}, - GeneratorState, -}; -use tracing::info; - -#[derive(Debug)] -pub enum Yield { - Init, - OutboxFull, - InboxEmpty, - AllDone(usize), -} - -// pub type Coroutine = genawaiter::Coroutine - -#[derive(Debug)] -pub struct Reconciler { - // gen: Option>, // co: genawaiter::sync::Co>, - state: State, -} - -#[derive(Debug)] -pub struct State { - outbox: VecDeque, - inbox: VecDeque, - count: usize, -} - -pub struct WorkerState { - inbox: flume::Receiver, - outbox: flume::Sender, - sum: usize, -} - -pub struct NetState { - outbox: flume::Receiver, - inbox: flume::Sender, -} - -fn create_state(cap: usize) -> (NetState, WorkerState) { - let (outbox_send, outbox_recv) = flume::bounded(cap); - let (inbox_send, inbox_recv) = flume::bounded(cap); - let ws = WorkerState { - inbox: inbox_recv, - outbox: outbox_send, - sum: 0, - }; - let ns = NetState { - inbox: inbox_send, - outbox: outbox_recv, - }; - (ns, ws) -} - -enum 
WorkerToNet { - MayResume, - Yield, - Finished, - Out(i32), -} - -async fn run_net( - ns: NetState, - recv: flume::Receiver, - send: flume::Sender, -) -> anyhow::Result<()> { - loop { - let mut pending_message = None; - // let mut yieled = true; - tokio::select! { - next = recv.recv_async(), if pending_message.is_none( )=> { - let msg = next?; - // if yielded { - // yielded = false; - // notify_worker(); - // } - if let Err(msg) = ns.inbox.try_send(msg) { - pending_message.insert(msg.into_inner()); - } - } - out = ns.outbox.recv_async() => { - let out = out?; - match out { - WorkerToNet::MayResume => { - if let Some(msg) = pending_message.take() { - ns.inbox.send_async(msg).await?; - } - } - WorkerToNet::Out(msg) => { - send.send_async(msg).await?; - } - WorkerToNet::Finished => break, - WorkerToNet::Yield => { - // yielded = true; - } - } - } - } - } - Ok(()) -} - -// struct SharedState - -impl Reconciler { - pub fn run_worker(&mut self) { - let mut gen = Gen::new(|co| Self::producer(co)); - loop { - match gen.resume_with(&mut self.state) { - GeneratorState::Yielded(val) => { - info!("Yielded: {val:?}") - } - GeneratorState::Complete(res) => { - info!("Complete: {res:?}") - } - } - } - } - - pub fn push_inbox(&mut self, msg: i32) -> bool { - self.state.inbox.push_back(msg); - if self.state.inbox.len() == 2 { - false - } else { - true - } - } - - pub fn drain_outbox(&mut self) -> impl Iterator + '_ { - self.state.outbox.drain(..) 
- } - - async fn producer(co: Co) -> Result<(), Error> { - loop { - let state = co.yield_(Yield::Init).await; - // exit condition - if state.count > 6 { - co.yield_(Yield::AllDone(state.count)).await; - return Ok(()); - } - - let next = state.inbox.pop_front(); - match next { - None => { - co.yield_(Yield::InboxEmpty).await; - continue; - } - Some(msg) => { - state.outbox.push_back(msg * 17); - if state.outbox.len() == 3 { - co.yield_(Yield::OutboxFull).await; - } - } - } - } - } -} From 31bb1fe0425a5681e027dfcf9258692fa94d88b0 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Fri, 26 Apr 2024 02:21:48 +0200 Subject: [PATCH 011/198] cleanup and renames --- iroh-willow/src/session.rs | 170 ++++++++-------------------- iroh-willow/src/session/resource.rs | 111 ++++++++++++++++++ 2 files changed, 156 insertions(+), 125 deletions(-) create mode 100644 iroh-willow/src/session/resource.rs diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index e12743dbc0..e5499f4b19 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -1,5 +1,5 @@ use std::{ - collections::{HashMap, HashSet, VecDeque}, + collections::{HashSet, VecDeque}, fmt, }; @@ -15,91 +15,21 @@ use crate::{ meadowcap::InvalidCapability, wgps::{ AccessChallenge, AreaOfInterestHandle, CapabilityHandle, ChallengeHash, - CommitmentReveal, Fingerprint, Handle, LengthyEntry, LogicalChannel, Message, - ReadCapability, ReconciliationAnnounceEntries, ReconciliationSendEntry, - ReconciliationSendFingerprint, SetupBindAreaOfInterest, SetupBindReadCapability, - SetupBindStaticToken, StaticToken, StaticTokenHandle, + CommitmentReveal, Fingerprint, LengthyEntry, LogicalChannel, Message, ReadCapability, + ReconciliationAnnounceEntries, ReconciliationSendEntry, ReconciliationSendFingerprint, + SetupBindAreaOfInterest, SetupBindReadCapability, SetupBindStaticToken, StaticToken, + StaticTokenHandle, }, willow::{AuthorisationToken, AuthorisedEntry, Unauthorised}, }, 
store::{SplitAction, Store, SyncConfig}, }; -const LOGICAL_CHANNEL_CAP: usize = 128; - -#[derive(Debug)] -struct ResourceMap { - next_handle: u64, - map: HashMap>, -} - -impl Default for ResourceMap { - fn default() -> Self { - Self { - next_handle: 0, - map: Default::default(), - } - } -} - -impl ResourceMap -where - H: Handle, - R: Eq + PartialEq, -{ - pub fn bind(&mut self, resource: R) -> H { - let handle: H = self.next_handle.into(); - self.next_handle += 1; - let resource = Resource::new(resource); - self.map.insert(handle, resource); - handle - } - - pub fn bind_if_new(&mut self, resource: R) -> (H, bool) { - // TODO: Optimize / find out if reverse index is better than find_map - if let Some(handle) = self - .map - .iter() - .find_map(|(handle, r)| (r.value == resource).then_some(handle)) - { - (*handle, false) - } else { - let handle = self.bind(resource); - (handle, true) - } - } - - pub fn get(&self, handle: &H) -> Option<&R> { - self.map.get(handle).as_ref().map(|r| &r.value) - } +use self::resource::ScopedResources; - pub fn try_get(&self, handle: &H) -> Result<&R, Error> { - self.get(handle).ok_or(Error::MissingResource) - } -} - -// #[derive(Debug)] -// enum ResourceState { -// Active, -// WeProposedFree, -// ToBeDeleted, -// } +const LOGICAL_CHANNEL_CAP: usize = 128; -#[derive(Debug)] -struct Resource { - value: V, - // state: ResourceState, - // unprocessed_messages: usize, -} -impl Resource { - pub fn new(value: V) -> Self { - Self { - value, - // state: ResourceState::Active, - // unprocessed_messages: 0, - } - } -} +pub mod resource; #[derive(Debug, thiserror::Error)] pub enum Error { @@ -152,13 +82,6 @@ pub enum Role { Alfie, } -#[derive(Debug, Default)] -pub struct Resources { - capabilities: ResourceMap, - areas_of_interest: ResourceMap, - static_tokens: ResourceMap, -} - #[derive(Debug)] pub struct SessionInit { pub user_secret_key: UserSecretKey, @@ -240,8 +163,8 @@ pub struct Session { control_channel: Channel, reconciliation_channel: 
Channel, - our_resources: Resources, - their_resources: Resources, + our_resources: ScopedResources, + their_resources: ScopedResources, pending_ranges: HashSet<(AreaOfInterestHandle, ThreeDRange)>, pending_entries: Option, @@ -366,10 +289,7 @@ impl Session { self.their_resources.static_tokens.bind(msg.static_token); } Message::SetupBindAreaOfInterest(msg) => { - let capability = self - .their_resources - .capabilities - .try_get(&msg.authorisation)?; + let capability = self.handle_to_capability(Scope::Theirs, &msg.authorisation)?; capability.try_granted_area(&msg.area_of_interest.area)?; let their_handle = self.their_resources.areas_of_interest.bind(msg); @@ -410,19 +330,15 @@ impl Session { our_handle: &AreaOfInterestHandle, their_handle: &AreaOfInterestHandle, ) -> Result<(), Error> { - let our_aoi = self.our_resources.areas_of_interest.try_get(&our_handle)?; - let their_aoi = self - .our_resources - .areas_of_interest - .try_get(&their_handle)?; + let our_aoi = self.our_resources.areas_of_interest.get(&our_handle)?; + let their_aoi = self.their_resources.areas_of_interest.get(&their_handle)?; let our_capability = self .our_resources .capabilities - .try_get(&our_aoi.authorisation)?; + .get(&our_aoi.authorisation)?; let namespace = our_capability.granted_namespace(); - // TODO: intersect with their_aoi first let common_aoi = &our_aoi .area() .intersection(&their_aoi.area()) @@ -479,7 +395,7 @@ impl Session { Ok(()) } - fn announce_then_send_entries( + fn announce_and_send_entries( &mut self, store: &mut S, namespace: NamespaceId, @@ -488,18 +404,18 @@ impl Session { their_handle: AreaOfInterestHandle, want_response: bool, is_final_reply_for_range: Option, - local_count: Option, + our_count: Option, ) -> Result<(), Error> { if want_response { self.pending_ranges.insert((our_handle, range.clone())); } - let local_count = match local_count { + let our_count = match our_count { Some(count) => count, None => store.count_range(namespace, &range)?, }; let msg = 
ReconciliationAnnounceEntries { range: range.clone(), - count: local_count, + count: our_count, want_response, will_sort: false, // todo: sorted? sender_handle: our_handle, @@ -511,7 +427,7 @@ impl Session { let authorised_entry = authorised_entry?; let (entry, token) = authorised_entry.into_parts(); let (static_token, dynamic_token) = token.into_parts(); - // TODO: partial entries + // TODO: partial payloads let available = entry.payload_length; let static_token_handle = self.bind_static_token(static_token); let msg = ReconciliationSendEntry { @@ -559,7 +475,7 @@ impl Session { } } for (subrange, count, is_final_reply) in announce_entries.into_iter() { - self.announce_then_send_entries( + self.announce_and_send_entries( store, namespace, &subrange, @@ -587,7 +503,6 @@ impl Session { Ok(()) } - /// Uses the blocking [`Store`] and thus may only be called in the worker thread. fn process_reconciliation( &mut self, store: &mut S, @@ -621,7 +536,7 @@ impl Session { } // case 2: fingerprint is empty else if their_fingerprint.is_empty() { - self.announce_then_send_entries( + self.announce_and_send_entries( store, namespace, &range, @@ -660,7 +575,7 @@ impl Session { } let namespace = self.range_is_authorised(&range, &our_handle, &their_handle)?; if want_response { - self.announce_then_send_entries( + self.announce_and_send_entries( store, namespace, &range, @@ -688,7 +603,7 @@ impl Session { let static_token = self .their_resources .static_tokens - .try_get(&static_token_handle)?; + .get(&static_token_handle)?; // TODO: avoid clone of static token? 
let authorisation_token = AuthorisationToken::from_parts(static_token.clone(), dynamic_token); @@ -712,13 +627,13 @@ impl Session { receiver_handle: &AreaOfInterestHandle, sender_handle: &AreaOfInterestHandle, ) -> Result { - let our_namespace = self.handle_to_namespace_id(Scope::Us, receiver_handle)?; - let their_namespace = self.handle_to_namespace_id(Scope::Them, sender_handle)?; + let our_namespace = self.handle_to_namespace_id(Scope::Ours, receiver_handle)?; + let their_namespace = self.handle_to_namespace_id(Scope::Theirs, sender_handle)?; if our_namespace != their_namespace { return Err(Error::AreaOfInterestNamespaceMismatch); } - let our_aoi = self.handle_to_aoi(Scope::Us, receiver_handle)?; - let their_aoi = self.handle_to_aoi(Scope::Them, sender_handle)?; + let our_aoi = self.handle_to_aoi(Scope::Ours, receiver_handle)?; + let their_aoi = self.handle_to_aoi(Scope::Theirs, sender_handle)?; if !our_aoi.area().includes_range(&range) || !their_aoi.area().includes_range(&range) { return Err(Error::RangeOutsideCapability); @@ -726,14 +641,25 @@ impl Session { Ok(our_namespace.into()) } + fn handle_to_capability( + &self, + scope: Scope, + handle: &CapabilityHandle, + ) -> Result<&ReadCapability, Error> { + match scope { + Scope::Ours => self.our_resources.capabilities.get(handle), + Scope::Theirs => self.their_resources.capabilities.get(handle), + } + } + fn handle_to_aoi( &self, scope: Scope, handle: &AreaOfInterestHandle, ) -> Result<&SetupBindAreaOfInterest, Error> { match scope { - Scope::Us => self.our_resources.areas_of_interest.try_get(handle), - Scope::Them => self.their_resources.areas_of_interest.try_get(handle), + Scope::Ours => self.our_resources.areas_of_interest.get(handle), + Scope::Theirs => self.their_resources.areas_of_interest.get(handle), } } @@ -744,23 +670,17 @@ impl Session { ) -> Result<&NamespacePublicKey, Error> { let aoi = self.handle_to_aoi(scope, handle)?; let capability = match scope { - Scope::Us => self - .our_resources - 
.capabilities - .try_get(&aoi.authorisation)?, - Scope::Them => self - .their_resources - .capabilities - .try_get(&aoi.authorisation)?, + Scope::Ours => self.our_resources.capabilities.get(&aoi.authorisation)?, + Scope::Theirs => self.their_resources.capabilities.get(&aoi.authorisation)?, }; Ok(capability.granted_namespace()) } } #[derive(Copy, Clone, Debug)] -enum Scope { - Us, - Them, +pub enum Scope { + Ours, + Theirs, } #[derive(Debug)] diff --git a/iroh-willow/src/session/resource.rs b/iroh-willow/src/session/resource.rs new file mode 100644 index 0000000000..3e3fd6ec35 --- /dev/null +++ b/iroh-willow/src/session/resource.rs @@ -0,0 +1,111 @@ +use std::collections::HashMap; + +use crate::proto::wgps::{ + AreaOfInterestHandle, CapabilityHandle, Handle, ReadCapability, SetupBindAreaOfInterest, + StaticToken, StaticTokenHandle, +}; + +use super::Error; + +#[derive(Debug, Default)] +pub struct ScopedResources { + pub capabilities: ResourceMap, + pub areas_of_interest: ResourceMap, + pub static_tokens: ResourceMap, +} + +#[derive(Debug)] +pub struct ResourceMap { + next_handle: u64, + map: HashMap>, +} + +impl Default for ResourceMap { + fn default() -> Self { + Self { + next_handle: 0, + map: Default::default(), + } + } +} + +impl ResourceMap +where + H: Handle, + R: Eq + PartialEq, +{ + pub fn bind(&mut self, resource: R) -> H { + let handle: H = self.next_handle.into(); + self.next_handle += 1; + let resource = Resource::new(resource); + self.map.insert(handle, resource); + handle + } + + pub fn bind_if_new(&mut self, resource: R) -> (H, bool) { + // TODO: Optimize / find out if reverse index is better than find_map + if let Some(handle) = self + .map + .iter() + .find_map(|(handle, r)| (r.value == resource).then_some(handle)) + { + (*handle, false) + } else { + let handle = self.bind(resource); + (handle, true) + } + } + + pub fn get(&self, handle: &H) -> Result<&R, Error> { + self.map + .get(handle) + .as_ref() + .map(|r| &r.value) + 
.ok_or(Error::MissingResource) + } +} + +// #[derive(Debug)] +// enum ResourceState { +// Active, +// WeProposedFree, +// ToBeDeleted, +// } + +#[derive(Debug)] +struct Resource { + value: V, + // state: ResourceState, + // unprocessed_messages: usize, +} +impl Resource { + pub fn new(value: V) -> Self { + Self { + value, + // state: ResourceState::Active, + // unprocessed_messages: 0, + } + } +} + +// #[derive(Debug, Default)] +// pub struct Resources { +// pub ours: ScopedResources, +// pub theirs: ScopedResources, +// } +// +// impl Resources { +// pub fn scope(&self, scope: Scope) -> &ScopedResources { +// match scope { +// Scope::Ours => &self.ours, +// Scope::Theirs => &self.theirs, +// } +// } +// +// pub fn scope_mut(&mut self, scope: Scope) -> &mut ScopedResources { +// match scope { +// Scope::Ours => &mut self.ours, +// Scope::Theirs => &mut self.theirs, +// } +// } +// } From cdb5975ef8971aa2d7472568b8bc6deffb53cf3d Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Fri, 26 Apr 2024 17:00:42 +0200 Subject: [PATCH 012/198] small cleanup --- iroh-willow/src/session.rs | 41 +++++++++++++++++++++----------------- 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index e5499f4b19..99c5ffed9f 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -125,7 +125,7 @@ impl ChallengeState { } } - pub fn sign_ours(&self, secret_key: &UserSecretKey) -> Result { + pub fn sign(&self, secret_key: &UserSecretKey) -> Result { let challenge = self.get_ours()?; let signature = secret_key.sign(challenge); Ok(signature) @@ -158,7 +158,7 @@ pub struct Session { _their_maximum_payload_size: usize, init: SessionInit, - challenge_state: ChallengeState, + challenge: ChallengeState, control_channel: Channel, reconciliation_channel: Channel, @@ -187,7 +187,7 @@ impl Session { let mut this = Self { role: our_role, _their_maximum_payload_size: their_maximum_payload_size, - 
challenge_state, + challenge: challenge_state, control_channel: Default::default(), reconciliation_channel: Default::default(), our_resources: Default::default(), @@ -251,7 +251,7 @@ impl Session { let intersection_handle = 0.into(); // register read capability - let signature = self.challenge_state.sign_ours(&init.user_secret_key)?; + let signature = self.challenge.sign(&init.user_secret_key)?; let our_capability_handle = self.our_resources.capabilities.bind(capability.clone()); let msg = SetupBindReadCapability { capability, @@ -275,12 +275,12 @@ impl Session { fn process_control(&mut self, store: &mut S, message: Message) -> Result<(), Error> { match message { Message::CommitmentReveal(msg) => { - self.challenge_state.reveal(self.role, msg.nonce)?; + self.challenge.reveal(self.role, msg.nonce)?; self.setup()?; } Message::SetupBindReadCapability(msg) => { msg.capability.validate()?; - self.challenge_state + self.challenge .verify(msg.capability.receiver(), &msg.signature)?; // TODO: verify intersection handle self.their_resources.capabilities.bind(msg.capability); @@ -641,15 +641,26 @@ impl Session { Ok(our_namespace.into()) } + fn resources(&self, scope: Scope) -> &ScopedResources { + match scope { + Scope::Ours => &self.our_resources, + Scope::Theirs => &self.their_resources, + } + } + + // fn resources_mut(&mut self, scope: Scope) -> &ScopedResources { + // match scope { + // Scope::Ours => &mut self.our_resources, + // Scope::Theirs => &mut self.their_resources, + // } + // } + fn handle_to_capability( &self, scope: Scope, handle: &CapabilityHandle, ) -> Result<&ReadCapability, Error> { - match scope { - Scope::Ours => self.our_resources.capabilities.get(handle), - Scope::Theirs => self.their_resources.capabilities.get(handle), - } + self.resources(scope).capabilities.get(handle) } fn handle_to_aoi( @@ -657,10 +668,7 @@ impl Session { scope: Scope, handle: &AreaOfInterestHandle, ) -> Result<&SetupBindAreaOfInterest, Error> { - match scope { - Scope::Ours => 
self.our_resources.areas_of_interest.get(handle), - Scope::Theirs => self.their_resources.areas_of_interest.get(handle), - } + self.resources(scope).areas_of_interest.get(handle) } fn handle_to_namespace_id( @@ -669,10 +677,7 @@ impl Session { handle: &AreaOfInterestHandle, ) -> Result<&NamespacePublicKey, Error> { let aoi = self.handle_to_aoi(scope, handle)?; - let capability = match scope { - Scope::Ours => self.our_resources.capabilities.get(&aoi.authorisation)?, - Scope::Theirs => self.their_resources.capabilities.get(&aoi.authorisation)?, - }; + let capability = self.resources(scope).capabilities.get(&aoi.authorisation)?; Ok(capability.granted_namespace()) } } From 9d8a9e4e1000dfbd1d932212fb7ff0af77c70d3e Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Mon, 29 Apr 2024 12:25:30 +0200 Subject: [PATCH 013/198] refactor: use encoder trait --- iroh-willow/src/lib.rs | 1 + iroh-willow/src/proto/meadowcap.rs | 2 ++ iroh-willow/src/proto/wgps.rs | 17 ++++++++- iroh-willow/src/proto/willow.rs | 56 +++++++++++------------------- iroh-willow/src/util.rs | 13 +++++++ 5 files changed, 52 insertions(+), 37 deletions(-) create mode 100644 iroh-willow/src/util.rs diff --git a/iroh-willow/src/lib.rs b/iroh-willow/src/lib.rs index 7d308497a5..1375ed16a4 100644 --- a/iroh-willow/src/lib.rs +++ b/iroh-willow/src/lib.rs @@ -6,3 +6,4 @@ pub mod net; pub mod proto; pub mod session; pub mod store; +pub mod util; diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index fe6168e074..35a64bac79 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -1,5 +1,7 @@ use serde::{Deserialize, Serialize}; +use crate::util::Encoder; + use super::{ grouping::Area, keys::{self, NamespaceSecretKey, UserSecretKey, PUBLIC_KEY_LENGTH}, diff --git a/iroh-willow/src/proto/wgps.rs b/iroh-willow/src/proto/wgps.rs index 587f5cf6a6..59b968cc81 100644 --- a/iroh-willow/src/proto/wgps.rs +++ b/iroh-willow/src/proto/wgps.rs 
@@ -1,9 +1,11 @@ -use std::fmt; +use std::{fmt, io::Write}; use iroh_base::hash::Hash; use serde::{Deserialize, Serialize}; +use crate::util::Encoder; + use super::{ grouping::{Area, AreaOfInterest, ThreeDRange}, meadowcap, @@ -179,6 +181,19 @@ pub enum Message { ControlFreeHandle(ControlFreeHandle), } +impl Encoder for Message { + fn encoded_len(&self) -> usize { + postcard::experimental::serialized_size(&self).unwrap() + 4 + } + + fn encode_into(&self, out: &mut W) -> std::io::Result<()> { + let len = self.encoded_len() as u32; + out.write_all(&len.to_be_bytes())?; + postcard::to_io(self, out).expect("encoding not to fail"); + Ok(()) + } +} + impl Message { pub fn logical_channel(&self) -> LogicalChannel { match self { diff --git a/iroh-willow/src/proto/willow.rs b/iroh-willow/src/proto/willow.rs index b5c5cf414d..cf23de9205 100644 --- a/iroh-willow/src/proto/willow.rs +++ b/iroh-willow/src/proto/willow.rs @@ -283,9 +283,11 @@ pub mod encodings { //! TODO: Verify that these are correct accoring to the spec! These encodings are the message //! bytes for authorisation signatures, so we better not need to change them again. + use std::io::Write; + use bytes::Bytes; - use crate::proto::keys::PUBLIC_KEY_LENGTH; + use crate::{proto::keys::PUBLIC_KEY_LENGTH, util::Encoder}; use super::{Entry, Path, DIGEST_LENGTH}; @@ -299,56 +301,38 @@ pub mod encodings { type UPathLengthPower = u16; type UPathCountPower = u16; - impl Path { - pub fn encoded_len(&self) -> usize { + impl Encoder for Path { + fn encoded_len(&self) -> usize { let lengths_len = PATH_COUNT_POWER + self.len() * PATH_LENGTH_POWER; let data_len = self.iter().map(Bytes::len).sum::(); lengths_len + data_len } /// Encode in the format for signatures into a mutable vector. 
- pub fn encode_into(&self, out: &mut Vec) { + fn encode_into(&self, out: &mut W) -> std::io::Result<()> { let component_count = self.len() as UPathCountPower; - out.extend_from_slice(&component_count.to_be_bytes()); + out.write_all(&component_count.to_be_bytes())?; for component in self.iter() { let len = component.len() as UPathLengthPower; - out.extend_from_slice(&len.to_be_bytes()); - out.extend_from_slice(&component); + out.write_all(&len.to_be_bytes())?; + out.write_all(&component)?; } - } - - pub fn encode(&self) -> Vec { - let mut out = Vec::with_capacity(self.encoded_len()); - self.encode_into(&mut out); - out + Ok(()) } } - impl Entry { - /// Convert the entry to a byte slice. - /// - /// This is invoked to create the signable for signatures over the entry. Thus, any change in - /// the encoding format here will make existing signatures invalid. - /// - /// The encoding follows the [`Willow spec for encoding`](https://willowprotocol.org/specs/encodings/index.html#enc_entry). - // TODO: make sure that the encoding fits the spec - pub fn encode(&self) -> Vec { - let len = self.encoded_len(); - let mut out = Vec::with_capacity(len); - self.encode_into(&mut out); - out - } - - pub fn encode_into(&self, out: &mut Vec) { - out.extend_from_slice(self.namespace_id.as_bytes()); - out.extend_from_slice(self.subspace_id.as_bytes()); - self.path.encode_into(out); - out.extend_from_slice(&self.timestamp.to_be_bytes()); - out.extend_from_slice(&self.payload_length.to_be_bytes()); - out.extend_from_slice(self.payload_digest.as_bytes()); + impl Encoder for Entry { + fn encode_into(&self, out: &mut W) -> std::io::Result<()> { + out.write_all(self.namespace_id.as_bytes())?; + out.write_all(self.subspace_id.as_bytes())?; + self.path.encode_into(out)?; + out.write_all(&self.timestamp.to_be_bytes())?; + out.write_all(&self.payload_length.to_be_bytes())?; + out.write_all(self.payload_digest.as_bytes())?; + Ok(()) } - pub fn encoded_len(&self) -> usize { + fn encoded_len(&self) 
-> usize { let path_len = self.path.encoded_len(); PUBLIC_KEY_LENGTH + PUBLIC_KEY_LENGTH + path_len + 8 + 8 + DIGEST_LENGTH } diff --git a/iroh-willow/src/util.rs b/iroh-willow/src/util.rs new file mode 100644 index 0000000000..ed3155fc80 --- /dev/null +++ b/iroh-willow/src/util.rs @@ -0,0 +1,13 @@ +use std::io; + +pub trait Encoder { + fn encoded_len(&self) -> usize; + + fn encode_into(&self, out: &mut W) -> io::Result<()>; + + fn encode(&self) -> Vec { + let mut out = Vec::with_capacity(self.encoded_len()); + self.encode_into(&mut out).expect("encoding not to fail"); + out + } +} From 171a32dd654929790518478dfa8ebd162cb36875 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 7 May 2024 00:02:04 +0200 Subject: [PATCH 014/198] wip --- Cargo.lock | 11 + iroh-willow/Cargo.toml | 2 + iroh-willow/src/net.rs | 27 +- iroh-willow/src/proto/meadowcap.rs | 6 +- iroh-willow/src/proto/wgps.rs | 28 +- iroh-willow/src/proto/willow.rs | 4 +- iroh-willow/src/session.rs | 12 +- iroh-willow/src/session/channel.rs | 0 iroh-willow/src/session/coroutine.rs | 463 ++++++++++++++++++++++++++ iroh-willow/src/session/reconciler.rs | 161 +++++++++ iroh-willow/src/session/util.rs | 33 ++ iroh-willow/src/store.rs | 84 +++-- iroh-willow/src/store/actor.rs | 261 +++++++++++++++ iroh-willow/src/util.rs | 20 +- iroh-willow/src/util/channel.rs | 376 +++++++++++++++++++++ 15 files changed, 1436 insertions(+), 52 deletions(-) create mode 100644 iroh-willow/src/session/channel.rs create mode 100644 iroh-willow/src/session/coroutine.rs create mode 100644 iroh-willow/src/session/reconciler.rs create mode 100644 iroh-willow/src/session/util.rs create mode 100644 iroh-willow/src/store/actor.rs create mode 100644 iroh-willow/src/util/channel.rs diff --git a/Cargo.lock b/Cargo.lock index 20498bfb50..1c19c4975c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2934,6 +2934,7 @@ dependencies = [ "iroh-net", "iroh-test", "num_enum", + "parking_lot", "postcard", "proptest", "quinn", @@ -2941,6 
+2942,7 @@ dependencies = [ "rand_chacha", "rand_core", "redb 2.0.0", + "rtrb", "self_cell", "serde", "strum 0.25.0", @@ -4613,6 +4615,15 @@ dependencies = [ "tokio", ] +[[package]] +name = "rtrb" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "636b228b5adb32add7f0e41e5084d38aa66fb0f942e8a91751c1e90023288fbe" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "rustc-demangle" version = "0.1.24" diff --git a/iroh-willow/Cargo.toml b/iroh-willow/Cargo.toml index 01cf4a552c..c1b14f2f42 100644 --- a/iroh-willow/Cargo.toml +++ b/iroh-willow/Cargo.toml @@ -46,6 +46,8 @@ futures = { version = "0.3", optional = true } self_cell = "1.0.3" zerocopy = { version = "0.8.0-alpha.7", features = ["derive"] } genawaiter = "0.99.1" +rtrb = "0.3.0" +parking_lot = "0.12.2" [dev-dependencies] iroh-test = { path = "../iroh-test" } diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index cf8d1b3f85..3c697e1362 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -11,7 +11,7 @@ use tracing::{debug, instrument}; use crate::{ proto::wgps::{AccessChallenge, ChallengeHash, CHALLENGE_HASH_LENGTH, MAX_PAYLOAD_SIZE_POWER}, session::{Role, Session, SessionInit}, - store::Store, + store::{actor::StoreHandle, Store}, }; use self::codec::WillowCodec; @@ -30,13 +30,13 @@ async fn next_if_ready( } #[instrument(skip_all, fields(role=?role))] -pub async fn run( - store: &mut S, +pub async fn run( + store: &StoreHandle, conn: quinn::Connection, role: Role, init: SessionInit, ) -> anyhow::Result<()> { - let (mut send, mut recv) = match role { + let (mut control_send, mut control_recv) = match role { Role::Alfie => conn.open_bi().await?, Role::Betty => conn.accept_bi().await?, }; @@ -44,13 +44,18 @@ pub async fn run( let our_nonce: AccessChallenge = rand::random(); debug!(?role, "start"); let (received_commitment, max_payload_size) = - exchange_commitments(&mut send, &mut recv, &our_nonce).await?; + 
exchange_commitments(&mut control_send, &mut control_recv, &our_nonce).await?; debug!(?role, "exchanged comittments"); + let (mut reconcile_send, mut reconcile_recv) = match role { + Role::Alfie => conn.open_bi().await?, + Role::Betty => conn.accept_bi().await?, + }; + let mut session = Session::new(role, our_nonce, max_payload_size, received_commitment, init); - let mut reader = FramedRead::new(recv, WillowCodec); - let mut writer = FramedWrite::new(send, WillowCodec); + let mut reader = FramedRead::new(control_recv, WillowCodec); + let mut writer = FramedWrite::new(control_send, WillowCodec); // TODO: blocking! session.process(store)?; @@ -129,7 +134,7 @@ mod tests { willow::{Entry, Path, SubspaceId}, }, session::{Role, SessionInit}, - store::{MemoryStore, Store}, + store::{actor::StoreHandle, MemoryStore, Store}, }; const ALPN: &[u8] = b"iroh-willow/0"; @@ -243,9 +248,11 @@ mod tests { debug!("init constructed"); + let handle_alfie = StoreHandle::spawn(store_alfie); + let handle_betty = StoreHandle::spawn(store_betty); let (res_alfie, res_betty) = tokio::join!( - run(&mut store_alfie, conn_alfie, Role::Alfie, init_alfie), - run(&mut store_betty, conn_betty, Role::Betty, init_betty), + run(&handle_alfie, conn_alfie, Role::Alfie, init_alfie), + run(&handle_betty, conn_betty, Role::Betty, init_betty), ); info!(time=?start.elapsed(), "reconciliation finished!"); diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index 35a64bac79..22bac8f7bc 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -21,7 +21,8 @@ pub fn is_authorised_write(entry: &Entry, token: &MeadowcapAuthorisationToken) - && capability.granted_area().includes_entry(entry) && capability .receiver() - .verify(&entry.encode(), signature) + // TODO: This allocates each time, avoid + .verify(&entry.encode().expect("encoding not to fail"), signature) .is_ok() } @@ -30,7 +31,8 @@ pub fn create_token( capability: McCapability, 
secret_key: &UserSecretKey, ) -> MeadowcapAuthorisationToken { - let signable = entry.encode(); + // TODO: This allocates each time, avoid + let signable = entry.encode().expect("encoding not to fail"); let signature = secret_key.sign(&signable); MeadowcapAuthorisationToken::from_parts(capability, signature) } diff --git a/iroh-willow/src/proto/wgps.rs b/iroh-willow/src/proto/wgps.rs index 59b968cc81..410041b208 100644 --- a/iroh-willow/src/proto/wgps.rs +++ b/iroh-willow/src/proto/wgps.rs @@ -4,7 +4,7 @@ use iroh_base::hash::Hash; use serde::{Deserialize, Serialize}; -use crate::util::Encoder; +use crate::util::{DecodeOutcome, Decoder, Encoder}; use super::{ grouping::{Area, AreaOfInterest, ThreeDRange}, @@ -69,7 +69,7 @@ pub enum HandleType { } /// The different logical channels employed by the WGPS. -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize, Copy, Clone)] pub enum LogicalChannel { /// Control channel ControlChannel, @@ -186,14 +186,31 @@ impl Encoder for Message { postcard::experimental::serialized_size(&self).unwrap() + 4 } - fn encode_into(&self, out: &mut W) -> std::io::Result<()> { + fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { let len = self.encoded_len() as u32; out.write_all(&len.to_be_bytes())?; - postcard::to_io(self, out).expect("encoding not to fail"); + postcard::to_io(self, out)?; Ok(()) } } +impl Decoder for Message { + fn decode_from(data: &[u8]) -> anyhow::Result> { + if data.len() < 4 { + return Ok(DecodeOutcome::NeedMoreData); + } + let len = u32::from_be_bytes(data[..4].try_into().expect("just checked")) as usize; + if data.len() < 4 + len { + return Ok(DecodeOutcome::NeedMoreData); + } + let item = postcard::from_bytes(&data[4..len])?; + Ok(DecodeOutcome::Decoded { + item, + consumed: len, + }) + } +} + impl Message { pub fn logical_channel(&self) -> LogicalChannel { match self { @@ -340,7 +357,8 @@ impl fmt::Debug for Fingerprint { impl Fingerprint { pub fn add_entry(&mut self, entry: 
&Entry) { - let next = Fingerprint(*Hash::new(&entry.encode()).as_bytes()); + // TODO: Don't allocate + let next = Fingerprint(*Hash::new(&entry.encode().expect("encoding not to fail")).as_bytes()); *self ^= next; } diff --git a/iroh-willow/src/proto/willow.rs b/iroh-willow/src/proto/willow.rs index cf23de9205..e299f6b99f 100644 --- a/iroh-willow/src/proto/willow.rs +++ b/iroh-willow/src/proto/willow.rs @@ -309,7 +309,7 @@ pub mod encodings { } /// Encode in the format for signatures into a mutable vector. - fn encode_into(&self, out: &mut W) -> std::io::Result<()> { + fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { let component_count = self.len() as UPathCountPower; out.write_all(&component_count.to_be_bytes())?; for component in self.iter() { @@ -322,7 +322,7 @@ pub mod encodings { } impl Encoder for Entry { - fn encode_into(&self, out: &mut W) -> std::io::Result<()> { + fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { out.write_all(self.namespace_id.as_bytes())?; out.write_all(self.subspace_id.as_bytes())?; self.path.encode_into(out)?; diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 99c5ffed9f..f338035b42 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -30,6 +30,8 @@ use self::resource::ScopedResources; const LOGICAL_CHANNEL_CAP: usize = 128; pub mod resource; +pub mod coroutine; +mod util; #[derive(Debug, thiserror::Error)] pub enum Error { @@ -57,6 +59,8 @@ pub enum Error { UnsupportedMessage, #[error("the received nonce does not match the received committment")] BrokenCommittement, + #[error("received an actor message for unknown session")] + SessionLost, } impl From for Error { @@ -345,7 +349,7 @@ impl Session { .ok_or(Error::AreaOfInterestDoesNotOverlap)?; let range = common_aoi.into_range(); - let fingerprint = store.range_fingerprint(namespace.into(), &range)?; + let fingerprint = store.fingerprint(namespace.into(), &range)?; self.send_fingerprint(range, fingerprint, *our_handle, 
*their_handle, None); self.reconciliation_started = true; Ok(()) @@ -411,7 +415,7 @@ impl Session { } let our_count = match our_count { Some(count) => count, - None => store.count_range(namespace, &range)?, + None => store.count(namespace, &range)?, }; let msg = ReconciliationAnnounceEntries { range: range.clone(), @@ -452,7 +456,7 @@ impl Session { let config = SyncConfig::default(); let mut announce_entries = vec![]; { - let iter = store.split_range(namespace, &range, &config)?; + let iter = store.split(namespace, &range, &config)?; let mut iter = iter.peekable(); while let Some(res) = iter.next() { let (subrange, action) = res?; @@ -522,7 +526,7 @@ impl Session { self.clear_pending_range_if_some(our_handle, is_final_reply_for_range)?; let namespace = self.range_is_authorised(&range, &our_handle, &their_handle)?; - let our_fingerprint = store.range_fingerprint(namespace, &range)?; + let our_fingerprint = store.fingerprint(namespace, &range)?; // case 1: fingerprint match. if our_fingerprint == their_fingerprint { diff --git a/iroh-willow/src/session/channel.rs b/iroh-willow/src/session/channel.rs new file mode 100644 index 0000000000..e69de29bb2 diff --git a/iroh-willow/src/session/coroutine.rs b/iroh-willow/src/session/coroutine.rs new file mode 100644 index 0000000000..fb0fa2b492 --- /dev/null +++ b/iroh-willow/src/session/coroutine.rs @@ -0,0 +1,463 @@ +use std::{ + collections::HashSet, + sync::{Arc, Mutex}, +}; + +use genawaiter::{ + sync::{Co, Gen}, + GeneratorState, +}; +use tracing::warn; + +use crate::{ + proto::{ + grouping::ThreeDRange, + keys::{NamespaceId, NamespacePublicKey}, + wgps::{ + AreaOfInterestHandle, CapabilityHandle, Fingerprint, LengthyEntry, LogicalChannel, + Message, ReadCapability, ReconciliationAnnounceEntries, ReconciliationSendEntry, + ReconciliationSendFingerprint, SetupBindAreaOfInterest, SetupBindStaticToken, + StaticToken, StaticTokenHandle, + }, + willow::{AuthorisationToken, AuthorisedEntry}, + }, + store::{ReadonlyStore, 
SplitAction, Store, SyncConfig}, + util::channel::{Sender, Receiver, WriteOutcome}, +}; + +use super::{resource::ScopedResources, Channel, Error, Role, Scope}; + +#[derive(Debug, Copy, Clone)] +pub enum Yield { + SendBufferFull(LogicalChannel), + // NextMessage, +} + +// pub enum Resume { +// Continue, +// NextMessage(Message), +// } + +#[derive(derive_more::Debug)] +pub struct ReconcileRoutine { + pub store: Arc, + pub channels: Arc, + pub state: SessionState, + #[debug(skip)] + pub co: Co, +} + +pub type SessionState = Arc>; + +#[derive(Debug)] +pub struct Channels { + control_sender: Sender, + reconciliation_sender: Sender, + reconciliation_receiver: Receiver, +} + +impl Channels { + pub fn sender(&self, channel: LogicalChannel) -> &Sender { + match channel { + LogicalChannel::ControlChannel => &self.control_sender, + LogicalChannel::ReconciliationChannel => &self.reconciliation_sender, + } + } + pub fn receiver(&self, channel: LogicalChannel) -> &Receiver { + match channel { + LogicalChannel::ControlChannel => unimplemented!(), + LogicalChannel::ReconciliationChannel => &self.reconciliation_receiver, + } + } +} + +#[derive(Debug)] +pub struct SessionStateInner { + our_resources: ScopedResources, + their_resources: ScopedResources, + reconciliation_started: bool, + pending_ranges: HashSet<(AreaOfInterestHandle, ThreeDRange)>, + pending_entries: Option, +} + +impl SessionStateInner { + fn resources(&self, scope: Scope) -> &ScopedResources { + match scope { + Scope::Ours => &self.our_resources, + Scope::Theirs => &self.their_resources, + } + } + + pub fn authorize_send_entry( + &mut self, + message: ReconciliationSendEntry, + ) -> Result { + let ReconciliationSendEntry { + entry, + static_token_handle, + dynamic_token, + } = message; + let mut remaining = self + .pending_entries + .clone() + .ok_or(Error::InvalidMessageInCurrentState)?; + remaining -= 1; + if remaining == 0 { + self.pending_entries = None; + } + let static_token = self + .their_resources + 
.static_tokens + .get(&static_token_handle)? + .clone(); + + let authorisation_token = AuthorisationToken::from_parts(static_token, dynamic_token); + let authorised_entry = AuthorisedEntry::try_from_parts(entry.entry, authorisation_token)?; + Ok(authorised_entry) + } + + fn clear_pending_range_if_some( + &mut self, + our_handle: AreaOfInterestHandle, + pending_range: Option, + ) -> Result<(), Error> { + if let Some(range) = pending_range { + // TODO: avoid clone + if !self.pending_ranges.remove(&(our_handle, range.clone())) { + warn!("received duplicate final reply for range marker"); + Err(Error::InvalidMessageInCurrentState) + } else { + Ok(()) + } + } else { + Ok(()) + } + } + + fn bind_static_token( + &mut self, + static_token: StaticToken, + ) -> anyhow::Result<(StaticTokenHandle, Option)> { + let (handle, is_new) = self + .our_resources + .static_tokens + .bind_if_new(static_token.clone()); + let msg = is_new.then(|| SetupBindStaticToken { static_token }); + Ok((handle, msg)) + } + + fn handle_to_namespace_id( + &self, + scope: Scope, + handle: &AreaOfInterestHandle, + ) -> Result { + let aoi = self.resources(scope).areas_of_interest.get(handle)?; + let capability = self.resources(scope).capabilities.get(&aoi.authorisation)?; + let namespace_id = capability.granted_namespace().into(); + Ok(namespace_id) + } + + fn range_is_authorised( + &self, + range: &ThreeDRange, + receiver_handle: &AreaOfInterestHandle, + sender_handle: &AreaOfInterestHandle, + ) -> Result { + let our_namespace = self.handle_to_namespace_id(Scope::Ours, receiver_handle)?; + let their_namespace = self.handle_to_namespace_id(Scope::Theirs, sender_handle)?; + if our_namespace != their_namespace { + return Err(Error::AreaOfInterestNamespaceMismatch); + } + let our_aoi = self.handle_to_aoi(Scope::Ours, receiver_handle)?; + let their_aoi = self.handle_to_aoi(Scope::Theirs, sender_handle)?; + + if !our_aoi.area().includes_range(&range) || !their_aoi.area().includes_range(&range) { + return 
Err(Error::RangeOutsideCapability); + } + Ok(our_namespace.into()) + } + + fn handle_to_aoi( + &self, + scope: Scope, + handle: &AreaOfInterestHandle, + ) -> Result<&SetupBindAreaOfInterest, Error> { + self.resources(scope).areas_of_interest.get(handle) + } +} + +// Note that all async methods yield to the owner of the coroutine. They are not running in a tokio +// context. You may not perform regular async operations in them. +impl ReconcileRoutine { + + pub async fn on_send_fingerprint( + mut self, + message: ReconciliationSendFingerprint, + ) -> Result<(), Error> { + let ReconciliationSendFingerprint { + range, + fingerprint: their_fingerprint, + sender_handle: their_handle, + receiver_handle: our_handle, + is_final_reply_for_range, + } = message; + + let namespace = { + let mut state = self.state.lock().unwrap(); + state.reconciliation_started = true; + state.clear_pending_range_if_some(our_handle, is_final_reply_for_range)?; + state.range_is_authorised(&range, &our_handle, &their_handle)? + }; + + let our_fingerprint = self.store.fingerprint(namespace, &range)?; + + // case 1: fingerprint match. 
+ if our_fingerprint == their_fingerprint { + let msg = ReconciliationAnnounceEntries { + range: range.clone(), + count: 0, + want_response: false, + will_sort: false, + sender_handle: our_handle, + receiver_handle: their_handle, + is_final_reply_for_range: Some(range), + }; + self.send_reconciliation(msg).await?; + } + // case 2: fingerprint is empty + else if their_fingerprint.is_empty() { + self.announce_and_send_entries( + namespace, + &range, + our_handle, + their_handle, + true, + Some(range.clone()), + None, + ) + .await?; + } + // case 3: fingerprint doesn't match and is non-empty + else { + // reply by splitting the range into parts unless it is very short + self.split_range_and_send_parts(namespace, &range, our_handle, their_handle) + .await?; + } + Ok(()) + } + pub async fn on_announce_entries( + mut self, + message: ReconciliationAnnounceEntries, + ) -> Result<(), Error> { + let ReconciliationAnnounceEntries { + range, + count, + want_response, + will_sort: _, + sender_handle: their_handle, + receiver_handle: our_handle, + is_final_reply_for_range, + } = message; + + let namespace = { + let mut state = self.state.lock().unwrap(); + state.clear_pending_range_if_some(our_handle, is_final_reply_for_range)?; + if state.pending_entries.is_some() { + return Err(Error::InvalidMessageInCurrentState); + } + let namespace = state.range_is_authorised(&range, &our_handle, &their_handle)?; + if count != 0 { + state.pending_entries = Some(count); + } + namespace + }; + if want_response { + self.announce_and_send_entries( + namespace, + &range, + our_handle, + their_handle, + false, + Some(range.clone()), + None, + ) + .await?; + } + Ok(()) + } + + // fn on_send_entry(&self, message: ReconciliationSendEntry) -> Result<(), Error> { + // // Message::ReconciliationSendEntry(message) => { + // // let ReconciliationSendEntry { + // // entry, + // // static_token_handle, + // // dynamic_token, + // // } = message; + // // let static_token = { + // // let mut state = 
self.state.lock().unwrap(); + // // let mut remaining = state + // // .pending_entries + // // .clone() + // // .ok_or(Error::InvalidMessageInCurrentState)?; + // // remaining -= 1; + // // if remaining == 0 { + // // state.pending_entries = None; + // // } + // // state + // // .their_resources + // // .static_tokens + // // .get(&static_token_handle)? + // // .clone() + // // }; + // // + // // let authorisation_token = + // // AuthorisationToken::from_parts(static_token, dynamic_token); + // // let authorised_entry = + // // AuthorisedEntry::try_from_parts(entry.entry, authorisation_token)?; + // // self.store.ingest_entry(&authorised_entry)?; + // Ok(()) + // } + + async fn send_reconciliation(&self, msg: impl Into) -> anyhow::Result<()> { + self.send(msg).await + } + + async fn send_control(&self, msg: impl Into) -> anyhow::Result<()> { + self.send(msg).await + } + + async fn send(&self, message: impl Into) -> anyhow::Result<()> { + let message: Message = message.into(); + let channel = message.logical_channel(); + let sender = self.channels.sender(message.logical_channel()); + + loop { + match sender.send(&message)? 
{ + WriteOutcome::Ok => break Ok(()), + WriteOutcome::BufferFull => { + self.co.yield_(Yield::SendBufferFull(channel)).await; + } + } + } + } + + async fn send_fingerprint( + &mut self, + range: ThreeDRange, + fingerprint: Fingerprint, + our_handle: AreaOfInterestHandle, + their_handle: AreaOfInterestHandle, + is_final_reply_for_range: Option, + ) -> anyhow::Result<()> { + { + let mut state = self.state.lock().unwrap(); + state.pending_ranges.insert((our_handle, range.clone())); + } + let msg = ReconciliationSendFingerprint { + range, + fingerprint, + sender_handle: our_handle, + receiver_handle: their_handle, + is_final_reply_for_range, + }; + self.send_reconciliation(msg).await?; + Ok(()) + } + + async fn announce_and_send_entries( + &mut self, + namespace: NamespaceId, + range: &ThreeDRange, + our_handle: AreaOfInterestHandle, + their_handle: AreaOfInterestHandle, + want_response: bool, + is_final_reply_for_range: Option, + our_count: Option, + ) -> Result<(), Error> { + if want_response { + let mut state = self.state.lock().unwrap(); + state.pending_ranges.insert((our_handle, range.clone())); + } + let our_count = match our_count { + Some(count) => count, + None => self.store.count(namespace, &range)?, + }; + let msg = ReconciliationAnnounceEntries { + range: range.clone(), + count: our_count, + want_response, + will_sort: false, // todo: sorted? 
+ sender_handle: our_handle, + receiver_handle: their_handle, + is_final_reply_for_range, + }; + self.send_reconciliation(msg).await?; + for authorised_entry in self.store.get_entries_with_authorisation(namespace, &range) { + let authorised_entry = authorised_entry?; + let (entry, token) = authorised_entry.into_parts(); + let (static_token, dynamic_token) = token.into_parts(); + // TODO: partial payloads + let available = entry.payload_length; + // TODO avoid such frequent locking + let (static_token_handle, static_token_bind_msg) = + self.state.lock().unwrap().bind_static_token(static_token)?; + if let Some(msg) = static_token_bind_msg { + self.send_control(msg).await?; + } + let msg = ReconciliationSendEntry { + entry: LengthyEntry::new(entry, available), + static_token_handle, + dynamic_token, + }; + self.send_reconciliation(msg).await?; + } + Ok(()) + } + + async fn split_range_and_send_parts( + &mut self, + namespace: NamespaceId, + range: &ThreeDRange, + our_handle: AreaOfInterestHandle, + their_handle: AreaOfInterestHandle, + ) -> Result<(), Error> { + // TODO: expose this config + let config = SyncConfig::default(); + { + let iter = self.store.split(namespace, &range, &config)?; + // TODO: avoid collect + let iter = iter.collect::>().into_iter(); + let mut iter = iter.peekable(); + while let Some(res) = iter.next() { + let (subrange, action) = res?; + let is_last = iter.peek().is_none(); + let is_final_reply = is_last.then(|| range.clone()); + match action { + SplitAction::SendEntries(count) => { + self.announce_and_send_entries( + namespace, + &subrange, + our_handle, + their_handle, + true, + is_final_reply, + Some(count), + ) + .await?; + } + SplitAction::SendFingerprint(fingerprint) => { + self.send_fingerprint( + subrange, + fingerprint, + our_handle, + their_handle, + is_final_reply, + ) + .await?; + } + } + } + } + Ok(()) + } +} diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs new file mode 100644 index 
0000000000..fd48d3baea --- /dev/null +++ b/iroh-willow/src/session/reconciler.rs @@ -0,0 +1,161 @@ +use std::collections::VecDeque; + +use super::Error; +use genawaiter::{ + sync::{Co, Gen}, + GeneratorState, +}; +use tracing::info; + +#[derive(Debug)] +pub enum Yield { + Init, + OutboxFull, + InboxEmpty, + AllDone(usize), +} + +// pub type Coroutine = genawaiter::Coroutine + +#[derive(Debug)] +pub struct Reconciler { + // gen: Option>, // co: genawaiter::sync::Co>, + state: State, +} + +#[derive(Debug)] +pub struct State { + outbox: VecDeque, + inbox: VecDeque, + count: usize, +} + +pub struct WorkerState { + inbox: flume::Receiver, + outbox: flume::Sender, + sum: usize, +} + +pub struct NetState { + outbox: flume::Receiver, + inbox: flume::Sender, +} + +fn create_state(cap: usize) -> (NetState, WorkerState) { + let (outbox_send, outbox_recv) = flume::bounded(cap); + let (inbox_send, inbox_recv) = flume::bounded(cap); + let ws = WorkerState { + inbox: inbox_recv, + outbox: outbox_send, + sum: 0, + }; + let ns = NetState { + inbox: inbox_send, + outbox: outbox_recv, + }; + (ns, ws) +} + +enum WorkerToNet { + MayResume, + Yield, + Finished, + Out(i32), +} + +async fn run_net( + ns: NetState, + recv: flume::Receiver, + send: flume::Sender, +) -> anyhow::Result<()> { + loop { + let mut pending_message = None; + // let mut yieled = true; + tokio::select! 
{ + next = recv.recv_async(), if pending_message.is_none( )=> { + let msg = next?; + // if yielded { + // yielded = false; + // notify_worker(); + // } + if let Err(msg) = ns.inbox.try_send(msg) { + pending_message.insert(msg.into_inner()); + } + } + out = ns.outbox.recv_async() => { + let out = out?; + match out { + WorkerToNet::MayResume => { + if let Some(msg) = pending_message.take() { + ns.inbox.send_async(msg).await?; + } + } + WorkerToNet::Out(msg) => { + send.send_async(msg).await?; + } + WorkerToNet::Finished => break, + WorkerToNet::Yield => { + // yielded = true; + } + } + } + } + } + Ok(()) +} + +// struct SharedState + +impl Reconciler { + pub fn run_worker(&mut self) { + let mut gen = Gen::new(|co| Self::producer(co)); + loop { + match gen.resume_with(&mut self.state) { + GeneratorState::Yielded(val) => { + info!("Yielded: {val:?}") + } + GeneratorState::Complete(res) => { + info!("Complete: {res:?}") + } + } + } + } + + pub fn push_inbox(&mut self, msg: i32) -> bool { + self.state.inbox.push_back(msg); + if self.state.inbox.len() == 2 { + false + } else { + true + } + } + + pub fn drain_outbox(&mut self) -> impl Iterator + '_ { + self.state.outbox.drain(..) 
+ } + + async fn producer(co: Co) -> Result<(), Error> { + loop { + let state = co.yield_(Yield::Init).await; + // exit condition + if state.count > 6 { + co.yield_(Yield::AllDone(state.count)).await; + return Ok(()); + } + + let next = state.inbox.pop_front(); + match next { + None => { + co.yield_(Yield::InboxEmpty).await; + continue; + } + Some(msg) => { + state.outbox.push_back(msg * 17); + if state.outbox.len() == 3 { + co.yield_(Yield::OutboxFull).await; + } + } + } + } + } +} diff --git a/iroh-willow/src/session/util.rs b/iroh-willow/src/session/util.rs new file mode 100644 index 0000000000..0fa7bc729f --- /dev/null +++ b/iroh-willow/src/session/util.rs @@ -0,0 +1,33 @@ +// use crate::{ +// proto::{grouping::ThreeDRange, keys::NamespaceId, wgps::AreaOfInterestHandle}, +// store::{Store, SyncConfig}, +// session::Error, +// }; + +// pub struct SplitRange { +// snapshot: Snapshot, +// args: SplitRangeArgs, +// config: SyncConfig, +// } +// +// pub struct SplitRangeArgs { +// namespace: NamespaceId, +// range: ThreeDRange, +// our_handle: AreaOfInterestHandle, +// their_handle: AreaOfInterestHandle, +// } +// +// pub enum Yield { +// Done, +// OutboxFull, +// } +// +// fn run(mut state: SplitRange) -> Result<(), Error> { +// let SplitRange { +// snapshot: store, +// args, +// config, +// } = &mut state; +// let iter = store.split_range(args.namespace, &args.range, &config)?; +// Ok(()) +// } diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index 94bb0cb22e..1d798c2329 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -1,4 +1,4 @@ -use std::collections::HashMap; +use std::{collections::HashMap, sync::Arc}; use anyhow::Result; @@ -8,6 +8,8 @@ use crate::proto::{ willow::{AuthorisedEntry, Entry, NamespaceId}, }; +pub mod actor; + #[derive(Debug, Clone, Copy)] pub struct SyncConfig { /// Up to how many values to send immediately, before sending only a fingerprint. 
@@ -25,32 +27,26 @@ impl Default for SyncConfig { } } -pub trait Store: Send + 'static { - fn range_fingerprint( - &mut self, - namespace: NamespaceId, - range: &ThreeDRange, - ) -> Result; +pub trait ReadonlyStore: Send + 'static { + fn fingerprint(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result; - fn split_range( - &mut self, + fn split( + &self, namespace: NamespaceId, range: &ThreeDRange, config: &SyncConfig, ) -> Result>>; - fn count_range(&mut self, namespace: NamespaceId, range: &ThreeDRange) -> Result; + fn count(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result; fn get_entries_with_authorisation<'a>( - &'a mut self, + &'a self, namespace: NamespaceId, range: &ThreeDRange, ) -> impl Iterator> + 'a; - fn ingest_entry(&mut self, entry: &AuthorisedEntry) -> Result<()>; - fn get_entries<'a>( - &'a mut self, + &'a self, namespace: NamespaceId, range: &ThreeDRange, ) -> impl Iterator> + 'a { @@ -59,18 +55,21 @@ pub trait Store: Send + 'static { } } +pub trait Store: ReadonlyStore + 'static { + type Snapshot: ReadonlyStore + Send; + + fn snapshot(&mut self) -> Result; + fn ingest_entry(&mut self, entry: &AuthorisedEntry) -> Result<()>; +} + /// A very inefficient in-memory store, for testing purposes only #[derive(Debug, Default)] pub struct MemoryStore { entries: HashMap>, } -impl Store for MemoryStore { - fn range_fingerprint( - &mut self, - namespace: NamespaceId, - range: &ThreeDRange, - ) -> Result { +impl ReadonlyStore for MemoryStore { + fn fingerprint(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result { let mut fingerprint = Fingerprint::default(); for entry in self.get_entries(namespace, range) { let entry = entry?; @@ -79,8 +78,8 @@ impl Store for MemoryStore { Ok(fingerprint) } - fn split_range( - &mut self, + fn split( + &self, namespace: NamespaceId, range: &ThreeDRange, config: &SyncConfig, @@ -144,18 +143,18 @@ impl Store for MemoryStore { } let mut out = vec![]; for range in ranges { - let fingerprint = 
self.range_fingerprint(namespace, &range)?; + let fingerprint = self.fingerprint(namespace, &range)?; out.push(Ok((range, SplitAction::SendFingerprint(fingerprint)))); } Ok(out.into_iter()) } - fn count_range(&mut self, namespace: NamespaceId, range: &ThreeDRange) -> Result { + fn count(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result { Ok(self.get_entries(namespace, range).count() as u64) } fn get_entries_with_authorisation<'a>( - &'a mut self, + &'a self, namespace: NamespaceId, range: &ThreeDRange, ) -> impl Iterator> + 'a { @@ -168,7 +167,42 @@ impl Store for MemoryStore { .collect::>() .into_iter() } +} + +impl ReadonlyStore for Arc { + fn fingerprint(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result { + MemoryStore::fingerprint(&self, namespace, range) + } + + fn split( + &self, + namespace: NamespaceId, + range: &ThreeDRange, + config: &SyncConfig, + ) -> Result>> { + MemoryStore::split(&self, namespace, range, config) + } + + fn count(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result { + MemoryStore::count(&self, namespace, range) + } + + fn get_entries_with_authorisation<'a>( + &'a self, + namespace: NamespaceId, + range: &ThreeDRange, + ) -> impl Iterator> + 'a { + MemoryStore::get_entries_with_authorisation(&self, namespace, range) + } +} +impl Store for MemoryStore { + type Snapshot = Arc; + fn snapshot(&mut self) -> Result { + Ok(Arc::new(Self { + entries: self.entries.clone(), + })) + } fn ingest_entry(&mut self, entry: &AuthorisedEntry) -> Result<()> { let entries = self.entries.entry(entry.namespace_id()).or_default(); let new = entry.entry(); diff --git a/iroh-willow/src/store/actor.rs b/iroh-willow/src/store/actor.rs new file mode 100644 index 0000000000..aa8253bec3 --- /dev/null +++ b/iroh-willow/src/store/actor.rs @@ -0,0 +1,261 @@ +use std::{ + collections::{hash_map, HashMap, VecDeque}, + sync::{Arc, Mutex}, +}; + +use futures::{ + future::{BoxFuture, LocalBoxFuture}, + FutureExt, +}; +use genawaiter::{ + 
sync::{Co, Gen}, + GeneratorState, +}; +use tracing::error; +// use iroh_net::NodeId; + +use super::Store; +use crate::{ + proto::wgps::{LogicalChannel, Message, ReconciliationSendEntry}, + session::{ + coroutine::{Channels, ReconcileRoutine, SessionState, Yield}, + Error, + }, + util::channel::{self, ReadOutcome, Receiver}, +}; + +pub const CHANNEL_CAP: usize = 1024; + +#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)] +pub struct SessionId(u64); +pub type NodeId = SessionId; + +pub struct StoreHandle { + tx: flume::Sender, +} +impl StoreHandle { + pub fn spawn(store: S) -> StoreHandle { + let (tx, rx) = flume::bounded(CHANNEL_CAP); + let _join_handle = std::thread::spawn(move || { + let actor = StorageThread { + store, + sessions: Default::default(), + actor_rx: rx, + }; + if let Err(error) = actor.run() { + error!(?error, "storage thread failed"); + }; + }); + StoreHandle { tx } + } + pub async fn send(&self, action: ToActor) -> anyhow::Result<()> { + self.tx.send_async(action).await?; + Ok(()) + } +} + +#[derive(Debug)] +pub enum ToActor { + InitSession { + peer: NodeId, + state: SessionState, + channels: Arc, + }, + DropSession { + peer: NodeId, + }, + ResumeWrite { + peer: NodeId, + channel: LogicalChannel, + }, + ResumeRead { + peer: NodeId, + channel: LogicalChannel, + }, +} + +#[derive(Debug)] +struct StorageSession { + state: SessionState, + channels: Arc, + waiting: WaitingCoroutines, +} + +#[derive(derive_more::Debug, Default)] +struct WaitingCoroutines { + #[debug("{}", "on_control.len()")] + on_control: VecDeque, + #[debug("{}", "on_reconciliation.len()")] + on_reconciliation: VecDeque, +} + +impl WaitingCoroutines { + fn get_mut(&mut self, channel: LogicalChannel) -> &mut VecDeque { + match channel { + LogicalChannel::ControlChannel => &mut self.on_control, + LogicalChannel::ReconciliationChannel => &mut self.on_reconciliation, + } + } + fn push_back(&mut self, channel: LogicalChannel, generator: ReconcileGen) { + 
self.get_mut(channel).push_back(generator); + } + fn push_front(&mut self, channel: LogicalChannel, generator: ReconcileGen) { + self.get_mut(channel).push_front(generator); + } + fn pop_front(&mut self, channel: LogicalChannel) -> Option { + self.get_mut(channel).pop_front() + } + + fn is_empty(&self) -> bool { + self.on_control.is_empty() && self.on_reconciliation.is_empty() + } +} + +#[derive(Debug)] +pub struct StorageThread { + store: S, + sessions: HashMap, + actor_rx: flume::Receiver, +} + +type ReconcileFut = LocalBoxFuture<'static, Result<(), Error>>; +type ReconcileGen = Gen; + +impl StorageThread { + pub fn run(mut self) -> anyhow::Result<()> { + loop { + match self.actor_rx.recv() { + Err(_) => break, + Ok(message) => self.handle_message(message)?, + } + } + Ok(()) + } + + fn handle_message(&mut self, message: ToActor) -> Result<(), Error> { + match message { + ToActor::InitSession { + peer, + state, + channels, + } => { + let session = StorageSession { + state, + channels, + waiting: Default::default(), + }; + self.sessions.insert(peer, session); + self.resume_read(peer, LogicalChannel::ReconciliationChannel)?; + } + ToActor::DropSession { peer } => { + self.sessions.remove(&peer); + } + ToActor::ResumeWrite { peer, channel } => { + self.resume_write(peer, channel)?; + } + ToActor::ResumeRead { peer, channel } => { + self.resume_read(peer, channel)?; + } + } + Ok(()) + } + fn resume_read(&mut self, peer: NodeId, channel: LogicalChannel) -> Result<(), Error> { + let channel = self.session(&peer)?.channels.receiver(channel).clone(); + loop { + match channel.read_message()? 
{ + ReadOutcome::NeedMoreData => { + channel.need_notify(); + break; + } + ReadOutcome::Item(message) => { + self.on_message(peer, message)?; + } + } + } + Ok(()) + } + + fn session_mut(&mut self, peer: &NodeId) -> Result<&mut StorageSession, Error> { + self.sessions + .get_mut(peer) + .ok_or(Error::InvalidMessageInCurrentState) + } + + fn session(&mut self, peer: &NodeId) -> Result<&StorageSession, Error> { + self.sessions + .get(peer) + .ok_or(Error::InvalidMessageInCurrentState) + } + fn on_message(&mut self, peer: NodeId, message: Message) -> Result<(), Error> { + match message { + Message::ReconciliationSendFingerprint(message) => { + self.start_coroutine(peer, |routine| { + routine.on_send_fingerprint(message).boxed_local() + })?; + } + Message::ReconciliationAnnounceEntries(message) => { + self.start_coroutine(peer, |routine| { + routine.on_announce_entries(message).boxed_local() + })?; + } + Message::ReconciliationSendEntry(message) => { + let session = self.session_mut(&peer)?; + let authorised_entry = session + .state + .lock() + .unwrap() + .authorize_send_entry(message)?; + self.store.ingest_entry(&authorised_entry)?; + } + _ => return Err(Error::UnsupportedMessage), + } + Ok(()) + } + + fn start_coroutine( + &mut self, + peer: NodeId, + producer: impl FnOnce(ReconcileRoutine) -> ReconcileFut, + ) -> Result<(), Error> { + let session = self.sessions.get_mut(&peer).ok_or(Error::SessionLost)?; + let snapshot = Arc::new(self.store.snapshot()?); + + let channels = session.channels.clone(); + let state = session.state.clone(); + + let mut generator = Gen::new(move |co| { + let routine = ReconcileRoutine { + store: snapshot, + channels, + state, + co, + }; + (producer)(routine) + }); + match generator.resume() { + GeneratorState::Yielded(Yield::SendBufferFull(channel)) => { + session.waiting.push_back(channel, generator); + Ok(()) + } + GeneratorState::Complete(res) => res, + } + } + + fn resume_write(&mut self, peer: NodeId, channel: LogicalChannel) -> 
Result<(), Error> { + let session = self.session_mut(&peer)?; + let Some(mut generator) = session.waiting.pop_front(channel) else { + // debug_assert!(false, "resume_coroutine called but no generator"); + // TODO: error? + return Ok(()); + }; + match generator.resume() { + GeneratorState::Yielded(why) => match why { + Yield::SendBufferFull(channel) => { + session.waiting.push_front(channel, generator); + Ok(()) + } + }, + GeneratorState::Complete(res) => res, + } + } +} diff --git a/iroh-willow/src/util.rs b/iroh-willow/src/util.rs index ed3155fc80..b6cc25ed7d 100644 --- a/iroh-willow/src/util.rs +++ b/iroh-willow/src/util.rs @@ -1,13 +1,25 @@ use std::io; +pub mod channel; + pub trait Encoder { fn encoded_len(&self) -> usize; - fn encode_into(&self, out: &mut W) -> io::Result<()>; + fn encode_into(&self, out: &mut W) -> anyhow::Result<()>; - fn encode(&self) -> Vec { + fn encode(&self) -> anyhow::Result> { let mut out = Vec::with_capacity(self.encoded_len()); - self.encode_into(&mut out).expect("encoding not to fail"); - out + self.encode_into(&mut out)?; + Ok(out) } } + +pub trait Decoder: Sized { + fn decode_from(data: &[u8]) -> anyhow::Result>; +} + +#[derive(Debug)] +pub enum DecodeOutcome { + NeedMoreData, + Decoded { item: T, consumed: usize } +} diff --git a/iroh-willow/src/util/channel.rs b/iroh-willow/src/util/channel.rs new file mode 100644 index 0000000000..d80dbf89dc --- /dev/null +++ b/iroh-willow/src/util/channel.rs @@ -0,0 +1,376 @@ +use std::{ + io, + marker::PhantomData, + sync::{Arc, Mutex}, +}; + +use bytes::{Buf, Bytes, BytesMut}; +use tokio::sync::Notify; + +use crate::proto::wgps::Message; + +use super::{DecodeOutcome, Decoder, Encoder}; + +#[derive(Debug)] +struct Shared { + buf: BytesMut, + max_buffer_size: usize, + notify_readable: Arc, + notify_writable: Arc, + write_blocked: bool, + need_read_notify: bool, + need_write_notify: bool, +} + +impl Shared { + fn new(cap: usize) -> Self { + Self { + buf: BytesMut::new(), + max_buffer_size: 
cap, + notify_readable: Default::default(), + notify_writable: Default::default(), + write_blocked: false, + need_read_notify: false, + need_write_notify: false, + } + } + fn read_slice(&self) -> &[u8] { + &self.buf[..] + } + + fn read_advance(&mut self, cnt: usize) { + self.buf.advance(cnt); + if cnt > 0 && self.write_blocked { + self.write_blocked = false; + self.notify_writable.notify_one(); + } + } + + fn read_bytes(&mut self) -> Bytes { + let len = self.buf.len(); + if len > 0 && self.write_blocked { + self.write_blocked = false; + self.notify_writable.notify_one(); + } + self.buf.split_to(len).freeze() + } + + fn write_slice(&mut self, len: usize) -> Option<&mut [u8]> { + if self.remaining_write_capacity() < len { + self.write_blocked = true; + None + } else { + let old_len = self.buf.len(); + let new_len = self.buf.remaining() + len; + // TODO: check if the potential truncate harms perf + self.buf.resize(new_len, 0u8); + self.notify_readable.notify_one(); + Some(&mut self.buf[old_len..new_len]) + } + } + + fn write_message(&mut self, item: &T) -> anyhow::Result { + let len = item.encoded_len(); + if let Some(slice) = self.write_slice(len) { + let mut cursor = io::Cursor::new(slice); + item.encode_into(&mut cursor)?; + Ok(WriteOutcome::Ok) + } else { + Ok(WriteOutcome::BufferFull) + } + } + + fn read_message(&mut self) -> anyhow::Result> { + let data = self.read_slice(); + let res = match T::decode_from(data)? 
{ + DecodeOutcome::NeedMoreData => ReadOutcome::NeedMoreData, + DecodeOutcome::Decoded { item, consumed } => { + self.read_advance(consumed); + ReadOutcome::Item(item) + } + }; + Ok(res) + } + + fn need_read_notify(&mut self) { + self.need_read_notify = true; + } + fn need_write_notify(&mut self) { + self.need_write_notify = true; + } + + fn remaining_write_capacity(&self) -> usize { + self.max_buffer_size - self.buf.len() + } +} + +#[derive(Debug)] +pub enum ReadOutcome { + NeedMoreData, + Item(T), +} + +#[derive(Debug)] +pub enum WriteOutcome { + BufferFull, + Ok, +} + +#[derive(Debug)] +pub struct Receiver { + shared: Arc>, + _ty: PhantomData, +} + +impl Clone for Receiver { + fn clone(&self) -> Self { + Self { + shared: Arc::clone(&self.shared), + _ty: PhantomData, + } + } +} + +impl Receiver { + pub fn read_bytes(&self) -> Bytes { + self.shared.lock().unwrap().read_bytes() + } + + pub fn read_message(&self) -> anyhow::Result> { + self.shared.lock().unwrap().read_message() + } + + pub fn need_notify(&self) { + self.shared.lock().unwrap().need_read_notify() + } + + pub async fn notify_readable(&self) { + let shared = self.shared.lock().unwrap(); + if !shared.read_slice().is_empty() { + return; + } + let notify = shared.notify_readable.clone(); + drop(shared); + notify.notified().await + } + + pub async fn read_message_async(&self) -> anyhow::Result { + loop { + let mut shared = self.shared.lock().unwrap(); + let notify = Arc::clone(&shared.notify_readable); + match shared.read_message()? 
{ + ReadOutcome::NeedMoreData => { + drop(shared); + notify.notified().await; + } + ReadOutcome::Item(item) => return Ok(item), + } + } + } +} + +#[derive(Debug)] +pub struct Sender { + shared: Arc>, + _ty: PhantomData, +} + +impl Clone for Sender { + fn clone(&self) -> Self { + Self { + shared: Arc::clone(&self.shared), + _ty: PhantomData, + } + } +} + +impl Sender { + // fn write_slice_into(&self, len: usize) -> Option<&mut [u8]> { + // let mut shared = self.shared.lock().unwrap(); + // shared.write_slice(len) + // } + pub fn need_notify(&self) { + self.shared.lock().unwrap().need_write_notify() + } + + fn write_slice(&self, data: &[u8]) -> bool { + let mut shared = self.shared.lock().unwrap(); + match shared.write_slice(data.len()) { + None => false, + Some(out) => { + out.copy_from_slice(data); + true + } + } + } + + async fn write_slice_async(&self, data: &[u8]) -> bool { + loop { + let mut shared = self.shared.lock().unwrap(); + if shared.remaining_write_capacity() < data.len() { + let notify = shared.notify_writable.clone(); + drop(shared); + notify.notified().await; + } else { + let out = shared.write_slice(data.len()).expect("just checked"); + out.copy_from_slice(data); + return true; + } + } + } + + pub async fn notify_writable(&self) { + let shared = self.shared.lock().unwrap(); + if shared.remaining_write_capacity() > 0 { + return; + } + let notify = shared.notify_readable.clone(); + drop(shared); + notify.notified().await; + } + + fn remaining_write_capacity(&self) -> usize { + self.shared.lock().unwrap().remaining_write_capacity() + } + + pub fn send(&self, message: &T) -> anyhow::Result { + self.shared.lock().unwrap().write_message(message) + } + + // pub async fn send_co( + // &self, + // message: &T, + // yield_fn: F, + // // co: &genawaiter::sync::Co, + // // yield_value: Y, + // ) -> anyhow::Result<()> + // where + // F: Fn() -> Fut, + // Fut: std::future::Future, + // { + // loop { + // let res = 
self.shared.lock().unwrap().write_message(message)?; + // match res { + // WriteOutcome::BufferFull => (yield_fn)().await, + // WriteOutcome::Ok => break Ok(()), + // } + // } + // } + + pub async fn send_async(&self, message: T) -> anyhow::Result<()> { + loop { + let mut shared = self.shared.lock().unwrap(); + match shared.write_message(&message)? { + WriteOutcome::Ok => return Ok(()), + WriteOutcome::BufferFull => { + let notify = shared.notify_writable.clone(); + drop(shared); + notify.notified().await; + } + } + } + } +} + +pub fn channel(cap: usize) -> (Sender, Receiver) { + let shared = Shared::new(cap); + let shared = Arc::new(Mutex::new(shared)); + let sender = Sender { + shared: shared.clone(), + _ty: PhantomData, + }; + let receiver = Receiver { + shared, + _ty: PhantomData, + }; + (sender, receiver) +} + +// #[derive(Debug)] +// pub struct ChannelSender { +// id: u64, +// buf: rtrb::Producer, +// // waker: Option, +// } +// +// impl ChannelSender { +// pub fn remaining_capacity(&self) -> usize { +// self.buf.slots() +// } +// pub fn can_write_message(&mut self, message: &Message) -> bool { +// message.encoded_len() <= self.remaining_capacity() +// } +// +// pub fn write_message(&mut self, message: &Message) -> bool { +// let encoded_len = message.encoded_len(); +// if encoded_len > self.remaining_capacity() { +// return false; +// } +// message.encode_into(&mut self.buf).expect("length checked"); +// if let Some(waker) = self.waker.take() { +// waker.wake(); +// } +// true +// } +// +// pub fn set_waker(&mut self, waker: Waker) { +// self.waker = Some(waker); +// } +// } +// +// #[derive(Debug)] +// pub enum ToStoreActor { +// // NotifyWake(u64, Arc), +// Resume(u64), +// } +// +// #[derive(Debug)] +// pub struct ChannelReceiver { +// id: u64, +// // buf: rtrb::Consumer, +// buf: BytesMut, +// to_actor: flume::Sender, +// notify_readable: Arc, +// } +// +// impl ChannelReceiver { +// pub async fn read_chunk(&mut self) -> Result, ChunkError> { +// if 
self.is_empty() { +// self.acquire().await; +// } +// self.buf.read_chunk(self.readable_len()) +// } +// +// pub fn is_empty(&self) -> bool { +// self.buf.is_empty() +// } +// +// pub fn readable_len(&self) -> usize { +// self.buf.slots() +// } +// +// pub async fn resume(&mut self) { +// self.to_actor +// .send_async(ToStoreActor::Resume(self.id)) +// .await +// .unwrap(); +// } +// +// pub async fn acquire(&mut self) { +// if !self.is_empty() { +// return; +// } +// self.notify_readable.notified().await; +// } +// } +// +// pub struct ChannelSender { +// id: u64, +// buf: rtrb::Producer, +// to_actor: flume::Sender, +// notify_readable: Arc, +// } +// +// impl ChannelSender { +// pub +// } From 8a66019d2b9948954e897ba15a46ec3a4c77e918 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 7 May 2024 17:39:46 +0200 Subject: [PATCH 015/198] wip --- Cargo.lock | 13 + iroh-base/src/base32.rs | 2 +- iroh-willow/Cargo.toml | 2 + iroh-willow/src/lib.rs | 2 +- iroh-willow/src/net.rs | 396 ++++++++++--- iroh-willow/src/proto/grouping.rs | 11 +- iroh-willow/src/proto/wgps.rs | 77 ++- iroh-willow/src/session.rs | 857 ++++++++++++++------------- iroh-willow/src/session/coroutine.rs | 127 +++- iroh-willow/src/store/actor.rs | 310 +++++++--- iroh-willow/src/util.rs | 2 +- iroh-willow/src/util/channel.rs | 178 ++++-- 12 files changed, 1274 insertions(+), 703 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1c19c4975c..67941e7175 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2953,6 +2953,8 @@ dependencies = [ "tokio-stream", "tokio-util", "tracing", + "tracing-chrome", + "tracing-subscriber", "zerocopy 0.8.0-alpha.7", ] @@ -5944,6 +5946,17 @@ dependencies = [ "syn 2.0.66", ] +[[package]] +name = "tracing-chrome" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf0a738ed5d6450a9fb96e86a23ad808de2b727fd1394585da5cdd6788ffe724" +dependencies = [ + "serde_json", + "tracing-core", + "tracing-subscriber", +] + 
[[package]] name = "tracing-core" version = "0.1.32" diff --git a/iroh-base/src/base32.rs b/iroh-base/src/base32.rs index a87153a166..61ec333508 100644 --- a/iroh-base/src/base32.rs +++ b/iroh-base/src/base32.rs @@ -18,7 +18,7 @@ pub fn fmt_append(bytes: impl AsRef<[u8]>, out: &mut String) { /// Convert to a base32 string limited to the first 10 bytes pub fn fmt_short(bytes: impl AsRef<[u8]>) -> String { - let len = bytes.as_ref().len().min(10); + let len = bytes.as_ref().len().min(5); let mut text = data_encoding::BASE32_NOPAD.encode(&bytes.as_ref()[..len]); text.make_ascii_lowercase(); text diff --git a/iroh-willow/Cargo.toml b/iroh-willow/Cargo.toml index c1b14f2f42..1c4d2861f7 100644 --- a/iroh-willow/Cargo.toml +++ b/iroh-willow/Cargo.toml @@ -56,6 +56,8 @@ tokio = { version = "1", features = ["sync", "macros"] } proptest = "1.2.0" tempfile = "3.4" test-strategy = "0.3.1" +tracing-chrome = "0.7.2" +tracing-subscriber = "0.3.18" [features] default = ["net", "metrics"] diff --git a/iroh-willow/src/lib.rs b/iroh-willow/src/lib.rs index 1375ed16a4..8aef848b16 100644 --- a/iroh-willow/src/lib.rs +++ b/iroh-willow/src/lib.rs @@ -1,6 +1,6 @@ //! 
Implementation of willow -#![allow(missing_docs)] +#![allow(missing_docs, unused_imports, dead_code)] pub mod net; pub mod proto; diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 3c697e1362..1b245a5c3b 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -1,98 +1,224 @@ -use std::{pin::Pin, task::Poll}; - -use anyhow::{ensure, Context}; -use futures::{SinkExt, Stream}; -use iroh_base::hash::Hash; -use tokio::io::{AsyncReadExt, AsyncWriteExt}; -use tokio_stream::StreamExt; -use tokio_util::codec::{Decoder, FramedRead, FramedWrite}; -use tracing::{debug, instrument}; +use std::{pin::Pin, sync::Arc, task::Poll}; + +use anyhow::{anyhow, ensure, Context}; +use futures::{FutureExt, SinkExt, Stream}; +use iroh_base::{hash::Hash, key::NodeId}; +use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + task::JoinSet, +}; +// use tokio_stream::StreamExt; +// use tokio_util::codec::{FramedRead, FramedWrite}; +use tracing::{debug, instrument, Instrument, Span}; use crate::{ - proto::wgps::{AccessChallenge, ChallengeHash, CHALLENGE_HASH_LENGTH, MAX_PAYLOAD_SIZE_POWER}, - session::{Role, Session, SessionInit}, - store::{actor::StoreHandle, Store}, + proto::wgps::{ + AccessChallenge, ChallengeHash, LogicalChannel, CHALLENGE_HASH_LENGTH, + MAX_PAYLOAD_SIZE_POWER, + }, + session::{coroutine::Channels, Role, Session, SessionInit}, + store::actor::{ + Interest, Notifier, StoreHandle, + ToActor::{self, ResumeRecv}, + }, + util::{ + channel::{channel, Receiver, Sender}, + Decoder, Encoder, + }, }; use self::codec::WillowCodec; pub mod codec; -/// Read the next frame from a [`FramedRead`] but only if it is available without waiting on IO. 
-async fn next_if_ready( - mut reader: &mut FramedRead, -) -> Option> { - futures::future::poll_fn(|cx| match Pin::new(&mut reader).poll_next(cx) { - Poll::Ready(r) => Poll::Ready(r), - Poll::Pending => Poll::Ready(None), - }) - .await -} - -#[instrument(skip_all, fields(role=?role))] +// /// Read the next frame from a [`FramedRead`] but only if it is available without waiting on IO. +// async fn next_if_ready( +// mut reader: &mut FramedRead, +// ) -> Option> { +// futures::future::poll_fn(|cx| match Pin::new(&mut reader).poll_next(cx) { +// Poll::Ready(r) => Poll::Ready(r), +// Poll::Pending => Poll::Ready(None), +// }) +// .await +// } + +// #[instrument(skip_all, fields(me=%me.fmt_short(), role=?our_role, peer=%peer.fmt_short()))] +#[instrument(skip_all, fields(me=%me.fmt_short(), role=?our_role))] pub async fn run( - store: &StoreHandle, + me: NodeId, + store: StoreHandle, conn: quinn::Connection, - role: Role, + peer: NodeId, + our_role: Role, init: SessionInit, ) -> anyhow::Result<()> { - let (mut control_send, mut control_recv) = match role { + let (mut control_send, mut control_recv) = match our_role { Role::Alfie => conn.open_bi().await?, Role::Betty => conn.accept_bi().await?, }; let our_nonce: AccessChallenge = rand::random(); - debug!(?role, "start"); + debug!("start"); let (received_commitment, max_payload_size) = exchange_commitments(&mut control_send, &mut control_recv, &our_nonce).await?; - debug!(?role, "exchanged comittments"); + debug!("exchanged comittments"); - let (mut reconcile_send, mut reconcile_recv) = match role { + let (mut reconciliation_send, mut reconciliation_recv) = match our_role { Role::Alfie => conn.open_bi().await?, Role::Betty => conn.accept_bi().await?, }; + reconciliation_send.write_u8(0u8).await?; + reconciliation_recv.read_u8().await?; + debug!("reconcile channel open"); + + let (reconciliation_send_tx, reconciliation_send_rx) = channel(1024); + let (reconciliation_recv_tx, reconciliation_recv_rx) = channel(1024); + let 
(control_send_tx, control_send_rx) = channel(1024); + let (control_recv_tx, control_recv_rx) = channel(1024); + let channels = Channels { + control_send: control_send_tx, + control_recv: control_recv_rx, + reconciliation_send: reconciliation_send_tx, + reconciliation_recv: reconciliation_recv_rx, + }; - let mut session = Session::new(role, our_nonce, max_payload_size, received_commitment, init); - - let mut reader = FramedRead::new(control_recv, WillowCodec); - let mut writer = FramedWrite::new(control_send, WillowCodec); - - // TODO: blocking! - session.process(store)?; - - // send out initial messages - for message in session.drain_outbox() { - debug!(role=?role, ?message, "send"); - writer.send(message).await?; - } - - while let Some(message) = reader.next().await { - let message = message.context("error from reader")?; - debug!(%message,awaited=true, "recv"); - session.recv(message.into()); - - // keep pushing already buffered messages - while let Some(message) = next_if_ready(&mut reader).await { - let message = message.context("error from reader")?; - debug!(%message,awaited=false, "recv"); - // TODO: stop when session is full - session.recv(message.into()); + let session = Session::new( + peer, + our_role, + our_nonce, + max_payload_size, + received_commitment, + init, + channels.clone(), + store.clone(), + ); + + let res = { + let on_complete = session.notify_complete(); + + let session_fut = session.run_control(); + + let control_recv_fut = recv_loop( + &mut control_recv, + control_recv_tx, + store.notifier(LogicalChannel::Control, Interest::Recv, peer), + ); + let reconciliation_recv_fut = recv_loop( + &mut reconciliation_recv, + reconciliation_recv_tx, + store.notifier(LogicalChannel::Reconciliation, Interest::Recv, peer), + ); + let control_send_fut = send_loop( + &mut control_send, + control_send_rx, + store.notifier(LogicalChannel::Control, Interest::Send, peer), + ); + let reconciliation_send_fut = send_loop( + &mut reconciliation_send, + 
reconciliation_send_rx, + store.notifier(LogicalChannel::Reconciliation, Interest::Send, peer), + ); + tokio::pin!(session_fut); + tokio::pin!(control_send_fut); + tokio::pin!(reconciliation_send_fut); + tokio::pin!(control_recv_fut); + tokio::pin!(reconciliation_recv_fut); + + // let finish_tasks_fut = async { + // Result::<_, anyhow::Error>::Ok(()) + // }; + // + // finish_tasks_fut.await?; + // Ok(()) + let mut completed = false; + tokio::select! { + biased; + _ = on_complete.notified() => { + tracing::warn!("COMPLETE"); + channels.close_send(); + completed = true; + } + res = &mut session_fut => res.context("session")?, + res = &mut control_recv_fut => res.context("control_recv")?, + res = &mut control_send_fut => res.context("control_send")?, + res = &mut reconciliation_recv_fut => res.context("reconciliation_recv")?, + res = &mut reconciliation_send_fut => res.context("reconciliation_send")?, } + tracing::warn!("CLOSED"); + if completed { + // control_send.finish().await?; + Ok(()) + } else { + Err(anyhow!( + "All tasks finished but reconciliation did not complete" + )) + } + // tokio::pin!(finish_tasks_fut); + // let res = tokio::select! { + // res = &mut finish_tasks_fut => { + // match res { + // // we completed before on_complete was triggered: no success + // Ok(()) => Err(anyhow!("all tasks finished but reconciliation was not completed")), + // Err(err) => Err(err), + // } + // } + // _ = on_complete.notified()=> { + // // finish_tasks_fut.abort(); + // // join_set.abort_all(); + // Ok(()) + // } + // }; + // res + }; + control_send.finish().await?; + reconciliation_send.finish().await?; + res +} - // TODO: blocking! 
- let done = session.process(store)?; - debug!(?done, "process done"); - - for message in session.drain_outbox() { - debug!(%message, "send"); - writer.send(message).await?; +#[instrument(skip_all, fields(ch=%notifier.channel().fmt_short()))] +async fn recv_loop( + recv_stream: &mut quinn::RecvStream, + channel_sender: Sender, + notifier: Notifier, +) -> anyhow::Result<()> { + loop { + // debug!("wait"); + let buf = recv_stream.read_chunk(1024 * 16, true).await?; + if let Some(buf) = buf { + channel_sender.write_slice_async(&buf.bytes[..]).await; + debug!(len = buf.bytes.len(), "recv"); + if channel_sender.is_receivable_notify_set() { + debug!("notify ResumeRecv"); + notifier.notify().await?; + // store_handle + // .send(ToActor::ResumeRecv { peer, channel }) + // .await?; + } + } else { + debug!("EOF"); + break; } + } + // recv_stream.stop() + Ok(()) +} - if done { - debug!("close"); - writer.close().await?; +#[instrument(skip_all, fields(ch=%notifier.channel().fmt_short()))] +async fn send_loop( + send_stream: &mut quinn::SendStream, + channel_receiver: Receiver, + notifier: Notifier, +) -> anyhow::Result<()> { + while let Some(data) = channel_receiver.read_bytes_async().await { + let len = data.len(); + send_stream.write_chunk(data).await?; + debug!(len, "sent"); + if channel_receiver.is_sendable_notify_set() { + debug!("notify ResumeSend"); + notifier.notify().await?; } } + send_stream.finish().await?; Ok(()) } @@ -120,7 +246,8 @@ async fn exchange_commitments( mod tests { use std::{collections::HashSet, time::Instant}; - use iroh_base::hash::Hash; + use futures::StreamExt; + use iroh_base::{hash::Hash, key::SecretKey}; use iroh_net::MagicEndpoint; use rand::SeedableRng; use tracing::{debug, info}; @@ -134,7 +261,10 @@ mod tests { willow::{Entry, Path, SubspaceId}, }, session::{Role, SessionInit}, - store::{actor::StoreHandle, MemoryStore, Store}, + store::{ + actor::{StoreHandle, ToActor}, + MemoryStore, Store, + }, }; const ALPN: &[u8] = b"iroh-willow/0"; @@ 
-142,20 +272,29 @@ mod tests { #[tokio::test] async fn smoke() -> anyhow::Result<()> { iroh_test::logging::setup_multithreaded(); + // use tracing_chrome::ChromeLayerBuilder; + // use tracing_subscriber::{prelude::*, registry::Registry}; + // let (chrome_layer, _guard) = ChromeLayerBuilder::new().build(); + // tracing_subscriber::registry().with(chrome_layer).init(); + let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(1); - let n_betty = 10; - let n_alfie = 20; + let n_betty = 1; + let n_alfie = 2; let ep_alfie = MagicEndpoint::builder() + .secret_key(SecretKey::generate_with_rng(&mut rng)) .alpns(vec![ALPN.to_vec()]) .bind(0) .await?; let ep_betty = MagicEndpoint::builder() + .secret_key(SecretKey::generate_with_rng(&mut rng)) .alpns(vec![ALPN.to_vec()]) .bind(0) .await?; let addr_betty = ep_betty.my_addr().await?; + let node_id_betty = ep_betty.node_id(); + let node_id_alfie = ep_alfie.node_id(); debug!("start connect"); let (conn_alfie, conn_betty) = tokio::join!( @@ -248,11 +387,25 @@ mod tests { debug!("init constructed"); - let handle_alfie = StoreHandle::spawn(store_alfie); - let handle_betty = StoreHandle::spawn(store_betty); + let handle_alfie = StoreHandle::spawn(store_alfie, node_id_alfie); + let handle_betty = StoreHandle::spawn(store_betty, node_id_betty); let (res_alfie, res_betty) = tokio::join!( - run(&handle_alfie, conn_alfie, Role::Alfie, init_alfie), - run(&handle_betty, conn_betty, Role::Betty, init_betty), + run( + node_id_alfie, + handle_alfie.clone(), + conn_alfie, + node_id_betty, + Role::Alfie, + init_alfie + ), + run( + node_id_betty, + handle_betty.clone(), + conn_betty, + node_id_alfie, + Role::Betty, + init_betty + ), ); info!(time=?start.elapsed(), "reconciliation finished!"); @@ -260,43 +413,110 @@ mod tests { info!("betty res {:?}", res_betty); info!( "alfie store {:?}", - get_entries_debug(&mut store_alfie, namespace_id) + get_entries_debug(&handle_alfie, namespace_id).await? 
); info!( "betty store {:?}", - get_entries_debug(&mut store_betty, namespace_id) + get_entries_debug(&handle_betty, namespace_id).await? ); assert!(res_alfie.is_ok()); assert!(res_betty.is_ok()); assert_eq!( - get_entries(&mut store_alfie, namespace_id), + get_entries(&handle_alfie, namespace_id).await?, expected_entries ); assert_eq!( - get_entries(&mut store_betty, namespace_id), + get_entries(&handle_betty, namespace_id).await?, expected_entries ); Ok(()) } - fn get_entries(store: &mut S, namespace: NamespaceId) -> HashSet { + async fn get_entries( + store: &StoreHandle, + namespace: NamespaceId, + ) -> anyhow::Result> { + let (tx, rx) = flume::bounded(1024); store - .get_entries(namespace, &ThreeDRange::full()) - .filter_map(Result::ok) - .collect() + .send(ToActor::GetEntries { + namespace, + reply: tx, + }) + .await?; + let entries: HashSet<_> = rx.into_stream().collect::>().await; + Ok(entries) } - fn get_entries_debug( - store: &mut S, + async fn get_entries_debug( + store: &StoreHandle, namespace: NamespaceId, - ) -> Vec<(SubspaceId, Path)> { - let mut entries: Vec<_> = store - .get_entries(namespace, &ThreeDRange::full()) - .filter_map(|r| r.ok()) + ) -> anyhow::Result> { + let entries = get_entries(store, namespace).await?; + let mut entries: Vec<_> = entries + .into_iter() .map(|e| (e.subspace_id, e.path)) .collect(); entries.sort(); - entries + Ok(entries) } } + +// let mut join_set = JoinSet::new(); +// join_set.spawn( +// session_fut +// .map(|r| ("session", r.map_err(|e| anyhow::Error::from(e)))) +// .instrument(Span::current()), +// ); +// join_set.spawn( +// control_recv_fut +// .map(|r| ("control_recv", r)) +// .instrument(Span::current()), +// ); +// join_set.spawn( +// reconciliation_recv_fut +// .map(|r| ("reconciliation_recv", r)) +// .instrument(Span::current()), +// ); +// join_set.spawn( +// control_send_fut +// .map(|r| ("control_send", r)) +// .instrument(Span::current()), +// ); +// join_set.spawn( +// reconciliation_send_fut +// 
.map(|r| ("reconciliation_send", r)) +// .instrument(Span::current()), +// ); +// +// let finish_tasks_fut = async { +// let mut failed: Option = None; +// while let Some(res) = join_set.join_next().await { +// match res { +// Ok((label, Err(err))) => { +// debug!(?err, "task {label} failed"); +// if failed.is_none() { +// failed = Some(err); +// join_set.abort_all(); +// } +// } +// Ok((label, Ok(()))) => { +// debug!("task {label} finished"); +// } +// Err(err) if err.is_cancelled() => { +// debug!("task cancelled"); +// } +// Err(err) => { +// debug!(?err, "task failed"); +// if failed.is_none() { +// failed = Some(err.into()); +// join_set.abort_all(); +// } +// } +// } +// } +// match failed { +// None => Ok(()), +// Some(err) => Err(err), +// } +// }; diff --git a/iroh-willow/src/proto/grouping.rs b/iroh-willow/src/proto/grouping.rs index ab0588fcf2..c4b6762c39 100644 --- a/iroh-willow/src/proto/grouping.rs +++ b/iroh-willow/src/proto/grouping.rs @@ -3,7 +3,16 @@ use std::cmp::Ordering; use bytes::Bytes; use serde::{Deserialize, Serialize}; -use super::willow::{Entry, Path, SubspaceId, Timestamp}; +use super::{keys::NamespaceId, willow::{Entry, Path, SubspaceId, Timestamp}}; + +/// A three-dimensional range on a specific namespace. +#[derive(Debug)] +pub struct NamespacedRange { + /// The namespace + pub namespace: NamespaceId, + /// The 3DRange + pub range: ThreeDRange, +} /// A three-dimensional range that includes every [`Entry`] included in all three of its ranges. 
#[derive(Debug, Serialize, Deserialize, Clone, Hash, Eq, PartialEq)] diff --git a/iroh-willow/src/proto/wgps.rs b/iroh-willow/src/proto/wgps.rs index 410041b208..16e5771859 100644 --- a/iroh-willow/src/proto/wgps.rs +++ b/iroh-willow/src/proto/wgps.rs @@ -53,48 +53,57 @@ pub enum HandleType { /// More precisely, an IntersectionHandle stores a PsiGroup member together with one of two possible states: /// * pending (waiting for the other peer to perform scalar multiplication), /// * completed (both peers performed scalar multiplication). - IntersectionHandle, + Intersection, /// Resource handle for [`ReadCapability`] that certify access to some Entries. - CapabilityHandle, + Capability, /// Resource handle for [`AreaOfInterest`]s that peers wish to sync. - AreaOfInterestHandle, + AreaOfInterest, /// Resource handle that controls the matching from Payload transmissions to Payload requests. - PayloadRequestHandle, + PayloadRequest, /// Resource handle for [`StaticToken`]s that peers need to transmit. - StaticTokenHandle, + StaticToken, } /// The different logical channels employed by the WGPS. #[derive(Debug, Serialize, Deserialize, Copy, Clone)] pub enum LogicalChannel { /// Control channel - ControlChannel, + Control, /// Logical channel for performing 3d range-based set reconciliation. - ReconciliationChannel, + Reconciliation, // TODO: use all the channels // right now everything but reconciliation goes into the control channel // // /// Logical channel for transmitting Entries and Payloads outside of 3d range-based set reconciliation. - // DataChannel, + // Data, // // /// Logical channel for controlling the binding of new IntersectionHandles. - // IntersectionChannel, + // Intersection, // // /// Logical channel for controlling the binding of new CapabilityHandles. - // CapabilityChannel, + // Capability, // // /// Logical channel for controlling the binding of new AreaOfInterestHandles. 
- // AreaOfInterestChannel, + // AreaOfInterest, // // /// Logical channel for controlling the binding of new PayloadRequestHandles. - // PayloadRequestChannel, + // PayloadRequest, // // /// Logical channel for controlling the binding of new StaticTokenHandles. - // StaticTokenChannel, + // StaticToken, +} + +impl LogicalChannel { + pub fn fmt_short(&self) -> &str { + match self { + LogicalChannel::Control => "C", + LogicalChannel::Reconciliation => "R", + } + } } #[derive(Debug, Serialize, Deserialize, Hash, Eq, PartialEq, Clone, Copy, derive_more::From)] @@ -115,22 +124,22 @@ pub trait Handle: std::hash::Hash + From + Copy + Eq + PartialEq { impl Handle for CapabilityHandle { fn handle_type(&self) -> HandleType { - HandleType::CapabilityHandle + HandleType::Capability } } impl Handle for StaticTokenHandle { fn handle_type(&self) -> HandleType { - HandleType::StaticTokenHandle + HandleType::StaticToken } } impl Handle for AreaOfInterestHandle { fn handle_type(&self) -> HandleType { - HandleType::AreaOfInterestHandle + HandleType::AreaOfInterest } } impl Handle for IntersectionHandle { fn handle_type(&self) -> HandleType { - HandleType::IntersectionHandle + HandleType::Intersection } } @@ -183,11 +192,20 @@ pub enum Message { impl Encoder for Message { fn encoded_len(&self) -> usize { - postcard::experimental::serialized_size(&self).unwrap() + 4 + let data_len = postcard::experimental::serialized_size(&self).unwrap(); + let header_len = 4; + // tracing::debug!( + // data_len, + // header_len, + // full_len = data_len + header_len, + // "Message encoded_len" + // ); + data_len + header_len } fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { - let len = self.encoded_len() as u32; + let len = postcard::experimental::serialized_size(&self).unwrap() as u32; + // tracing::debug!(msg_len = len, full_len = len + 4, "Message encode"); out.write_all(&len.to_be_bytes())?; postcard::to_io(self, out)?; Ok(()) @@ -196,17 +214,25 @@ impl Encoder for Message { impl 
Decoder for Message { fn decode_from(data: &[u8]) -> anyhow::Result> { + // tracing::debug!(input_len = data.len(), "Message decode: start"); if data.len() < 4 { return Ok(DecodeOutcome::NeedMoreData); } let len = u32::from_be_bytes(data[..4].try_into().expect("just checked")) as usize; - if data.len() < 4 + len { + // tracing::debug!(msg_len = len, "Message decode: parsed len"); + let end = len + 4; + if data.len() < end { + // tracing::debug!("Message decode: need more data"); return Ok(DecodeOutcome::NeedMoreData); } - let item = postcard::from_bytes(&data[4..len])?; + // tracing::debug!("Message decode: now deserilalize"); + let res = postcard::from_bytes(&data[4..end]); + // tracing::debug!(?res, "Message decode: res"); + let item = res?; + // tracing::debug!(?item, "Message decode: decoded!"); Ok(DecodeOutcome::Decoded { item, - consumed: len, + consumed: end, }) } } @@ -216,8 +242,8 @@ impl Message { match self { Message::ReconciliationSendFingerprint(_) | Message::ReconciliationAnnounceEntries(_) - | Message::ReconciliationSendEntry(_) => LogicalChannel::ReconciliationChannel, - _ => LogicalChannel::ControlChannel, + | Message::ReconciliationSendEntry(_) => LogicalChannel::Reconciliation, + _ => LogicalChannel::Control, } } } @@ -358,7 +384,8 @@ impl fmt::Debug for Fingerprint { impl Fingerprint { pub fn add_entry(&mut self, entry: &Entry) { // TODO: Don't allocate - let next = Fingerprint(*Hash::new(&entry.encode().expect("encoding not to fail")).as_bytes()); + let next = + Fingerprint(*Hash::new(&entry.encode().expect("encoding not to fail")).as_bytes()); *self ^= next; } diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index f338035b42..8d2ca026f5 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -1,16 +1,18 @@ use std::{ collections::{HashSet, VecDeque}, fmt, + sync::{Arc, Mutex}, }; use ed25519_dalek::SignatureError; -use iroh_base::hash::Hash; -use tracing::{debug, trace, warn}; +use 
iroh_base::{hash::Hash, key::NodeId}; +use tokio::sync::Notify; +use tracing::{debug, info, instrument, trace, warn}; use crate::{ proto::{ - grouping::{AreaOfInterest, ThreeDRange}, + grouping::{AreaOfInterest, NamespacedRange, ThreeDRange}, keys::{NamespaceId, NamespacePublicKey, UserPublicKey, UserSecretKey, UserSignature}, meadowcap::InvalidCapability, wgps::{ @@ -22,15 +24,22 @@ use crate::{ }, willow::{AuthorisationToken, AuthorisedEntry, Unauthorised}, }, - store::{SplitAction, Store, SyncConfig}, + store::{ + actor::{StoreHandle, ToActor}, + SplitAction, Store, SyncConfig, + }, + util::channel::ReadOutcome, }; -use self::resource::ScopedResources; +use self::{ + coroutine::{Channels, SessionState, SessionStateInner}, + resource::ScopedResources, +}; const LOGICAL_CHANNEL_CAP: usize = 128; -pub mod resource; pub mod coroutine; +pub mod resource; mod util; #[derive(Debug, thiserror::Error)] @@ -60,7 +69,7 @@ pub enum Error { #[error("the received nonce does not match the received committment")] BrokenCommittement, #[error("received an actor message for unknown session")] - SessionLost, + SessionNotFound, } impl From for Error { @@ -158,90 +167,138 @@ impl ChallengeState { #[derive(Debug)] pub struct Session { - role: Role, + peer: NodeId, + our_role: Role, _their_maximum_payload_size: usize, - init: SessionInit, challenge: ChallengeState, - - control_channel: Channel, - reconciliation_channel: Channel, - - our_resources: ScopedResources, - their_resources: ScopedResources, - pending_ranges: HashSet<(AreaOfInterestHandle, ThreeDRange)>, - pending_entries: Option, - - reconciliation_started: bool, + channels: Arc, + state: SessionState, our_current_aoi: Option, + store_handle: StoreHandle, } +// #[derive(Debug)] +// pub struct Session { +// role: Role, +// _their_maximum_payload_size: usize, +// +// init: SessionInit, +// challenge: ChallengeState, +// +// control_channel: Channel, +// reconciliation_channel: Channel, +// +// our_resources: ScopedResources, +// 
their_resources: ScopedResources, +// pending_ranges: HashSet<(AreaOfInterestHandle, ThreeDRange)>, +// pending_entries: Option, +// +// reconciliation_started: bool, +// our_current_aoi: Option, +// } + impl Session { pub fn new( + peer: NodeId, our_role: Role, our_nonce: AccessChallenge, their_maximum_payload_size: usize, received_commitment: ChallengeHash, init: SessionInit, + channels: Channels, + store_handle: StoreHandle, ) -> Self { let challenge_state = ChallengeState::Committed { our_nonce, received_commitment, }; - let mut this = Self { - role: our_role, + let state = SessionStateInner::default(); + let this = Self { + peer, + our_role, _their_maximum_payload_size: their_maximum_payload_size, challenge: challenge_state, - control_channel: Default::default(), - reconciliation_channel: Default::default(), - our_resources: Default::default(), - their_resources: Default::default(), our_current_aoi: None, // config init, - pending_ranges: Default::default(), - pending_entries: Default::default(), - reconciliation_started: false, + channels: Arc::new(channels), + state: Arc::new(Mutex::new(state)), + store_handle, }; let msg = CommitmentReveal { nonce: our_nonce }; - this.control_channel.send(msg); + this.channels + .sender(LogicalChannel::Control) + .send(&msg.into()) + .expect("channel not empty at start"); this } - pub fn drain_outbox(&mut self) -> impl Iterator + '_ { - self.control_channel - .outbox_drain() - .chain(self.reconciliation_channel.outbox_drain()) + pub fn notify_complete(&self) -> Arc { + self.state.lock().unwrap().notify_complete() } + // pub fn drain_outbox(&mut self) -> impl Iterator + '_ { + // self.control_channel + // .outbox_drain() + // .chain(self.reconciliation_channel.outbox_drain()) + // } + // pub fn our_role(&self) -> Role { - self.role + self.our_role } - pub fn recv(&mut self, message: Message) { - match message.logical_channel() { - LogicalChannel::ControlChannel => self.control_channel.inbox_push_or_drop(message), - 
LogicalChannel::ReconciliationChannel => { - self.reconciliation_channel.inbox_push_or_drop(message) + // pub fn recv(&mut self, message: Message) { + // match message.logical_channel() { + // LogicalChannel::ControlChannel => self.control_channel.inbox_push_or_drop(message), + // LogicalChannel::ReconciliationChannel => { + // self.reconciliation_channel.inbox_push_or_drop(message) + // } + // } + // } + + // pub fn is_complete(&self) -> bool { + // let state = self.state.lock().unwrap(); + // state.reconciliation_started + // && state.pending_ranges.is_empty() + // && state.pending_entries.is_none() + // } + + // pub async fn run(&mut self) + + #[instrument(skip_all)] + pub async fn run_control(mut self) -> Result<(), Error> { + loop { + trace!("wait recv"); + let message = self + .channels + .receiver(LogicalChannel::Control) + .read_message_async() + .await?; + match message { + None => break, + Some(message) => { + info!(%message, "recv"); + self.process_control(message).await?; + let is_complete = self.state.lock().unwrap().is_complete(); + debug!(session=%self.peer.fmt_short(), is_complete, "handled"); + } } } + Ok(()) + // Ok(()) } - pub fn process(&mut self, store: &mut S) -> Result { - trace!(pending = self.pending_ranges.len(), "process start!"); - // always process control messages first - while let Some(message) = self.control_channel.inbox_pop() { - self.process_control(store, message)?; - } - while let Some(message) = self.reconciliation_channel.inbox_pop() { - self.process_reconciliation(store, message)?; - } - trace!(pending = self.pending_ranges.len(), "process done!"); - Ok(self.reconciliation_started - && self.pending_ranges.is_empty() - && self.pending_entries.is_none()) + async fn send_control(&self, message: impl Into) -> Result<(), Error> { + let message: Message = message.into(); + self.channels + .sender(LogicalChannel::Control) + .send_async(&message) + .await?; + info!(msg=%message, "sent"); + Ok(()) } - fn setup(&mut self) -> Result<(), 
Error> { + async fn setup(&mut self) -> Result<(), Error> { let init = &self.init; let area_of_interest = init.area_of_interest.clone(); let capability = init.capability.clone(); @@ -256,56 +313,101 @@ impl Session { // register read capability let signature = self.challenge.sign(&init.user_secret_key)?; - let our_capability_handle = self.our_resources.capabilities.bind(capability.clone()); + let our_capability_handle = self + .state + .lock() + .unwrap() + .our_resources + .capabilities + .bind(capability.clone()); let msg = SetupBindReadCapability { capability, handle: intersection_handle, signature, }; - self.control_channel.send(msg); + self.send_control(msg).await?; // register area of interest let msg = SetupBindAreaOfInterest { area_of_interest, authorisation: our_capability_handle, }; - let our_aoi_handle = self.our_resources.areas_of_interest.bind(msg.clone()); - self.control_channel.send(msg); + self.send_control(msg.clone()).await?; + let our_aoi_handle = self + .state + .lock() + .unwrap() + .our_resources + .areas_of_interest + .bind(msg.clone()); self.our_current_aoi = Some(our_aoi_handle); Ok(()) } - fn process_control(&mut self, store: &mut S, message: Message) -> Result<(), Error> { + // fn resources_mut(&self, scope: Scope) -> &mut + + async fn process_control(&mut self, message: Message) -> Result<(), Error> { match message { Message::CommitmentReveal(msg) => { - self.challenge.reveal(self.role, msg.nonce)?; - self.setup()?; + self.challenge.reveal(self.our_role, msg.nonce)?; + self.setup().await?; } Message::SetupBindReadCapability(msg) => { msg.capability.validate()?; self.challenge .verify(msg.capability.receiver(), &msg.signature)?; // TODO: verify intersection handle - self.their_resources.capabilities.bind(msg.capability); + self.state + .lock() + .unwrap() + .their_resources + .capabilities + .bind(msg.capability); } Message::SetupBindStaticToken(msg) => { - self.their_resources.static_tokens.bind(msg.static_token); + self.state + .lock() 
+ .unwrap() + .their_resources + .static_tokens + .bind(msg.static_token); } Message::SetupBindAreaOfInterest(msg) => { - let capability = self.handle_to_capability(Scope::Theirs, &msg.authorisation)?; - capability.try_granted_area(&msg.area_of_interest.area)?; - let their_handle = self.their_resources.areas_of_interest.bind(msg); - - if self.role == Role::Alfie { - if let Some(our_handle) = self.our_current_aoi.clone() { - self.init_reconciliation(store, &our_handle, &their_handle)?; - } else { - warn!( - "received area of interest from remote, but no area of interest set on our side" - ); - } - } + let their_handle = self + .state + .lock() + .unwrap() + .setup_bind_area_of_interest(msg)?; + let start = if self.our_role == Role::Alfie { + let our_handle = self + .our_current_aoi + .clone() + .ok_or(Error::InvalidMessageInCurrentState)?; + Some((our_handle, their_handle)) + } else { + None + }; + let message = ToActor::InitSession { + peer: self.peer, + state: self.state.clone(), + channels: self.channels.clone(), + start, + }; + self.store_handle.send(message).await?; + + // } + // if self.our_role == Role::Alfie { + // if let Some(our_handle) = self.our_current_aoi.clone() { + // self.init_reconciliation(our_handle, their_handle).await?; + // } else { + // warn!( + // "received area of interest from remote, but no area of interest set on our side" + // ); + // } + // } else { + // + // } } Message::ControlFreeHandle(_msg) => { // TODO: Free handles @@ -315,342 +417,245 @@ impl Session { Ok(()) } - fn bind_static_token(&mut self, static_token: StaticToken) -> StaticTokenHandle { - let (handle, is_new) = self - .our_resources - .static_tokens - .bind_if_new(static_token.clone()); - if is_new { - let msg = SetupBindStaticToken { static_token }; - self.control_channel - .send(Message::SetupBindStaticToken(msg)); - } - handle - } - - fn init_reconciliation( - &mut self, - store: &mut S, - our_handle: &AreaOfInterestHandle, - their_handle: &AreaOfInterestHandle, - ) 
-> Result<(), Error> { - let our_aoi = self.our_resources.areas_of_interest.get(&our_handle)?; - let their_aoi = self.their_resources.areas_of_interest.get(&their_handle)?; - - let our_capability = self - .our_resources - .capabilities - .get(&our_aoi.authorisation)?; - let namespace = our_capability.granted_namespace(); - - let common_aoi = &our_aoi - .area() - .intersection(&their_aoi.area()) - .ok_or(Error::AreaOfInterestDoesNotOverlap)?; - - let range = common_aoi.into_range(); - let fingerprint = store.fingerprint(namespace.into(), &range)?; - self.send_fingerprint(range, fingerprint, *our_handle, *their_handle, None); - self.reconciliation_started = true; - Ok(()) - } - - fn send_fingerprint( - &mut self, - range: ThreeDRange, - fingerprint: Fingerprint, - our_handle: AreaOfInterestHandle, - their_handle: AreaOfInterestHandle, - is_final_reply_for_range: Option, - ) { - self.pending_ranges.insert((our_handle, range.clone())); - let msg = ReconciliationSendFingerprint { - range, - fingerprint, - sender_handle: our_handle, - receiver_handle: their_handle, - is_final_reply_for_range, - }; - self.reconciliation_channel.send(msg); - } - - fn announce_empty( - &mut self, - range: ThreeDRange, - our_handle: AreaOfInterestHandle, - their_handle: AreaOfInterestHandle, - want_response: bool, - is_final_reply_for_range: Option, - ) -> Result<(), Error> { - if want_response { - self.pending_ranges.insert((our_handle, range.clone())); - } - let msg = ReconciliationAnnounceEntries { - range, - count: 0, - want_response, - will_sort: false, - sender_handle: our_handle, - receiver_handle: their_handle, - is_final_reply_for_range, - }; - self.reconciliation_channel - .send(Message::ReconciliationAnnounceEntries(msg)); - Ok(()) - } + // fn bind_static_token(&mut self, static_token: StaticToken) -> StaticTokenHandle { + // let (handle, is_new) = self + // .our_resources + // .static_tokens + // .bind_if_new(static_token.clone()); + // if is_new { + // let msg = 
SetupBindStaticToken { static_token }; + // self.control_channel + // .send(Message::SetupBindStaticToken(msg)); + // } + // handle + // } - fn announce_and_send_entries( + async fn init_reconciliation( &mut self, - store: &mut S, - namespace: NamespaceId, - range: &ThreeDRange, our_handle: AreaOfInterestHandle, their_handle: AreaOfInterestHandle, - want_response: bool, - is_final_reply_for_range: Option, - our_count: Option, ) -> Result<(), Error> { - if want_response { - self.pending_ranges.insert((our_handle, range.clone())); - } - let our_count = match our_count { - Some(count) => count, - None => store.count(namespace, &range)?, - }; - let msg = ReconciliationAnnounceEntries { - range: range.clone(), - count: our_count, - want_response, - will_sort: false, // todo: sorted? - sender_handle: our_handle, - receiver_handle: their_handle, - is_final_reply_for_range, + // let mut state = self.state.lock().unwrap(); + // let our_aoi = state.our_resources.areas_of_interest.get(&our_handle)?; + // let their_aoi = state.their_resources.areas_of_interest.get(&their_handle)?; + // + // let our_capability = state + // .our_resources + // .capabilities + // .get(&our_aoi.authorisation)?; + // let namespace = our_capability.granted_namespace(); + // + // let common_aoi = &our_aoi + // .area() + // .intersection(&their_aoi.area()) + // .ok_or(Error::AreaOfInterestDoesNotOverlap)?; + // + // let range = common_aoi.into_range(); + // state.reconciliation_started = true; + // drop(state); + // let range = NamespacedRange { + // namespace: namespace.into(), + // range, + // }; + let message = ToActor::InitSession { + peer: self.peer, + state: self.state.clone(), + channels: self.channels.clone(), + start: Some((our_handle, their_handle)), // send_fingerprint: Some(range), }; - self.reconciliation_channel.send(msg); - for authorised_entry in store.get_entries_with_authorisation(namespace, &range) { - let authorised_entry = authorised_entry?; - let (entry, token) = 
authorised_entry.into_parts(); - let (static_token, dynamic_token) = token.into_parts(); - // TODO: partial payloads - let available = entry.payload_length; - let static_token_handle = self.bind_static_token(static_token); - let msg = ReconciliationSendEntry { - entry: LengthyEntry::new(entry, available), - static_token_handle, - dynamic_token, - }; - self.reconciliation_channel.send(msg); - } + self.store_handle.send(message).await?; Ok(()) } - fn split_range_and_send_parts( - &mut self, - store: &mut S, - namespace: NamespaceId, - range: &ThreeDRange, - our_handle: AreaOfInterestHandle, - their_handle: AreaOfInterestHandle, - ) -> Result<(), Error> { - // TODO: expose this config - let config = SyncConfig::default(); - let mut announce_entries = vec![]; - { - let iter = store.split(namespace, &range, &config)?; - let mut iter = iter.peekable(); - while let Some(res) = iter.next() { - let (subrange, action) = res?; - let is_last = iter.peek().is_none(); - let is_final_reply = is_last.then(|| range.clone()); - match action { - SplitAction::SendEntries(count) => { - announce_entries.push((subrange, count, is_final_reply)); - } - SplitAction::SendFingerprint(fingerprint) => { - self.send_fingerprint( - subrange, - fingerprint, - our_handle, - their_handle, - is_final_reply, - ); - } - } - } - } - for (subrange, count, is_final_reply) in announce_entries.into_iter() { - self.announce_and_send_entries( - store, - namespace, - &subrange, - our_handle, - their_handle, - true, - is_final_reply, - Some(count), - )?; - } - Ok(()) - } - - fn clear_pending_range_if_some( - &mut self, - our_handle: AreaOfInterestHandle, - pending_range: Option, - ) -> Result<(), Error> { - if let Some(range) = pending_range { - if !self.pending_ranges.remove(&(our_handle, range.clone())) { - warn!("received duplicate final reply for range marker"); - return Err(Error::InvalidMessageInCurrentState); - } - } - Ok(()) - } - - fn process_reconciliation( - &mut self, - store: &mut S, - message: 
Message, - ) -> Result<(), Error> { - match message { - Message::ReconciliationSendFingerprint(message) => { - self.reconciliation_started = true; - let ReconciliationSendFingerprint { - range, - fingerprint: their_fingerprint, - sender_handle: their_handle, - receiver_handle: our_handle, - is_final_reply_for_range, - } = message; - - self.clear_pending_range_if_some(our_handle, is_final_reply_for_range)?; - - let namespace = self.range_is_authorised(&range, &our_handle, &their_handle)?; - let our_fingerprint = store.fingerprint(namespace, &range)?; - - // case 1: fingerprint match. - if our_fingerprint == their_fingerprint { - self.announce_empty( - range.clone(), - our_handle, - their_handle, - false, - Some(range.clone()), - )?; - } - // case 2: fingerprint is empty - else if their_fingerprint.is_empty() { - self.announce_and_send_entries( - store, - namespace, - &range, - our_handle, - their_handle, - true, - Some(range.clone()), - None, - )?; - } - // case 3: fingerprint doesn't match and is non-empty - else { - // reply by splitting the range into parts unless it is very short - self.split_range_and_send_parts( - store, - namespace, - &range, - our_handle, - their_handle, - )?; - } - } - Message::ReconciliationAnnounceEntries(message) => { - let ReconciliationAnnounceEntries { - range, - count, - want_response, - will_sort: _, - sender_handle: their_handle, - receiver_handle: our_handle, - is_final_reply_for_range, - } = message; - self.clear_pending_range_if_some(our_handle, is_final_reply_for_range)?; - if self.pending_entries.is_some() { - return Err(Error::InvalidMessageInCurrentState); - } - let namespace = self.range_is_authorised(&range, &our_handle, &their_handle)?; - if want_response { - self.announce_and_send_entries( - store, - namespace, - &range, - our_handle, - their_handle, - false, - Some(range.clone()), - None, - )?; - } - if count != 0 { - self.pending_entries = Some(count); - } - } - Message::ReconciliationSendEntry(message) => { - let 
remaining = self - .pending_entries - .as_mut() - .ok_or(Error::InvalidMessageInCurrentState)?; - let ReconciliationSendEntry { - entry, - static_token_handle, - dynamic_token, - } = message; - let static_token = self - .their_resources - .static_tokens - .get(&static_token_handle)?; - // TODO: avoid clone of static token? - let authorisation_token = - AuthorisationToken::from_parts(static_token.clone(), dynamic_token); - let authorised_entry = - AuthorisedEntry::try_from_parts(entry.entry, authorisation_token)?; - store.ingest_entry(&authorised_entry)?; - - *remaining -= 1; - if *remaining == 0 { - self.pending_entries = None; - } - } - _ => return Err(Error::UnsupportedMessage), - } - Ok(()) - } - - fn range_is_authorised( - &self, - range: &ThreeDRange, - receiver_handle: &AreaOfInterestHandle, - sender_handle: &AreaOfInterestHandle, - ) -> Result { - let our_namespace = self.handle_to_namespace_id(Scope::Ours, receiver_handle)?; - let their_namespace = self.handle_to_namespace_id(Scope::Theirs, sender_handle)?; - if our_namespace != their_namespace { - return Err(Error::AreaOfInterestNamespaceMismatch); - } - let our_aoi = self.handle_to_aoi(Scope::Ours, receiver_handle)?; - let their_aoi = self.handle_to_aoi(Scope::Theirs, sender_handle)?; + // fn send_fingerprint( + // &mut self, + // range: ThreeDRange, + // fingerprint: Fingerprint, + // our_handle: AreaOfInterestHandle, + // their_handle: AreaOfInterestHandle, + // is_final_reply_for_range: Option, + // ) { + // self.pending_ranges.insert((our_handle, range.clone())); + // let msg = ReconciliationSendFingerprint { + // range, + // fingerprint, + // sender_handle: our_handle, + // receiver_handle: their_handle, + // is_final_reply_for_range, + // }; + // self.reconciliation_channel.send(msg); + // } - if !our_aoi.area().includes_range(&range) || !their_aoi.area().includes_range(&range) { - return Err(Error::RangeOutsideCapability); - } - Ok(our_namespace.into()) - } + // fn announce_empty( + // &mut self, + 
// range: ThreeDRange, + // our_handle: AreaOfInterestHandle, + // their_handle: AreaOfInterestHandle, + // want_response: bool, + // is_final_reply_for_range: Option, + // ) -> Result<(), Error> { + // if want_response { + // self.pending_ranges.insert((our_handle, range.clone())); + // } + // let msg = ReconciliationAnnounceEntries { + // range, + // count: 0, + // want_response, + // will_sort: false, + // sender_handle: our_handle, + // receiver_handle: their_handle, + // is_final_reply_for_range, + // }; + // self.reconciliation_channel + // .send(Message::ReconciliationAnnounceEntries(msg)); + // Ok(()) + // } - fn resources(&self, scope: Scope) -> &ScopedResources { - match scope { - Scope::Ours => &self.our_resources, - Scope::Theirs => &self.their_resources, - } - } + // + // fn process_reconciliation( + // &mut self, + // store: &mut S, + // message: Message, + // ) -> Result<(), Error> { + // match message { + // Message::ReconciliationSendFingerprint(message) => { + // self.reconciliation_started = true; + // let ReconciliationSendFingerprint { + // range, + // fingerprint: their_fingerprint, + // sender_handle: their_handle, + // receiver_handle: our_handle, + // is_final_reply_for_range, + // } = message; + // + // self.clear_pending_range_if_some(our_handle, is_final_reply_for_range)?; + // + // let namespace = self.range_is_authorised(&range, &our_handle, &their_handle)?; + // let our_fingerprint = store.fingerprint(namespace, &range)?; + // + // // case 1: fingerprint match. 
+ // if our_fingerprint == their_fingerprint { + // self.announce_empty( + // range.clone(), + // our_handle, + // their_handle, + // false, + // Some(range.clone()), + // )?; + // } + // // case 2: fingerprint is empty + // else if their_fingerprint.is_empty() { + // self.announce_and_send_entries( + // store, + // namespace, + // &range, + // our_handle, + // their_handle, + // true, + // Some(range.clone()), + // None, + // )?; + // } + // // case 3: fingerprint doesn't match and is non-empty + // else { + // // reply by splitting the range into parts unless it is very short + // self.split_range_and_send_parts( + // store, + // namespace, + // &range, + // our_handle, + // their_handle, + // )?; + // } + // } + // Message::ReconciliationAnnounceEntries(message) => { + // let ReconciliationAnnounceEntries { + // range, + // count, + // want_response, + // will_sort: _, + // sender_handle: their_handle, + // receiver_handle: our_handle, + // is_final_reply_for_range, + // } = message; + // self.clear_pending_range_if_some(our_handle, is_final_reply_for_range)?; + // if self.pending_entries.is_some() { + // return Err(Error::InvalidMessageInCurrentState); + // } + // let namespace = self.range_is_authorised(&range, &our_handle, &their_handle)?; + // if want_response { + // self.announce_and_send_entries( + // store, + // namespace, + // &range, + // our_handle, + // their_handle, + // false, + // Some(range.clone()), + // None, + // )?; + // } + // if count != 0 { + // self.pending_entries = Some(count); + // } + // } + // Message::ReconciliationSendEntry(message) => { + // let remaining = self + // .pending_entries + // .as_mut() + // .ok_or(Error::InvalidMessageInCurrentState)?; + // let ReconciliationSendEntry { + // entry, + // static_token_handle, + // dynamic_token, + // } = message; + // let static_token = self + // .their_resources + // .static_tokens + // .get(&static_token_handle)?; + // // TODO: avoid clone of static token? 
+ // let authorisation_token = + // AuthorisationToken::from_parts(static_token.clone(), dynamic_token); + // let authorised_entry = + // AuthorisedEntry::try_from_parts(entry.entry, authorisation_token)?; + // store.ingest_entry(&authorised_entry)?; + // + // *remaining -= 1; + // if *remaining == 0 { + // self.pending_entries = None; + // } + // } + // _ => return Err(Error::UnsupportedMessage), + // } + // Ok(()) + // } + // + // fn range_is_authorised( + // &self, + // range: &ThreeDRange, + // receiver_handle: &AreaOfInterestHandle, + // sender_handle: &AreaOfInterestHandle, + // ) -> Result { + // let our_namespace = self.handle_to_namespace_id(Scope::Ours, receiver_handle)?; + // let their_namespace = self.handle_to_namespace_id(Scope::Theirs, sender_handle)?; + // if our_namespace != their_namespace { + // return Err(Error::AreaOfInterestNamespaceMismatch); + // } + // let our_aoi = self.handle_to_aoi(Scope::Ours, receiver_handle)?; + // let their_aoi = self.handle_to_aoi(Scope::Theirs, sender_handle)?; + // + // if !our_aoi.area().includes_range(&range) || !their_aoi.area().includes_range(&range) { + // return Err(Error::RangeOutsideCapability); + // } + // Ok(our_namespace.into()) + // } + // + // fn resources(&self, scope: Scope) -> &ScopedResources { + // match scope { + // Scope::Ours => &self.our_resources, + // Scope::Theirs => &self.their_resources, + // } + // } // fn resources_mut(&mut self, scope: Scope) -> &ScopedResources { // match scope { @@ -658,32 +663,32 @@ impl Session { // Scope::Theirs => &mut self.their_resources, // } // } - - fn handle_to_capability( - &self, - scope: Scope, - handle: &CapabilityHandle, - ) -> Result<&ReadCapability, Error> { - self.resources(scope).capabilities.get(handle) - } - - fn handle_to_aoi( - &self, - scope: Scope, - handle: &AreaOfInterestHandle, - ) -> Result<&SetupBindAreaOfInterest, Error> { - self.resources(scope).areas_of_interest.get(handle) - } - - fn handle_to_namespace_id( - &self, - scope: Scope, 
- handle: &AreaOfInterestHandle, - ) -> Result<&NamespacePublicKey, Error> { - let aoi = self.handle_to_aoi(scope, handle)?; - let capability = self.resources(scope).capabilities.get(&aoi.authorisation)?; - Ok(capability.granted_namespace()) - } + // + // fn handle_to_capability( + // &self, + // scope: Scope, + // handle: &CapabilityHandle, + // ) -> Result<&ReadCapability, Error> { + // self.resources(scope).capabilities.get(handle) + // } + // + // fn handle_to_aoi( + // &self, + // scope: Scope, + // handle: &AreaOfInterestHandle, + // ) -> Result<&SetupBindAreaOfInterest, Error> { + // self.resources(scope).areas_of_interest.get(handle) + // } + // + // fn handle_to_namespace_id( + // &self, + // scope: Scope, + // handle: &AreaOfInterestHandle, + // ) -> Result<&NamespacePublicKey, Error> { + // let aoi = self.handle_to_aoi(scope, handle)?; + // let capability = self.resources(scope).capabilities.get(&aoi.authorisation)?; + // Ok(capability.granted_namespace()) + // } } #[derive(Copy, Clone, Debug)] diff --git a/iroh-willow/src/session/coroutine.rs b/iroh-willow/src/session/coroutine.rs index fb0fa2b492..4470e83ad7 100644 --- a/iroh-willow/src/session/coroutine.rs +++ b/iroh-willow/src/session/coroutine.rs @@ -7,7 +7,8 @@ use genawaiter::{ sync::{Co, Gen}, GeneratorState, }; -use tracing::warn; +use tokio::sync::Notify; +use tracing::{debug, info, warn}; use crate::{ proto::{ @@ -22,7 +23,7 @@ use crate::{ willow::{AuthorisationToken, AuthorisedEntry}, }, store::{ReadonlyStore, SplitAction, Store, SyncConfig}, - util::channel::{Sender, Receiver, WriteOutcome}, + util::channel::{Receiver, Sender, WriteOutcome}, }; use super::{resource::ScopedResources, Channel, Error, Role, Scope}; @@ -30,16 +31,10 @@ use super::{resource::ScopedResources, Channel, Error, Role, Scope}; #[derive(Debug, Copy, Clone)] pub enum Yield { SendBufferFull(LogicalChannel), - // NextMessage, } -// pub enum Resume { -// Continue, -// NextMessage(Message), -// } - 
#[derive(derive_more::Debug)] -pub struct ReconcileRoutine { +pub struct Coroutine { pub store: Arc, pub channels: Arc, pub state: SessionState, @@ -49,35 +44,41 @@ pub struct ReconcileRoutine { pub type SessionState = Arc>; -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct Channels { - control_sender: Sender, - reconciliation_sender: Sender, - reconciliation_receiver: Receiver, + pub control_send: Sender, + pub control_recv: Receiver, + pub reconciliation_send: Sender, + pub reconciliation_recv: Receiver, } impl Channels { + pub fn close_send(&self) { + self.control_send.close(); + self.reconciliation_send.close(); + } pub fn sender(&self, channel: LogicalChannel) -> &Sender { match channel { - LogicalChannel::ControlChannel => &self.control_sender, - LogicalChannel::ReconciliationChannel => &self.reconciliation_sender, + LogicalChannel::Control => &self.control_send, + LogicalChannel::Reconciliation => &self.reconciliation_send, } } pub fn receiver(&self, channel: LogicalChannel) -> &Receiver { match channel { - LogicalChannel::ControlChannel => unimplemented!(), - LogicalChannel::ReconciliationChannel => &self.reconciliation_receiver, + LogicalChannel::Control => &self.control_recv, + LogicalChannel::Reconciliation => &self.reconciliation_recv, } } } -#[derive(Debug)] +#[derive(Debug, Default)] pub struct SessionStateInner { - our_resources: ScopedResources, - their_resources: ScopedResources, - reconciliation_started: bool, - pending_ranges: HashSet<(AreaOfInterestHandle, ThreeDRange)>, - pending_entries: Option, + pub our_resources: ScopedResources, + pub their_resources: ScopedResources, + pub reconciliation_started: bool, + pub pending_ranges: HashSet<(AreaOfInterestHandle, ThreeDRange)>, + pub pending_entries: Option, + pub notify_complete: Arc } impl SessionStateInner { @@ -87,6 +88,34 @@ impl SessionStateInner { Scope::Theirs => &self.their_resources, } } + pub fn is_complete(&self) -> bool { + self.reconciliation_started + && 
self.pending_ranges.is_empty() + && self.pending_entries.is_none() + } + + pub fn trigger_notify_if_complete(&mut self) { + if self.is_complete() { + self.notify_complete.notify_waiters() + } + } + + pub fn notify_complete(&self) -> Arc { + Arc::clone(&self.notify_complete) + } + + pub fn setup_bind_area_of_interest( + &mut self, + msg: SetupBindAreaOfInterest, + ) -> Result { + let capability = self + .resources(Scope::Theirs) + .capabilities + .get(&msg.authorisation)?; + capability.try_granted_area(&msg.area_of_interest.area)?; + let their_handle = self.their_resources.areas_of_interest.bind(msg); + Ok(their_handle) + } pub fn authorize_send_entry( &mut self, @@ -97,12 +126,12 @@ impl SessionStateInner { static_token_handle, dynamic_token, } = message; - let mut remaining = self + let remaining = self .pending_entries - .clone() + .as_mut() .ok_or(Error::InvalidMessageInCurrentState)?; - remaining -= 1; - if remaining == 0 { + *remaining -= 1; + if *remaining == 0 { self.pending_entries = None; } let static_token = self @@ -188,12 +217,42 @@ impl SessionStateInner { // Note that all async methods yield to the owner of the coroutine. They are not running in a tokio // context. You may not perform regular async operations in them. 
-impl ReconcileRoutine { +impl Coroutine { + pub async fn init_reconciliation( + mut self, + our_handle: AreaOfInterestHandle, + their_handle: AreaOfInterestHandle, + ) -> Result<(), Error> { + debug!("init reconciliation"); + let mut state = self.state.lock().unwrap(); + let our_aoi = state.our_resources.areas_of_interest.get(&our_handle)?; + let their_aoi = state.their_resources.areas_of_interest.get(&their_handle)?; + + let our_capability = state + .our_resources + .capabilities + .get(&our_aoi.authorisation)?; + let namespace: NamespaceId = our_capability.granted_namespace().into(); + + let common_aoi = &our_aoi + .area() + .intersection(&their_aoi.area()) + .ok_or(Error::AreaOfInterestDoesNotOverlap)?; + + let range = common_aoi.into_range(); + state.reconciliation_started = true; + drop(state); + let fingerprint = self.store.fingerprint(namespace, &range)?; + self.send_fingerprint(range, fingerprint, our_handle, their_handle, None) + .await?; + Ok(()) + } pub async fn on_send_fingerprint( mut self, message: ReconciliationSendFingerprint, ) -> Result<(), Error> { + debug!("on_send_fingerprint start"); let ReconciliationSendFingerprint { range, fingerprint: their_fingerprint, @@ -243,12 +302,14 @@ impl ReconcileRoutine { self.split_range_and_send_parts(namespace, &range, our_handle, their_handle) .await?; } + debug!("on_send_fingerprint done"); Ok(()) } pub async fn on_announce_entries( mut self, message: ReconciliationAnnounceEntries, ) -> Result<(), Error> { + debug!("on_announce_entries start"); let ReconciliationAnnounceEntries { range, count, @@ -261,10 +322,12 @@ impl ReconcileRoutine { let namespace = { let mut state = self.state.lock().unwrap(); + debug!(?state, "STATE"); state.clear_pending_range_if_some(our_handle, is_final_reply_for_range)?; if state.pending_entries.is_some() { return Err(Error::InvalidMessageInCurrentState); } + debug!("after"); let namespace = state.range_is_authorised(&range, &our_handle, &their_handle)?; if count != 0 { 
state.pending_entries = Some(count); @@ -283,6 +346,7 @@ impl ReconcileRoutine { ) .await?; } + debug!("on_announce_entries done"); Ok(()) } @@ -332,8 +396,11 @@ impl ReconcileRoutine { let sender = self.channels.sender(message.logical_channel()); loop { - match sender.send(&message)? { - WriteOutcome::Ok => break Ok(()), + match sender.send_or_set_notify(&message)? { + WriteOutcome::Ok => { + info!(msg=%message, "sent"); + break Ok(()); + } WriteOutcome::BufferFull => { self.co.yield_(Yield::SendBufferFull(channel)).await; } diff --git a/iroh-willow/src/store/actor.rs b/iroh-willow/src/store/actor.rs index aa8253bec3..026df183de 100644 --- a/iroh-willow/src/store/actor.rs +++ b/iroh-willow/src/store/actor.rs @@ -1,6 +1,7 @@ use std::{ collections::{hash_map, HashMap, VecDeque}, sync::{Arc, Mutex}, + thread::JoinHandle, }; use futures::{ @@ -11,89 +12,183 @@ use genawaiter::{ sync::{Co, Gen}, GeneratorState, }; -use tracing::error; +use tokio::sync::oneshot; +use tracing::{debug, error, error_span, info, instrument, warn}; // use iroh_net::NodeId; use super::Store; use crate::{ - proto::wgps::{LogicalChannel, Message, ReconciliationSendEntry}, + proto::{ + grouping::{NamespacedRange, ThreeDRange}, + keys::NamespaceId, + wgps::{AreaOfInterestHandle, LogicalChannel, Message, ReconciliationSendEntry}, + willow::{AuthorisedEntry, Entry}, + }, session::{ - coroutine::{Channels, ReconcileRoutine, SessionState, Yield}, + coroutine::{Channels, Coroutine, SessionState, Yield}, Error, }, util::channel::{self, ReadOutcome, Receiver}, }; +use iroh_base::key::NodeId; pub const CHANNEL_CAP: usize = 1024; -#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)] -pub struct SessionId(u64); -pub type NodeId = SessionId; +// #[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)] +// pub struct SessionId(u64); +// pub type NodeId = SessionId; +#[derive(Debug, Clone)] pub struct StoreHandle { tx: flume::Sender, + join_handle: Arc>>, +} + +#[derive(Debug, Clone)] +pub enum Interest { + Send, + 
Recv, +} + +#[derive(Debug, Clone)] +pub struct Notifier { + store: StoreHandle, + peer: NodeId, + channel: LogicalChannel, + direction: Interest, +} + +impl Notifier { + pub fn channel(&self) -> LogicalChannel { + self.channel + } + pub async fn notify(&self) -> anyhow::Result<()> { + let msg = match self.direction { + Interest::Send => ToActor::ResumeSend { + peer: self.peer, + channel: self.channel, + }, + Interest::Recv => ToActor::ResumeRecv { + peer: self.peer, + channel: self.channel, + }, + }; + self.store.send(msg).await?; + Ok(()) + } } + impl StoreHandle { - pub fn spawn(store: S) -> StoreHandle { + pub fn spawn(store: S, me: NodeId) -> StoreHandle { let (tx, rx) = flume::bounded(CHANNEL_CAP); - let _join_handle = std::thread::spawn(move || { - let actor = StorageThread { - store, - sessions: Default::default(), - actor_rx: rx, - }; - if let Err(error) = actor.run() { - error!(?error, "storage thread failed"); - }; - }); - StoreHandle { tx } + let join_handle = std::thread::Builder::new() + .name("sync-actor".to_string()) + .spawn(move || { + let span = error_span!("store", me=%me.fmt_short()); + let _enter = span.enter(); + + let mut actor = StorageThread { + store, + sessions: Default::default(), + actor_rx: rx, + }; + if let Err(error) = actor.run() { + error!(?error, "storage thread failed"); + }; + }) + .expect("failed to spawn thread"); + let join_handle = Arc::new(Some(join_handle)); + StoreHandle { tx, join_handle } } pub async fn send(&self, action: ToActor) -> anyhow::Result<()> { self.tx.send_async(action).await?; Ok(()) } + pub fn notifier( + &self, + channel: LogicalChannel, + direction: Interest, + peer: NodeId, + ) -> Notifier { + Notifier { + store: self.clone(), + peer, + channel, + direction, + } + } } -#[derive(Debug)] +impl Drop for StoreHandle { + fn drop(&mut self) { + // this means we're dropping the last reference + if let Some(handle) = Arc::get_mut(&mut self.join_handle) { + self.tx.send(ToActor::Shutdown { reply: None }).ok(); 
+ let handle = handle.take().expect("this can only run once"); + if let Err(err) = handle.join() { + warn!(?err, "Failed to join sync actor"); + } + } + } +} +#[derive(derive_more::Debug)] pub enum ToActor { InitSession { peer: NodeId, + #[debug(skip)] state: SessionState, + #[debug(skip)] channels: Arc, + start: Option<(AreaOfInterestHandle, AreaOfInterestHandle)>, }, DropSession { peer: NodeId, }, - ResumeWrite { + ResumeSend { peer: NodeId, channel: LogicalChannel, }, - ResumeRead { + ResumeRecv { peer: NodeId, channel: LogicalChannel, }, + GetEntries { + namespace: NamespaceId, + #[debug(skip)] + reply: flume::Sender, + }, + Shutdown { + #[debug(skip)] + reply: Option>, + }, } #[derive(Debug)] struct StorageSession { state: SessionState, channels: Arc, - waiting: WaitingCoroutines, + pending: PendingCoroutines, } #[derive(derive_more::Debug, Default)] -struct WaitingCoroutines { +struct PendingCoroutines { #[debug("{}", "on_control.len()")] on_control: VecDeque, #[debug("{}", "on_reconciliation.len()")] on_reconciliation: VecDeque, } -impl WaitingCoroutines { +impl PendingCoroutines { fn get_mut(&mut self, channel: LogicalChannel) -> &mut VecDeque { match channel { - LogicalChannel::ControlChannel => &mut self.on_control, - LogicalChannel::ReconciliationChannel => &mut self.on_reconciliation, + LogicalChannel::Control => &mut self.on_control, + LogicalChannel::Reconciliation => &mut self.on_reconciliation, + } + } + fn get(&self, channel: LogicalChannel) -> &VecDeque { + match channel { + LogicalChannel::Control => &self.on_control, + LogicalChannel::Reconciliation => &self.on_reconciliation, } } fn push_back(&mut self, channel: LogicalChannel, generator: ReconcileGen) { @@ -105,6 +200,9 @@ impl WaitingCoroutines { fn pop_front(&mut self, channel: LogicalChannel) -> Option { self.get_mut(channel).pop_front() } + fn len(&self, channel: LogicalChannel) -> usize { + self.get(channel).len() + } fn is_empty(&self) -> bool { self.on_control.is_empty() && 
self.on_reconciliation.is_empty() @@ -122,71 +220,82 @@ type ReconcileFut = LocalBoxFuture<'static, Result<(), Error>>; type ReconcileGen = Gen; impl StorageThread { - pub fn run(mut self) -> anyhow::Result<()> { + pub fn run(&mut self) -> anyhow::Result<()> { loop { - match self.actor_rx.recv() { + let message = match self.actor_rx.recv() { Err(_) => break, - Ok(message) => self.handle_message(message)?, + Ok(message) => message, + }; + match message { + ToActor::Shutdown { reply } => { + if let Some(reply) = reply { + reply.send(()).ok(); + } + break; + } + message => self.handle_message(message)?, } } Ok(()) } fn handle_message(&mut self, message: ToActor) -> Result<(), Error> { + debug!(?message, "tick: handle_message"); match message { + ToActor::Shutdown { .. } => unreachable!("handled in run"), ToActor::InitSession { peer, state, channels, + start, } => { let session = StorageSession { state, channels, - waiting: Default::default(), + pending: Default::default(), }; self.sessions.insert(peer, session); - self.resume_read(peer, LogicalChannel::ReconciliationChannel)?; + if let Some((our_handle, their_handle)) = start { + self.start_coroutine(peer, |routine| { + routine + .init_reconciliation(our_handle, their_handle) + .boxed_local() + })?; + } + self.resume_recv(peer, LogicalChannel::Reconciliation)?; + self.resume_send(peer, LogicalChannel::Reconciliation)?; + self.resume_send(peer, LogicalChannel::Control)?; } ToActor::DropSession { peer } => { self.sessions.remove(&peer); } - ToActor::ResumeWrite { peer, channel } => { - self.resume_write(peer, channel)?; + ToActor::ResumeSend { peer, channel } => { + self.resume_send(peer, channel)?; } - ToActor::ResumeRead { peer, channel } => { - self.resume_read(peer, channel)?; + ToActor::ResumeRecv { peer, channel } => { + self.resume_recv(peer, channel)?; } - } - Ok(()) - } - fn resume_read(&mut self, peer: NodeId, channel: LogicalChannel) -> Result<(), Error> { - let channel = 
self.session(&peer)?.channels.receiver(channel).clone(); - loop { - match channel.read_message()? { - ReadOutcome::NeedMoreData => { - channel.need_notify(); - break; - } - ReadOutcome::Item(message) => { - self.on_message(peer, message)?; + ToActor::GetEntries { namespace, reply } => { + let entries = self + .store + .get_entries(namespace, &ThreeDRange::full()) + .filter_map(|r| r.ok()); + for entry in entries { + reply.send(entry).ok(); } } } Ok(()) } - fn session_mut(&mut self, peer: &NodeId) -> Result<&mut StorageSession, Error> { - self.sessions - .get_mut(peer) - .ok_or(Error::InvalidMessageInCurrentState) + self.sessions.get_mut(peer).ok_or(Error::SessionNotFound) } fn session(&mut self, peer: &NodeId) -> Result<&StorageSession, Error> { - self.sessions - .get(peer) - .ok_or(Error::InvalidMessageInCurrentState) + self.sessions.get(peer).ok_or(Error::SessionNotFound) } fn on_message(&mut self, peer: NodeId, message: Message) -> Result<(), Error> { + info!(msg=%message, "recv"); match message { Message::ReconciliationSendFingerprint(message) => { self.start_coroutine(peer, |routine| { @@ -200,31 +309,47 @@ impl StorageThread { } Message::ReconciliationSendEntry(message) => { let session = self.session_mut(&peer)?; - let authorised_entry = session - .state - .lock() - .unwrap() - .authorize_send_entry(message)?; + let authorised_entry = { + let mut state = session.state.lock().unwrap(); + let authorised_entry = state.authorize_send_entry(message)?; + state.trigger_notify_if_complete(); + authorised_entry + }; self.store.ingest_entry(&authorised_entry)?; + debug!("ingested entry"); } _ => return Err(Error::UnsupportedMessage), } + let session = self.session(&peer)?; + let state = session.state.lock().unwrap(); + let started = state.reconciliation_started; + let pending_ranges = &state.pending_ranges; + let pending_entries = &state.pending_entries; + let is_complete = state.is_complete(); + info!( + is_complete, + started, + ?pending_entries, + ?pending_ranges, 
+ "handled" + ); + Ok(()) } fn start_coroutine( &mut self, peer: NodeId, - producer: impl FnOnce(ReconcileRoutine) -> ReconcileFut, + producer: impl FnOnce(Coroutine) -> ReconcileFut, ) -> Result<(), Error> { - let session = self.sessions.get_mut(&peer).ok_or(Error::SessionLost)?; + let session = self.sessions.get_mut(&peer).ok_or(Error::SessionNotFound)?; let snapshot = Arc::new(self.store.snapshot()?); let channels = session.channels.clone(); let state = session.state.clone(); - let mut generator = Gen::new(move |co| { - let routine = ReconcileRoutine { + let generator = Gen::new(move |co| { + let routine = Coroutine { store: snapshot, channels, state, @@ -232,30 +357,63 @@ impl StorageThread { }; (producer)(routine) }); - match generator.resume() { - GeneratorState::Yielded(Yield::SendBufferFull(channel)) => { - session.waiting.push_back(channel, generator); + self.resume_coroutine(peer, generator) + } + + #[instrument(skip_all, fields(session=%peer.fmt_short(),ch=%channel.fmt_short()))] + fn resume_recv(&mut self, peer: NodeId, channel: LogicalChannel) -> Result<(), Error> { + let session = self.session(&peer)?; + debug!("resume"); + let channel = session.channels.receiver(channel).clone(); + loop { + match channel.read_message_or_set_notify()? 
{ + ReadOutcome::Closed => { + debug!("yield: Closed"); + break; + } + ReadOutcome::ReadBufferEmpty => { + debug!("yield: ReadBufferEmpty"); + break; + } + ReadOutcome::Item(message) => { + debug!(?message, "recv"); + self.on_message(peer, message)?; + } + } + } + Ok(()) + } + + #[instrument(skip_all, fields(session=%peer.fmt_short(), ch=%channel.fmt_short()))] + fn resume_send(&mut self, peer: NodeId, channel: LogicalChannel) -> Result<(), Error> { + let session = self.session_mut(&peer)?; + debug!(pending = session.pending.len(channel), "resume"); + let generator = session.pending.pop_front(channel); + match generator { + Some(generator) => self.resume_coroutine(peer, generator), + None => { + debug!("nothing to resume"); Ok(()) } - GeneratorState::Complete(res) => res, } } - fn resume_write(&mut self, peer: NodeId, channel: LogicalChannel) -> Result<(), Error> { + fn resume_coroutine(&mut self, peer: NodeId, mut generator: ReconcileGen) -> Result<(), Error> { + debug!(session = peer.fmt_short(), "resume"); let session = self.session_mut(&peer)?; - let Some(mut generator) = session.waiting.pop_front(channel) else { - // debug_assert!(false, "resume_coroutine called but no generator"); - // TODO: error? 
- return Ok(()); - }; match generator.resume() { GeneratorState::Yielded(why) => match why { Yield::SendBufferFull(channel) => { - session.waiting.push_front(channel, generator); + debug!("yield: SendBufferFull"); + session.pending.push_back(channel, generator); Ok(()) } }, - GeneratorState::Complete(res) => res, + GeneratorState::Complete(res) => { + debug!(?res, "done"); + session.state.lock().unwrap().trigger_notify_if_complete(); + res + } } } } diff --git a/iroh-willow/src/util.rs b/iroh-willow/src/util.rs index b6cc25ed7d..b49689ca99 100644 --- a/iroh-willow/src/util.rs +++ b/iroh-willow/src/util.rs @@ -2,7 +2,7 @@ use std::io; pub mod channel; -pub trait Encoder { +pub trait Encoder: std::fmt::Debug { fn encoded_len(&self) -> usize; fn encode_into(&self, out: &mut W) -> anyhow::Result<()>; diff --git a/iroh-willow/src/util/channel.rs b/iroh-willow/src/util/channel.rs index d80dbf89dc..ece52bf8d4 100644 --- a/iroh-willow/src/util/channel.rs +++ b/iroh-willow/src/util/channel.rs @@ -6,6 +6,7 @@ use std::{ use bytes::{Buf, Bytes, BytesMut}; use tokio::sync::Notify; +use tracing::{debug, info}; use crate::proto::wgps::Message; @@ -20,6 +21,7 @@ struct Shared { write_blocked: bool, need_read_notify: bool, need_write_notify: bool, + closed: bool } impl Shared { @@ -32,12 +34,23 @@ impl Shared { write_blocked: false, need_read_notify: false, need_write_notify: false, + closed: false } } + fn close(&mut self) { + self.closed = true; + } + fn closed(&self) -> bool { + self.closed + } fn read_slice(&self) -> &[u8] { &self.buf[..] 
} + fn read_buf_empty(&self) -> bool { + self.buf.is_empty() + } + fn read_advance(&mut self, cnt: usize) { self.buf.advance(cnt); if cnt > 0 && self.write_blocked { @@ -64,16 +77,21 @@ impl Shared { let new_len = self.buf.remaining() + len; // TODO: check if the potential truncate harms perf self.buf.resize(new_len, 0u8); - self.notify_readable.notify_one(); Some(&mut self.buf[old_len..new_len]) } } fn write_message(&mut self, item: &T) -> anyhow::Result { let len = item.encoded_len(); + // debug!(?item, len = len, "write_message"); if let Some(slice) = self.write_slice(len) { + // debug!(len = slice.len(), "write_message got slice"); let mut cursor = io::Cursor::new(slice); item.encode_into(&mut cursor)?; + // debug!("RES {res:?}"); + // res?; + self.notify_readable.notify_one(); + // debug!("wrote and notified"); Ok(WriteOutcome::Ok) } else { Ok(WriteOutcome::BufferFull) @@ -82,8 +100,11 @@ impl Shared { fn read_message(&mut self) -> anyhow::Result> { let data = self.read_slice(); + if self.closed() { + return Ok(ReadOutcome::Closed); + } let res = match T::decode_from(data)? 
{ - DecodeOutcome::NeedMoreData => ReadOutcome::NeedMoreData, + DecodeOutcome::NeedMoreData => ReadOutcome::ReadBufferEmpty, DecodeOutcome::Decoded { item, consumed } => { self.read_advance(consumed); ReadOutcome::Item(item) @@ -92,12 +113,12 @@ impl Shared { Ok(res) } - fn need_read_notify(&mut self) { - self.need_read_notify = true; - } - fn need_write_notify(&mut self) { - self.need_write_notify = true; - } + // fn receiver_want_notify(&mut self) { + // self.need_read_notify = true; + // } + // fn need_write_notify(&mut self) { + // self.need_write_notify = true; + // } fn remaining_write_capacity(&self) -> usize { self.max_buffer_size - self.buf.len() @@ -106,7 +127,8 @@ impl Shared { #[derive(Debug)] pub enum ReadOutcome { - NeedMoreData, + ReadBufferEmpty, + Closed, Item(T), } @@ -136,14 +158,37 @@ impl Receiver { self.shared.lock().unwrap().read_bytes() } - pub fn read_message(&self) -> anyhow::Result> { - self.shared.lock().unwrap().read_message() + pub async fn read_bytes_async(&self) -> Option { + loop { + let notify = { + let mut shared = self.shared.lock().unwrap(); + if shared.closed() { + return None; + } + if !shared.read_buf_empty() { + return Some(shared.read_bytes()); + } + shared.notify_readable.clone() + }; + notify.notified().await + } } - pub fn need_notify(&self) { - self.shared.lock().unwrap().need_read_notify() + pub fn read_message_or_set_notify(&self) -> anyhow::Result> { + let mut shared = self.shared.lock().unwrap(); + let outcome = shared.read_message()?; + if matches!(outcome, ReadOutcome::ReadBufferEmpty) { + shared.need_read_notify = true; + } + Ok(outcome) } + pub fn set_notify_on_receivable(&self) { + self.shared.lock().unwrap().need_read_notify = true; + } + pub fn is_sendable_notify_set(&self) -> bool { + self.shared.lock().unwrap().need_write_notify + } pub async fn notify_readable(&self) { let shared = self.shared.lock().unwrap(); if !shared.read_slice().is_empty() { @@ -154,17 +199,22 @@ impl Receiver { 
notify.notified().await } - pub async fn read_message_async(&self) -> anyhow::Result { + pub async fn read_message_async(&self) -> anyhow::Result> { loop { - let mut shared = self.shared.lock().unwrap(); - let notify = Arc::clone(&shared.notify_readable); - match shared.read_message()? { - ReadOutcome::NeedMoreData => { - drop(shared); - notify.notified().await; + let notify = { + let mut shared = self.shared.lock().unwrap(); + match shared.read_message()? { + ReadOutcome::ReadBufferEmpty => shared.notify_readable.clone(), + ReadOutcome::Closed => return Ok(None), + ReadOutcome::Item(item) => { + // debug!("read_message_async read"); + return Ok(Some(item)); + } } - ReadOutcome::Item(item) => return Ok(item), - } + }; + // debug!("read_message_async NeedMoreData wait"); + notify.notified().await; + // debug!("read_message_async NeedMoreData notified"); } } } @@ -189,33 +239,45 @@ impl Sender { // let mut shared = self.shared.lock().unwrap(); // shared.write_slice(len) // } - pub fn need_notify(&self) { - self.shared.lock().unwrap().need_write_notify() + pub fn set_notify_on_sendable(&self) { + self.shared.lock().unwrap().need_write_notify = true; } - fn write_slice(&self, data: &[u8]) -> bool { - let mut shared = self.shared.lock().unwrap(); - match shared.write_slice(data.len()) { - None => false, - Some(out) => { - out.copy_from_slice(data); - true - } - } + pub fn is_receivable_notify_set(&self) -> bool { + self.shared.lock().unwrap().need_read_notify + } + + pub fn close(&self) { + self.shared.lock().unwrap().close() } - async fn write_slice_async(&self, data: &[u8]) -> bool { + // fn write_slice(&self, data: &[u8]) -> bool { + // let mut shared = self.shared.lock().unwrap(); + // match shared.write_slice(data.len()) { + // None => false, + // Some(out) => { + // out.copy_from_slice(data); + // true + // } + // } + // } + + pub async fn write_slice_async(&self, data: &[u8]) { loop { - let mut shared = self.shared.lock().unwrap(); - if 
shared.remaining_write_capacity() < data.len() { - let notify = shared.notify_writable.clone(); - drop(shared); - notify.notified().await; - } else { - let out = shared.write_slice(data.len()).expect("just checked"); - out.copy_from_slice(data); - return true; - } + let notify = { + let mut shared = self.shared.lock().unwrap(); + if shared.remaining_write_capacity() < data.len() { + let notify = shared.notify_writable.clone(); + notify.clone() + } else { + let out = shared.write_slice(data.len()).expect("just checked"); + out.copy_from_slice(data); + shared.notify_readable.notify_one(); + break; + // return true; + } + }; + notify.notified().await; } } @@ -233,11 +295,20 @@ impl Sender { self.shared.lock().unwrap().remaining_write_capacity() } + pub fn send_or_set_notify(&self, message: &T) -> anyhow::Result { + let mut shared = self.shared.lock().unwrap(); + let outcome = shared.write_message(message)?; + if matches!(outcome, WriteOutcome::BufferFull) { + shared.need_write_notify = true; + } + Ok(outcome) + } + pub fn send(&self, message: &T) -> anyhow::Result { self.shared.lock().unwrap().write_message(message) } - // pub async fn send_co( + // pub async fn sNamespacePublicKeyend_co( // &self, // message: &T, // yield_fn: F, @@ -257,17 +328,16 @@ impl Sender { // } // } - pub async fn send_async(&self, message: T) -> anyhow::Result<()> { + pub async fn send_async(&self, message: &T) -> anyhow::Result<()> { loop { - let mut shared = self.shared.lock().unwrap(); - match shared.write_message(&message)? { - WriteOutcome::Ok => return Ok(()), - WriteOutcome::BufferFull => { - let notify = shared.notify_writable.clone(); - drop(shared); - notify.notified().await; + let notify = { + let mut shared = self.shared.lock().unwrap(); + match shared.write_message(message)? 
{ + WriteOutcome::Ok => return Ok(()), + WriteOutcome::BufferFull => shared.notify_writable.clone(), } - } + }; + notify.notified().await; } } } From bbc005724031ba15506b864fa6571982063dcc6f Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 7 May 2024 23:53:20 +0200 Subject: [PATCH 016/198] working --- iroh-willow/src/net.rs | 330 ++++++++++++-------- iroh-willow/src/proto/grouping.rs | 5 +- iroh-willow/src/proto/wgps.rs | 41 ++- iroh-willow/src/session.rs | 432 +-------------------------- iroh-willow/src/session/coroutine.rs | 218 ++++++++++++-- iroh-willow/src/session/resource.rs | 50 +++- iroh-willow/src/store/actor.rs | 381 +++++++++++++---------- iroh-willow/src/util.rs | 2 +- iroh-willow/src/util/channel.rs | 42 +-- 9 files changed, 751 insertions(+), 750 deletions(-) diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 1b245a5c3b..026564ecca 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -1,7 +1,7 @@ use std::{pin::Pin, sync::Arc, task::Poll}; use anyhow::{anyhow, ensure, Context}; -use futures::{FutureExt, SinkExt, Stream}; +use futures::{FutureExt, SinkExt, Stream, TryFutureExt}; use iroh_base::{hash::Hash, key::NodeId}; use tokio::{ io::{AsyncReadExt, AsyncWriteExt}, @@ -9,18 +9,18 @@ use tokio::{ }; // use tokio_stream::StreamExt; // use tokio_util::codec::{FramedRead, FramedWrite}; -use tracing::{debug, instrument, Instrument, Span}; +use tracing::{debug, error_span, info, instrument, Instrument, Span}; use crate::{ proto::wgps::{ - AccessChallenge, ChallengeHash, LogicalChannel, CHALLENGE_HASH_LENGTH, + AccessChallenge, ChallengeHash, LogicalChannel, Message, CHALLENGE_HASH_LENGTH, MAX_PAYLOAD_SIZE_POWER, }, - session::{coroutine::Channels, Role, Session, SessionInit}, - store::actor::{ - Interest, Notifier, StoreHandle, - ToActor::{self, ResumeRecv}, + session::{ + coroutine::{Channels, Yield}, + Role, Session, SessionInit, }, + store::actor::{Interest, Notifier, StoreHandle, ToActor}, util::{ 
channel::{channel, Receiver, Sender}, Decoder, Encoder, @@ -52,37 +52,58 @@ pub async fn run( our_role: Role, init: SessionInit, ) -> anyhow::Result<()> { - let (mut control_send, mut control_recv) = match our_role { + let (mut control_send_stream, mut control_recv_stream) = match our_role { Role::Alfie => conn.open_bi().await?, Role::Betty => conn.accept_bi().await?, }; + control_send_stream.set_priority(i32::MAX)?; let our_nonce: AccessChallenge = rand::random(); debug!("start"); - let (received_commitment, max_payload_size) = - exchange_commitments(&mut control_send, &mut control_recv, &our_nonce).await?; + let (received_commitment, max_payload_size) = exchange_commitments( + &mut control_send_stream, + &mut control_recv_stream, + &our_nonce, + ) + .await?; debug!("exchanged comittments"); - let (mut reconciliation_send, mut reconciliation_recv) = match our_role { + let (mut reconciliation_send_stream, mut reconciliation_recv_stream) = match our_role { Role::Alfie => conn.open_bi().await?, Role::Betty => conn.accept_bi().await?, }; - reconciliation_send.write_u8(0u8).await?; - reconciliation_recv.read_u8().await?; + reconciliation_send_stream.write_u8(0u8).await?; + reconciliation_recv_stream.read_u8().await?; debug!("reconcile channel open"); - let (reconciliation_send_tx, reconciliation_send_rx) = channel(1024); - let (reconciliation_recv_tx, reconciliation_recv_rx) = channel(1024); - let (control_send_tx, control_send_rx) = channel(1024); - let (control_recv_tx, control_recv_rx) = channel(1024); + let mut join_set = JoinSet::new(); + let (control_send, control_recv) = spawn_channel( + &mut join_set, + &store, + peer, + LogicalChannel::Control, + 1024, + control_send_stream, + control_recv_stream, + ); + let (reconciliation_send, reconciliation_recv) = spawn_channel( + &mut join_set, + &store, + peer, + LogicalChannel::Reconciliation, + 1024, + reconciliation_send_stream, + reconciliation_recv_stream, + ); + let channels = Channels { - control_send: 
control_send_tx, - control_recv: control_recv_rx, - reconciliation_send: reconciliation_send_tx, - reconciliation_recv: reconciliation_recv_rx, + control_send, + control_recv, + reconciliation_send, + reconciliation_recv, }; - let session = Session::new( + let mut session = Session::new( peer, our_role, our_nonce, @@ -93,119 +114,83 @@ pub async fn run( store.clone(), ); - let res = { - let on_complete = session.notify_complete(); + let on_complete = session.notify_complete(); + let session_fut = async move { session.run_control().await }; - let session_fut = session.run_control(); - - let control_recv_fut = recv_loop( - &mut control_recv, - control_recv_tx, - store.notifier(LogicalChannel::Control, Interest::Recv, peer), - ); - let reconciliation_recv_fut = recv_loop( - &mut reconciliation_recv, - reconciliation_recv_tx, - store.notifier(LogicalChannel::Reconciliation, Interest::Recv, peer), - ); - let control_send_fut = send_loop( - &mut control_send, - control_send_rx, - store.notifier(LogicalChannel::Control, Interest::Send, peer), - ); - let reconciliation_send_fut = send_loop( - &mut reconciliation_send, - reconciliation_send_rx, - store.notifier(LogicalChannel::Reconciliation, Interest::Send, peer), - ); - tokio::pin!(session_fut); - tokio::pin!(control_send_fut); - tokio::pin!(reconciliation_send_fut); - tokio::pin!(control_recv_fut); - tokio::pin!(reconciliation_recv_fut); - - // let finish_tasks_fut = async { - // Result::<_, anyhow::Error>::Ok(()) - // }; - // - // finish_tasks_fut.await?; - // Ok(()) - let mut completed = false; - tokio::select! 
{ - biased; - _ = on_complete.notified() => { - tracing::warn!("COMPLETE"); - channels.close_send(); - completed = true; - } - res = &mut session_fut => res.context("session")?, - res = &mut control_recv_fut => res.context("control_recv")?, - res = &mut control_send_fut => res.context("control_send")?, - res = &mut reconciliation_recv_fut => res.context("reconciliation_recv")?, - res = &mut reconciliation_send_fut => res.context("reconciliation_send")?, - } - tracing::warn!("CLOSED"); - if completed { - // control_send.finish().await?; - Ok(()) - } else { - Err(anyhow!( - "All tasks finished but reconciliation did not complete" - )) - } - // tokio::pin!(finish_tasks_fut); - // let res = tokio::select! { - // res = &mut finish_tasks_fut => { - // match res { - // // we completed before on_complete was triggered: no success - // Ok(()) => Err(anyhow!("all tasks finished but reconciliation was not completed")), - // Err(err) => Err(err), - // } - // } - // _ = on_complete.notified()=> { - // // finish_tasks_fut.abort(); - // // join_set.abort_all(); - // Ok(()) - // } - // }; - // res + let notified_fut = async move { + on_complete.notified().await; + tracing::info!("reconciliation complete"); + channels.close_send(); + Ok(()) }; - control_send.finish().await?; - reconciliation_send.finish().await?; - res + join_set.spawn(session_fut.map_err(anyhow::Error::from)); + join_set.spawn(notified_fut); + while let Some(res) = join_set.join_next().await { + res??; + } + Ok(()) +} + +fn spawn_channel( + join_set: &mut JoinSet>, + store: &StoreHandle, + peer: NodeId, + ch: LogicalChannel, + cap: usize, + send_stream: quinn::SendStream, + recv_stream: quinn::RecvStream, +) -> (Sender, Receiver) { + let (send_tx, send_rx) = channel(cap); + let (recv_tx, recv_rx) = channel(cap); + + let recv_fut = recv_loop( + recv_stream, + recv_tx, + store.notifier(peer, Yield::ChannelPending(ch, Interest::Recv)), + ) + .instrument(error_span!("recv", peer=%peer.fmt_short(), 
ch=%ch.fmt_short())); + + join_set.spawn(recv_fut); + + let send_fut = send_loop( + send_stream, + send_rx, + store.notifier(peer, Yield::ChannelPending(ch, Interest::Send)), + ) + .instrument(error_span!("send", peer=%peer.fmt_short(), ch=%ch.fmt_short())); + + join_set.spawn(send_fut); + + (send_tx, recv_rx) } -#[instrument(skip_all, fields(ch=%notifier.channel().fmt_short()))] +// #[instrument(skip_all, fields(ch=%notifier.channel().fmt_short()))] async fn recv_loop( - recv_stream: &mut quinn::RecvStream, + mut recv_stream: quinn::RecvStream, channel_sender: Sender, notifier: Notifier, ) -> anyhow::Result<()> { loop { - // debug!("wait"); let buf = recv_stream.read_chunk(1024 * 16, true).await?; if let Some(buf) = buf { channel_sender.write_slice_async(&buf.bytes[..]).await; debug!(len = buf.bytes.len(), "recv"); if channel_sender.is_receivable_notify_set() { - debug!("notify ResumeRecv"); + debug!("notify"); notifier.notify().await?; - // store_handle - // .send(ToActor::ResumeRecv { peer, channel }) - // .await?; } } else { - debug!("EOF"); break; } } - // recv_stream.stop() + channel_sender.close(); + debug!("recv_loop close"); Ok(()) } -#[instrument(skip_all, fields(ch=%notifier.channel().fmt_short()))] +// #[instrument(skip_all, fields(ch=%notifier.channel().fmt_short()))] async fn send_loop( - send_stream: &mut quinn::SendStream, + mut send_stream: quinn::SendStream, channel_receiver: Receiver, notifier: Notifier, ) -> anyhow::Result<()> { @@ -214,11 +199,12 @@ async fn send_loop( send_stream.write_chunk(data).await?; debug!(len, "sent"); if channel_receiver.is_sendable_notify_set() { - debug!("notify ResumeSend"); + debug!("notify"); notifier.notify().await?; } } send_stream.finish().await?; + debug!("send_loop close"); Ok(()) } @@ -272,11 +258,6 @@ mod tests { #[tokio::test] async fn smoke() -> anyhow::Result<()> { iroh_test::logging::setup_multithreaded(); - // use tracing_chrome::ChromeLayerBuilder; - // use tracing_subscriber::{prelude::*, 
registry::Registry}; - // let (chrome_layer, _guard) = ChromeLayerBuilder::new().build(); - // tracing_subscriber::registry().with(chrome_layer).init(); - let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(1); let n_betty = 1; let n_alfie = 2; @@ -520,3 +501,118 @@ mod tests { // Some(err) => Err(err), // } // }; +// tracing::info!("COMPLETE"); +// channels.close_send(); +// completed = true; +// } + +// let channel_futs = [control_send_fut, reconciliation_send_fut, control_recv_fut, reconciliation_recv_fut]; +// let channel_futs = tokio::join!(control_send_ft); +// +// let channel_fut = async move { +// tokio::join!( +// session_fut, +// control_send_fut, +// reconciliation_send_fut, +// control_recv_fut, +// reconciliation_recv_fut +// ) +// }; +// tokio::pin!(channel_fut); +// let channel_fut = async move { +// let +// // res = &mut session_fut => res.context("session")?, +// // res = &mut control_recv_fut => res.context("control_recv")?, +// // res = &mut control_send_fut => res.context("control_send")?, +// // res = &mut reconciliation_recv_fut => res.context("reconciliation_recv")?, +// // res = &mut reconciliation_send_fut => res.context("reconciliation_send")?, +// } +// tokio::pin!(channel_fut); +// let mut completed = false; +// tokio::select! 
{ +// biased; +// _ = on_complete.notified() => { +// tracing::info!("COMPLETE"); +// channels.close_send(); +// completed = true; +// } +// // res = &mut channel_fut => { +// // res.0?; +// // res.1?; +// // res.2?; +// // res.3?; +// // res.4?; +// // } +// res = &mut session_fut => res.context("session")?, +// res = &mut control_recv_fut => res.context("control_recv")?, +// res = &mut control_send_fut => res.context("control_send")?, +// res = &mut reconciliation_recv_fut => res.context("reconciliation_recv")?, +// res = &mut reconciliation_send_fut => res.context("reconciliation_send")?, +// } +// tracing::info!(?completed, "!CLOSED!"); +// if completed { +// let res = tokio::join!( +// session_fut, +// control_send_fut, +// reconciliation_send_fut, +// control_recv_fut, +// reconciliation_recv_fut +// ); +// // let res = channel_fut.await; +// res.0?; +// res.1?; +// res.2?; +// res.3?; +// res.4?; +// +// // control_send_fut.await?; +// // info!("control_send down"); +// // reconciliation_send_fut.await?; +// // info!("reconciliation_send down"); +// // +// // session_fut.await?; +// // info!("session down"); +// // +// // control_recv_fut.await?; +// // info!("control_recv down"); +// // reconciliation_recv_fut.await?; +// // info!("reconciliation_recv down"); +// // control_send.finish().await?; +// Ok(()) +// } else { +// Err(anyhow!( +// "All tasks finished but reconciliation did not complete" +// )) +// } +// tokio::pin!(finish_tasks_fut); +// let res = tokio::select! 
{ +// res = &mut finish_tasks_fut => { +// match res { +// // we completed before on_complete was triggered: no success +// Ok(()) => Err(anyhow!("all tasks finished but reconciliation was not completed")), +// Err(err) => Err(err), +// } +// } +// _ = on_complete.notified()=> { +// // finish_tasks_fut.abort(); +// // join_set.abort_all(); +// Ok(()) +// } +// }; +// res +// tokio::pin!(session_fut); +// tokio::pin!(control_send_fut); +// tokio::pin!(reconciliation_send_fut); +// tokio::pin!(control_recv_fut); +// tokio::pin!(reconciliation_recv_fut); +// tokio::pin!(notified_fut); +// let res = tokio::join!( +// session_fut, +// control_send_fut, +// reconciliation_send_fut, +// control_recv_fut, +// reconciliation_recv_fut, +// notified_fut +// ); +// tracing::warn!("RES {res:?}"); +// Ok(()) diff --git a/iroh-willow/src/proto/grouping.rs b/iroh-willow/src/proto/grouping.rs index c4b6762c39..e913e2012e 100644 --- a/iroh-willow/src/proto/grouping.rs +++ b/iroh-willow/src/proto/grouping.rs @@ -3,7 +3,10 @@ use std::cmp::Ordering; use bytes::Bytes; use serde::{Deserialize, Serialize}; -use super::{keys::NamespaceId, willow::{Entry, Path, SubspaceId, Timestamp}}; +use super::{ + keys::NamespaceId, + willow::{Entry, Path, SubspaceId, Timestamp}, +}; /// A three-dimensional range on a specific namespace. #[derive(Debug)] diff --git a/iroh-willow/src/proto/wgps.rs b/iroh-willow/src/proto/wgps.rs index 16e5771859..dc3ba49aad 100644 --- a/iroh-willow/src/proto/wgps.rs +++ b/iroh-willow/src/proto/wgps.rs @@ -47,7 +47,7 @@ pub type SyncSignature = meadowcap::UserSignature; pub type Receiver = meadowcap::UserPublicKey; /// The different resource handles employed by the WGPS. -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize, strum::Display)] pub enum HandleType { /// Resource handle for the private set intersection part of private area intersection. 
/// More precisely, an IntersectionHandle stores a PsiGroup member together with one of two possible states: @@ -69,7 +69,7 @@ pub enum HandleType { } /// The different logical channels employed by the WGPS. -#[derive(Debug, Serialize, Deserialize, Copy, Clone)] +#[derive(Debug, Serialize, Deserialize, Copy, Clone, Eq, PartialEq, Hash)] pub enum LogicalChannel { /// Control channel Control, @@ -118,29 +118,52 @@ pub struct CapabilityHandle(u64); #[derive(Debug, Serialize, Deserialize, Hash, Eq, PartialEq, Clone, Copy, derive_more::From)] pub struct StaticTokenHandle(u64); -pub trait Handle: std::hash::Hash + From + Copy + Eq + PartialEq { +#[derive(Debug, Hash, Eq, PartialEq, Clone, Copy, derive_more::From)] +pub enum ResourceHandle { + AreaOfInterest(AreaOfInterestHandle), + Intersection(IntersectionHandle), + Capability(CapabilityHandle), + StaticToken(StaticTokenHandle), +} + +pub trait IsHandle: + std::fmt::Debug + std::hash::Hash + From + Into + Copy + Eq + PartialEq +{ fn handle_type(&self) -> HandleType; + fn value(&self) -> u64; } -impl Handle for CapabilityHandle { +impl IsHandle for CapabilityHandle { fn handle_type(&self) -> HandleType { HandleType::Capability } + fn value(&self) -> u64 { + self.0 + } } -impl Handle for StaticTokenHandle { +impl IsHandle for StaticTokenHandle { fn handle_type(&self) -> HandleType { HandleType::StaticToken } + fn value(&self) -> u64 { + self.0 + } } -impl Handle for AreaOfInterestHandle { +impl IsHandle for AreaOfInterestHandle { fn handle_type(&self) -> HandleType { HandleType::AreaOfInterest } + fn value(&self) -> u64 { + self.0 + } } -impl Handle for IntersectionHandle { +impl IsHandle for IntersectionHandle { fn handle_type(&self) -> HandleType { HandleType::Intersection } + fn value(&self) -> u64 { + self.0 + } } /// Complete the commitment scheme to determine the challenge for read authentication. 
@@ -337,7 +360,7 @@ pub struct ReconciliationAnnounceEntries { } /// Transmit a LengthyEntry as part of 3d range-based set reconciliation. -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct ReconciliationSendEntry { /// The LengthyEntry itself. pub entry: LengthyEntry, @@ -358,7 +381,7 @@ impl ReconciliationSendEntry { } } -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct LengthyEntry { /// The Entry in question. pub entry: Entry, diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 8d2ca026f5..8199ba264b 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -17,10 +17,10 @@ use crate::{ meadowcap::InvalidCapability, wgps::{ AccessChallenge, AreaOfInterestHandle, CapabilityHandle, ChallengeHash, - CommitmentReveal, Fingerprint, LengthyEntry, LogicalChannel, Message, ReadCapability, - ReconciliationAnnounceEntries, ReconciliationSendEntry, ReconciliationSendFingerprint, - SetupBindAreaOfInterest, SetupBindReadCapability, SetupBindStaticToken, StaticToken, - StaticTokenHandle, + CommitmentReveal, Fingerprint, HandleType, LengthyEntry, LogicalChannel, Message, + ReadCapability, ReconciliationAnnounceEntries, ReconciliationSendEntry, + ReconciliationSendFingerprint, ResourceHandle, SetupBindAreaOfInterest, + SetupBindReadCapability, SetupBindStaticToken, StaticToken, StaticTokenHandle, }, willow::{AuthorisationToken, AuthorisedEntry, Unauthorised}, }, @@ -36,8 +36,6 @@ use self::{ resource::ScopedResources, }; -const LOGICAL_CHANNEL_CAP: usize = 128; - pub mod coroutine; pub mod resource; mod util; @@ -48,8 +46,8 @@ pub enum Error { Store(#[from] anyhow::Error), #[error("wrong secret key for capability")] WrongSecretKeyForCapability, - #[error("missing resource")] - MissingResource, + #[error("missing resource {0:?}")] + MissingResource(ResourceHandle), #[error("received capability is invalid")] InvalidCapability, 
#[error("received capability has an invalid signature")] @@ -178,26 +176,6 @@ pub struct Session { store_handle: StoreHandle, } -// #[derive(Debug)] -// pub struct Session { -// role: Role, -// _their_maximum_payload_size: usize, -// -// init: SessionInit, -// challenge: ChallengeState, -// -// control_channel: Channel, -// reconciliation_channel: Channel, -// -// our_resources: ScopedResources, -// their_resources: ScopedResources, -// pending_ranges: HashSet<(AreaOfInterestHandle, ThreeDRange)>, -// pending_entries: Option, -// -// reconciliation_started: bool, -// our_current_aoi: Option, -// } - impl Session { pub fn new( peer: NodeId, @@ -237,46 +215,23 @@ impl Session { self.state.lock().unwrap().notify_complete() } - // pub fn drain_outbox(&mut self) -> impl Iterator + '_ { - // self.control_channel - // .outbox_drain() - // .chain(self.reconciliation_channel.outbox_drain()) - // } - // pub fn our_role(&self) -> Role { self.our_role } - // pub fn recv(&mut self, message: Message) { - // match message.logical_channel() { - // LogicalChannel::ControlChannel => self.control_channel.inbox_push_or_drop(message), - // LogicalChannel::ReconciliationChannel => { - // self.reconciliation_channel.inbox_push_or_drop(message) - // } - // } - // } - - // pub fn is_complete(&self) -> bool { - // let state = self.state.lock().unwrap(); - // state.reconciliation_started - // && state.pending_ranges.is_empty() - // && state.pending_entries.is_none() - // } - - // pub async fn run(&mut self) - #[instrument(skip_all)] - pub async fn run_control(mut self) -> Result<(), Error> { + pub async fn run_control(&mut self) -> Result<(), Error> { loop { - trace!("wait recv"); + info!("wait recv"); let message = self .channels .receiver(LogicalChannel::Control) .read_message_async() - .await?; + .await; match message { None => break, Some(message) => { + let message = message?; info!(%message, "recv"); self.process_control(message).await?; let is_complete = 
self.state.lock().unwrap().is_complete(); @@ -284,8 +239,8 @@ impl Session { } } } + debug!("run_control finished"); Ok(()) - // Ok(()) } async fn send_control(&self, message: impl Into) -> Result<(), Error> { @@ -345,8 +300,6 @@ impl Session { Ok(()) } - // fn resources_mut(&self, scope: Scope) -> &mut - async fn process_control(&mut self, message: Message) -> Result<(), Error> { match message { Message::CommitmentReveal(msg) => { @@ -395,19 +348,6 @@ impl Session { start, }; self.store_handle.send(message).await?; - - // } - // if self.our_role == Role::Alfie { - // if let Some(our_handle) = self.our_current_aoi.clone() { - // self.init_reconciliation(our_handle, their_handle).await?; - // } else { - // warn!( - // "received area of interest from remote, but no area of interest set on our side" - // ); - // } - // } else { - // - // } } Message::ControlFreeHandle(_msg) => { // TODO: Free handles @@ -416,279 +356,6 @@ impl Session { } Ok(()) } - - // fn bind_static_token(&mut self, static_token: StaticToken) -> StaticTokenHandle { - // let (handle, is_new) = self - // .our_resources - // .static_tokens - // .bind_if_new(static_token.clone()); - // if is_new { - // let msg = SetupBindStaticToken { static_token }; - // self.control_channel - // .send(Message::SetupBindStaticToken(msg)); - // } - // handle - // } - - async fn init_reconciliation( - &mut self, - our_handle: AreaOfInterestHandle, - their_handle: AreaOfInterestHandle, - ) -> Result<(), Error> { - // let mut state = self.state.lock().unwrap(); - // let our_aoi = state.our_resources.areas_of_interest.get(&our_handle)?; - // let their_aoi = state.their_resources.areas_of_interest.get(&their_handle)?; - // - // let our_capability = state - // .our_resources - // .capabilities - // .get(&our_aoi.authorisation)?; - // let namespace = our_capability.granted_namespace(); - // - // let common_aoi = &our_aoi - // .area() - // .intersection(&their_aoi.area()) - // .ok_or(Error::AreaOfInterestDoesNotOverlap)?; - // 
- // let range = common_aoi.into_range(); - // state.reconciliation_started = true; - // drop(state); - // let range = NamespacedRange { - // namespace: namespace.into(), - // range, - // }; - let message = ToActor::InitSession { - peer: self.peer, - state: self.state.clone(), - channels: self.channels.clone(), - start: Some((our_handle, their_handle)), // send_fingerprint: Some(range), - }; - self.store_handle.send(message).await?; - Ok(()) - } - - // fn send_fingerprint( - // &mut self, - // range: ThreeDRange, - // fingerprint: Fingerprint, - // our_handle: AreaOfInterestHandle, - // their_handle: AreaOfInterestHandle, - // is_final_reply_for_range: Option, - // ) { - // self.pending_ranges.insert((our_handle, range.clone())); - // let msg = ReconciliationSendFingerprint { - // range, - // fingerprint, - // sender_handle: our_handle, - // receiver_handle: their_handle, - // is_final_reply_for_range, - // }; - // self.reconciliation_channel.send(msg); - // } - - // fn announce_empty( - // &mut self, - // range: ThreeDRange, - // our_handle: AreaOfInterestHandle, - // their_handle: AreaOfInterestHandle, - // want_response: bool, - // is_final_reply_for_range: Option, - // ) -> Result<(), Error> { - // if want_response { - // self.pending_ranges.insert((our_handle, range.clone())); - // } - // let msg = ReconciliationAnnounceEntries { - // range, - // count: 0, - // want_response, - // will_sort: false, - // sender_handle: our_handle, - // receiver_handle: their_handle, - // is_final_reply_for_range, - // }; - // self.reconciliation_channel - // .send(Message::ReconciliationAnnounceEntries(msg)); - // Ok(()) - // } - - // - // fn process_reconciliation( - // &mut self, - // store: &mut S, - // message: Message, - // ) -> Result<(), Error> { - // match message { - // Message::ReconciliationSendFingerprint(message) => { - // self.reconciliation_started = true; - // let ReconciliationSendFingerprint { - // range, - // fingerprint: their_fingerprint, - // 
sender_handle: their_handle, - // receiver_handle: our_handle, - // is_final_reply_for_range, - // } = message; - // - // self.clear_pending_range_if_some(our_handle, is_final_reply_for_range)?; - // - // let namespace = self.range_is_authorised(&range, &our_handle, &their_handle)?; - // let our_fingerprint = store.fingerprint(namespace, &range)?; - // - // // case 1: fingerprint match. - // if our_fingerprint == their_fingerprint { - // self.announce_empty( - // range.clone(), - // our_handle, - // their_handle, - // false, - // Some(range.clone()), - // )?; - // } - // // case 2: fingerprint is empty - // else if their_fingerprint.is_empty() { - // self.announce_and_send_entries( - // store, - // namespace, - // &range, - // our_handle, - // their_handle, - // true, - // Some(range.clone()), - // None, - // )?; - // } - // // case 3: fingerprint doesn't match and is non-empty - // else { - // // reply by splitting the range into parts unless it is very short - // self.split_range_and_send_parts( - // store, - // namespace, - // &range, - // our_handle, - // their_handle, - // )?; - // } - // } - // Message::ReconciliationAnnounceEntries(message) => { - // let ReconciliationAnnounceEntries { - // range, - // count, - // want_response, - // will_sort: _, - // sender_handle: their_handle, - // receiver_handle: our_handle, - // is_final_reply_for_range, - // } = message; - // self.clear_pending_range_if_some(our_handle, is_final_reply_for_range)?; - // if self.pending_entries.is_some() { - // return Err(Error::InvalidMessageInCurrentState); - // } - // let namespace = self.range_is_authorised(&range, &our_handle, &their_handle)?; - // if want_response { - // self.announce_and_send_entries( - // store, - // namespace, - // &range, - // our_handle, - // their_handle, - // false, - // Some(range.clone()), - // None, - // )?; - // } - // if count != 0 { - // self.pending_entries = Some(count); - // } - // } - // Message::ReconciliationSendEntry(message) => { - // let 
remaining = self - // .pending_entries - // .as_mut() - // .ok_or(Error::InvalidMessageInCurrentState)?; - // let ReconciliationSendEntry { - // entry, - // static_token_handle, - // dynamic_token, - // } = message; - // let static_token = self - // .their_resources - // .static_tokens - // .get(&static_token_handle)?; - // // TODO: avoid clone of static token? - // let authorisation_token = - // AuthorisationToken::from_parts(static_token.clone(), dynamic_token); - // let authorised_entry = - // AuthorisedEntry::try_from_parts(entry.entry, authorisation_token)?; - // store.ingest_entry(&authorised_entry)?; - // - // *remaining -= 1; - // if *remaining == 0 { - // self.pending_entries = None; - // } - // } - // _ => return Err(Error::UnsupportedMessage), - // } - // Ok(()) - // } - // - // fn range_is_authorised( - // &self, - // range: &ThreeDRange, - // receiver_handle: &AreaOfInterestHandle, - // sender_handle: &AreaOfInterestHandle, - // ) -> Result { - // let our_namespace = self.handle_to_namespace_id(Scope::Ours, receiver_handle)?; - // let their_namespace = self.handle_to_namespace_id(Scope::Theirs, sender_handle)?; - // if our_namespace != their_namespace { - // return Err(Error::AreaOfInterestNamespaceMismatch); - // } - // let our_aoi = self.handle_to_aoi(Scope::Ours, receiver_handle)?; - // let their_aoi = self.handle_to_aoi(Scope::Theirs, sender_handle)?; - // - // if !our_aoi.area().includes_range(&range) || !their_aoi.area().includes_range(&range) { - // return Err(Error::RangeOutsideCapability); - // } - // Ok(our_namespace.into()) - // } - // - // fn resources(&self, scope: Scope) -> &ScopedResources { - // match scope { - // Scope::Ours => &self.our_resources, - // Scope::Theirs => &self.their_resources, - // } - // } - - // fn resources_mut(&mut self, scope: Scope) -> &ScopedResources { - // match scope { - // Scope::Ours => &mut self.our_resources, - // Scope::Theirs => &mut self.their_resources, - // } - // } - // - // fn handle_to_capability( 
- // &self, - // scope: Scope, - // handle: &CapabilityHandle, - // ) -> Result<&ReadCapability, Error> { - // self.resources(scope).capabilities.get(handle) - // } - // - // fn handle_to_aoi( - // &self, - // scope: Scope, - // handle: &AreaOfInterestHandle, - // ) -> Result<&SetupBindAreaOfInterest, Error> { - // self.resources(scope).areas_of_interest.get(handle) - // } - // - // fn handle_to_namespace_id( - // &self, - // scope: Scope, - // handle: &AreaOfInterestHandle, - // ) -> Result<&NamespacePublicKey, Error> { - // let aoi = self.handle_to_aoi(scope, handle)?; - // let capability = self.resources(scope).capabilities.get(&aoi.authorisation)?; - // Ok(capability.granted_namespace()) - // } } #[derive(Copy, Clone, Debug)] @@ -697,83 +364,6 @@ pub enum Scope { Theirs, } -#[derive(Debug)] -pub struct Channel { - inbox: VecDeque, - outbox: VecDeque, - // issued_guarantees: u64, - // available_guarantees: u64, -} -impl Default for Channel { - fn default() -> Self { - Self::with_capacity(LOGICAL_CHANNEL_CAP) - } -} - -impl Channel { - pub fn with_capacity(cap: usize) -> Self { - Self { - inbox: VecDeque::with_capacity(cap), - outbox: VecDeque::with_capacity(cap), - // issued_guarantees: 0, - // available_guarantees: 0, - } - } - - // pub fn recv_guarantees(&mut self, count: u64) { - // self.available_guarantees += count; - // } - // - pub fn can_send(&self) -> bool { - self.outbox.len() < self.outbox.capacity() - } - - pub fn send(&mut self, value: impl Into) { - self.outbox.push_back(value.into()); - // self.available_guarantees -= 1; - } - - fn outbox_drain(&mut self) -> impl Iterator + '_ { - self.outbox.drain(..) 
- } - - fn inbox_pop(&mut self) -> Option { - self.inbox.pop_front() - } - - pub fn inbox_push_or_drop(&mut self, message: T) { - if let Some(dropped) = self.inbox_push(message) { - warn!(message=?dropped, "dropping message"); - } - } - pub fn inbox_push(&mut self, message: T) -> Option { - if self.has_inbox_capacity() { - self.inbox.push_back(message); - None - } else { - Some(message) - } - } - pub fn remaining_inbox_capacity(&self) -> usize { - // self.inbox.capacity() - self.inbox.len() - self.issued_guarantees as usize - self.inbox.capacity() - self.inbox.len() - } - - pub fn has_inbox_capacity(&self) -> bool { - self.remaining_inbox_capacity() > 0 - } - - // pub fn issuable_guarantees(&self) -> u64 { - // self.remaining_inbox_capacity() as u64 - self.issued_guarantees - // } - // - // pub fn issue_all_guarantees(&mut self) -> u64 { - // let val = self.issuable_guarantees(); - // self.issued_guarantees += val; - // val - // } -} - fn bitwise_xor(a: [u8; N], b: [u8; N]) -> [u8; N] { let mut res = [0u8; N]; for (i, (x1, x2)) in a.iter().zip(b.iter()).enumerate() { diff --git a/iroh-willow/src/session/coroutine.rs b/iroh-willow/src/session/coroutine.rs index 4470e83ad7..50e437ff52 100644 --- a/iroh-willow/src/session/coroutine.rs +++ b/iroh-willow/src/session/coroutine.rs @@ -1,5 +1,7 @@ use std::{ + cell::RefCell, collections::HashSet, + rc::Rc, sync::{Arc, Mutex}, }; @@ -7,6 +9,7 @@ use genawaiter::{ sync::{Co, Gen}, GeneratorState, }; +use iroh_net::NodeId; use tokio::sync::Notify; use tracing::{debug, info, warn}; @@ -17,27 +20,34 @@ use crate::{ wgps::{ AreaOfInterestHandle, CapabilityHandle, Fingerprint, LengthyEntry, LogicalChannel, Message, ReadCapability, ReconciliationAnnounceEntries, ReconciliationSendEntry, - ReconciliationSendFingerprint, SetupBindAreaOfInterest, SetupBindStaticToken, - StaticToken, StaticTokenHandle, + ReconciliationSendFingerprint, ResourceHandle, SetupBindAreaOfInterest, + SetupBindStaticToken, StaticToken, StaticTokenHandle, }, 
willow::{AuthorisationToken, AuthorisedEntry}, }, - store::{ReadonlyStore, SplitAction, Store, SyncConfig}, - util::channel::{Receiver, Sender, WriteOutcome}, + store::{ + actor::{CoroutineNotifier, Interest}, + ReadonlyStore, SplitAction, Store, SyncConfig, + }, + util::channel::{ReadOutcome, Receiver, Sender, WriteOutcome}, }; -use super::{resource::ScopedResources, Channel, Error, Role, Scope}; +use super::{resource::ScopedResources, Error, Role, Scope}; -#[derive(Debug, Copy, Clone)] +#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] pub enum Yield { - SendBufferFull(LogicalChannel), + ChannelPending(LogicalChannel, Interest), + ResourceMissing(ResourceHandle), } #[derive(derive_more::Debug)] -pub struct Coroutine { - pub store: Arc, +pub struct Coroutine { + pub peer: NodeId, + pub store_snapshot: Arc, + pub store_writer: Rc>, pub channels: Arc, pub state: SessionState, + pub notifier: CoroutineNotifier, #[debug(skip)] pub co: Co, } @@ -78,7 +88,7 @@ pub struct SessionStateInner { pub reconciliation_started: bool, pub pending_ranges: HashSet<(AreaOfInterestHandle, ThreeDRange)>, pub pending_entries: Option, - pub notify_complete: Arc + pub notify_complete: Arc, } impl SessionStateInner { @@ -94,9 +104,12 @@ impl SessionStateInner { && self.pending_entries.is_none() } - pub fn trigger_notify_if_complete(&mut self) { + pub fn trigger_notify_if_complete(&mut self) -> bool { if self.is_complete() { - self.notify_complete.notify_waiters() + self.notify_complete.notify_waiters(); + true + } else { + false } } @@ -104,6 +117,12 @@ impl SessionStateInner { Arc::clone(&self.notify_complete) } + // fn get_resource(&self, scope: Scope, handle: impl Into) { + // match handle.into() { + // + // } + // } + pub fn setup_bind_area_of_interest( &mut self, msg: SetupBindAreaOfInterest, @@ -145,6 +164,22 @@ impl SessionStateInner { Ok(authorised_entry) } + // async fn get_static_token_or_yield( + // &mut self, + // handle: &StaticTokenHandle, + // ) -> Result { + // // loop { + 
// // match self + // // .their_resources + // // .static_tokens + // // .get(&static_token_handle) { + // // Ok(token) => return Ok(token.clone()), + // // Err(_)=> {} + // // } + // // } + // todo!() + // } + fn clear_pending_range_if_some( &mut self, our_handle: AreaOfInterestHandle, @@ -163,7 +198,7 @@ impl SessionStateInner { } } - fn bind_static_token( + fn bind_our_static_token( &mut self, static_token: StaticToken, ) -> anyhow::Result<(StaticTokenHandle, Option)> { @@ -217,9 +252,43 @@ impl SessionStateInner { // Note that all async methods yield to the owner of the coroutine. They are not running in a tokio // context. You may not perform regular async operations in them. -impl Coroutine { - pub async fn init_reconciliation( +impl Coroutine { + pub async fn run( mut self, + init: Option<(AreaOfInterestHandle, AreaOfInterestHandle)>, + ) -> Result<(), Error> { + if let Some((our_handle, their_handle)) = init { + self.init_reconciliation(our_handle, their_handle).await?; + } + + while let Some(message) = self.recv(LogicalChannel::Reconciliation).await { + let message = message?; + self.on_message(message).await?; + if self.state.lock().unwrap().trigger_notify_if_complete() { + break; + } + } + + Ok(()) + } + + pub async fn on_message(&mut self, message: Message) -> Result<(), Error> { + info!(%message, "recv"); + match message { + Message::ReconciliationSendFingerprint(message) => { + self.on_send_fingerprint(message).await? + } + Message::ReconciliationAnnounceEntries(message) => { + self.on_announce_entries(message).await? 
+ } + Message::ReconciliationSendEntry(message) => self.on_send_entry(message).await?, + _ => return Err(Error::UnsupportedMessage), + }; + Ok(()) + } + + pub async fn init_reconciliation( + &mut self, our_handle: AreaOfInterestHandle, their_handle: AreaOfInterestHandle, ) -> Result<(), Error> { @@ -242,14 +311,14 @@ impl Coroutine { let range = common_aoi.into_range(); state.reconciliation_started = true; drop(state); - let fingerprint = self.store.fingerprint(namespace, &range)?; + let fingerprint = self.store_snapshot.fingerprint(namespace, &range)?; self.send_fingerprint(range, fingerprint, our_handle, their_handle, None) .await?; Ok(()) } pub async fn on_send_fingerprint( - mut self, + &mut self, message: ReconciliationSendFingerprint, ) -> Result<(), Error> { debug!("on_send_fingerprint start"); @@ -268,7 +337,7 @@ impl Coroutine { state.range_is_authorised(&range, &our_handle, &their_handle)? }; - let our_fingerprint = self.store.fingerprint(namespace, &range)?; + let our_fingerprint = self.store_snapshot.fingerprint(namespace, &range)?; // case 1: fingerprint match. 
if our_fingerprint == their_fingerprint { @@ -306,7 +375,7 @@ impl Coroutine { Ok(()) } pub async fn on_announce_entries( - mut self, + &mut self, message: ReconciliationAnnounceEntries, ) -> Result<(), Error> { debug!("on_announce_entries start"); @@ -350,6 +419,73 @@ impl Coroutine { Ok(()) } + async fn on_send_entry(&mut self, message: ReconciliationSendEntry) -> Result<(), Error> { + let ReconciliationSendEntry { + entry, + static_token_handle, + dynamic_token, + } = message; + info!("on_send_entry"); + + let mut state = self.state.lock().unwrap(); + + let remaining = state + .pending_entries + .as_mut() + .ok_or(Error::InvalidMessageInCurrentState)?; + info!(?remaining, "on_send_entry"); + *remaining -= 1; + if *remaining == 0 { + state.pending_entries = None; + } + drop(state); + + let static_token = loop { + let mut state = self.state.lock().unwrap(); + let token = state + .their_resources + .static_tokens + .get(&static_token_handle); + info!(?token, "loop get_static_token"); + // let token = token.clone(); + match token { + Ok(token) => break token.clone(), + Err(Error::MissingResource(handle)) => { + state.their_resources.register_notify( + handle, + self.notifier + .notifier(self.peer, Yield::ResourceMissing(handle)), + ); + drop(state); + self.co.yield_(Yield::ResourceMissing(handle)).await; + continue; + } + Err(err) => return Err(err), + } + }; + // .clone() {} + + let authorisation_token = AuthorisationToken::from_parts(static_token, dynamic_token); + let authorised_entry = AuthorisedEntry::try_from_parts(entry.entry, authorisation_token)?; + // Ok(authorised_entry) + + // TODO: Remove clone + // match state.authorize_send_entry(message.clone()) { + // Ok(entry) => break entry, + // Err(Error::MissingResource(handle)) => { + // // state.their_resources.register_notify(handle, notify) + // self.co.yield_(YieldReason::ResourceMissing(handle)).await + // } + // Err(err) => return Err(err), + // } + // }; + self.store_writer + .borrow_mut() + 
.ingest_entry(&authorised_entry)?; + debug!("ingested entry"); + Ok(()) + } + // fn on_send_entry(&self, message: ReconciliationSendEntry) -> Result<(), Error> { // // Message::ReconciliationSendEntry(message) => { // // let ReconciliationSendEntry { @@ -402,12 +538,38 @@ impl Coroutine { break Ok(()); } WriteOutcome::BufferFull => { - self.co.yield_(Yield::SendBufferFull(channel)).await; + self.co + .yield_(Yield::ChannelPending(channel, Interest::Send)) + .await; } } } } + async fn recv(&self, channel: LogicalChannel) -> Option> { + let receiver = self.channels.receiver(channel); + loop { + match receiver.read_message_or_set_notify() { + Err(err) => return Some(Err(err)), + Ok(outcome) => match outcome { + ReadOutcome::Closed => { + debug!("recv: closed"); + return None; + } + ReadOutcome::ReadBufferEmpty => { + self.co + .yield_(Yield::ChannelPending(channel, Interest::Recv)) + .await; + } + ReadOutcome::Item(message) => { + debug!(?message, "recv"); + return Some(Ok(message)); + } + }, + } + } + } + async fn send_fingerprint( &mut self, range: ThreeDRange, @@ -447,7 +609,7 @@ impl Coroutine { } let our_count = match our_count { Some(count) => count, - None => self.store.count(namespace, &range)?, + None => self.store_snapshot.count(namespace, &range)?, }; let msg = ReconciliationAnnounceEntries { range: range.clone(), @@ -459,15 +621,21 @@ impl Coroutine { is_final_reply_for_range, }; self.send_reconciliation(msg).await?; - for authorised_entry in self.store.get_entries_with_authorisation(namespace, &range) { + for authorised_entry in self + .store_snapshot + .get_entries_with_authorisation(namespace, &range) + { let authorised_entry = authorised_entry?; let (entry, token) = authorised_entry.into_parts(); let (static_token, dynamic_token) = token.into_parts(); // TODO: partial payloads let available = entry.payload_length; // TODO avoid such frequent locking - let (static_token_handle, static_token_bind_msg) = - 
self.state.lock().unwrap().bind_static_token(static_token)?; + let (static_token_handle, static_token_bind_msg) = self + .state + .lock() + .unwrap() + .bind_our_static_token(static_token)?; if let Some(msg) = static_token_bind_msg { self.send_control(msg).await?; } @@ -491,7 +659,7 @@ impl Coroutine { // TODO: expose this config let config = SyncConfig::default(); { - let iter = self.store.split(namespace, &range, &config)?; + let iter = self.store_snapshot.split(namespace, &range, &config)?; // TODO: avoid collect let iter = iter.collect::>().into_iter(); let mut iter = iter.peekable(); diff --git a/iroh-willow/src/session/resource.rs b/iroh-willow/src/session/resource.rs index 3e3fd6ec35..3a80e939ed 100644 --- a/iroh-willow/src/session/resource.rs +++ b/iroh-willow/src/session/resource.rs @@ -1,8 +1,11 @@ -use std::collections::HashMap; +use std::collections::{HashMap, VecDeque}; -use crate::proto::wgps::{ - AreaOfInterestHandle, CapabilityHandle, Handle, ReadCapability, SetupBindAreaOfInterest, - StaticToken, StaticTokenHandle, +use crate::{ + proto::wgps::{ + AreaOfInterestHandle, CapabilityHandle, IsHandle, ReadCapability, ResourceHandle, + SetupBindAreaOfInterest, StaticToken, StaticTokenHandle, + }, + store::actor::Notifier, }; use super::Error; @@ -13,11 +16,32 @@ pub struct ScopedResources { pub areas_of_interest: ResourceMap, pub static_tokens: ResourceMap, } +impl ScopedResources { + pub fn register_notify(&mut self, handle: ResourceHandle, notify: Notifier) { + tracing::info!(?handle, "register_notify"); + match handle { + ResourceHandle::AreaOfInterest(h) => self.areas_of_interest.register_notify(h, notify), + ResourceHandle::Capability(h) => self.capabilities.register_notify(h, notify), + ResourceHandle::StaticToken(h) => self.static_tokens.register_notify(h, notify), + ResourceHandle::Intersection(_h) => unimplemented!(), + } + } + + // pub fn get(&self, scope: Scope, handle: &Handle) { + // match handle { + // Handle::AreaOfInterest(h) => 
self.areas_of_interest.get(h), + // Handle::Intersection(h) => unimplemented!(), + // Handle::Capability(h) => self.capabilities.get(h), + // Handle::StaticToken(_h) => self.static_tokens.get(h), + // } + // } +} #[derive(Debug)] pub struct ResourceMap { next_handle: u64, map: HashMap>, + notify: HashMap>, } impl Default for ResourceMap { @@ -25,13 +49,14 @@ impl Default for ResourceMap { Self { next_handle: 0, map: Default::default(), + notify: Default::default(), } } } impl ResourceMap where - H: Handle, + H: IsHandle, R: Eq + PartialEq, { pub fn bind(&mut self, resource: R) -> H { @@ -39,9 +64,22 @@ where self.next_handle += 1; let resource = Resource::new(resource); self.map.insert(handle, resource); + tracing::info!(?handle, "bind"); + if let Some(mut notify) = self.notify.remove(&handle) { + tracing::info!(?handle, "notify {}", notify.len()); + for notify in notify.drain(..) { + if let Err(err) = notify.notify_sync() { + tracing::warn!(?err, "notify failed for {handle:?}"); + } + } + } handle } + pub fn register_notify(&mut self, handle: H, notifier: Notifier) { + self.notify.entry(handle).or_default().push_back(notifier) + } + pub fn bind_if_new(&mut self, resource: R) -> (H, bool) { // TODO: Optimize / find out if reverse index is better than find_map if let Some(handle) = self @@ -61,7 +99,7 @@ where .get(handle) .as_ref() .map(|r| &r.value) - .ok_or(Error::MissingResource) + .ok_or_else(|| Error::MissingResource((*handle).into())) } } diff --git a/iroh-willow/src/store/actor.rs b/iroh-willow/src/store/actor.rs index 026df183de..7ccca39e04 100644 --- a/iroh-willow/src/store/actor.rs +++ b/iroh-willow/src/store/actor.rs @@ -1,5 +1,7 @@ use std::{ + cell::RefCell, collections::{hash_map, HashMap, VecDeque}, + rc::Rc, sync::{Arc, Mutex}, thread::JoinHandle, }; @@ -21,7 +23,10 @@ use crate::{ proto::{ grouping::{NamespacedRange, ThreeDRange}, keys::NamespaceId, - wgps::{AreaOfInterestHandle, LogicalChannel, Message, ReconciliationSendEntry}, + wgps::{ + 
AreaOfInterestHandle, HandleType, LogicalChannel, Message, ReconciliationSendEntry, + ResourceHandle, + }, willow::{AuthorisedEntry, Entry}, }, session::{ @@ -44,36 +49,65 @@ pub struct StoreHandle { join_handle: Arc>>, } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)] pub enum Interest { Send, Recv, } +#[derive(Debug)] +pub struct CoroutineNotifier { + tx: flume::Sender, +} +impl CoroutineNotifier { + pub async fn notify(&self, peer: NodeId, notify: Yield) -> anyhow::Result<()> { + let msg = ToActor::Resume { peer, notify }; + self.tx.send_async(msg).await?; + Ok(()) + } + pub fn notify_sync(&self, peer: NodeId, notify: Yield) -> anyhow::Result<()> { + let msg = ToActor::Resume { peer, notify }; + self.tx.send(msg)?; + Ok(()) + } + pub fn notifier(&self, peer: NodeId, notify: Yield) -> Notifier { + Notifier { + tx: self.tx.clone(), + peer, + notify, + } + } +} + #[derive(Debug, Clone)] pub struct Notifier { - store: StoreHandle, + tx: flume::Sender, + notify: Yield, peer: NodeId, - channel: LogicalChannel, - direction: Interest, + // channel: LogicalChannel, + // direction: Interest, } impl Notifier { - pub fn channel(&self) -> LogicalChannel { - self.channel - } + // pub fn channel(&self) -> LogicalChannel { + // self.channel + // } pub async fn notify(&self) -> anyhow::Result<()> { - let msg = match self.direction { - Interest::Send => ToActor::ResumeSend { - peer: self.peer, - channel: self.channel, - }, - Interest::Recv => ToActor::ResumeRecv { - peer: self.peer, - channel: self.channel, - }, + // let notify = YieldReason::ChannelPending(self.channel, self.direction); + let msg = ToActor::Resume { + peer: self.peer, + notify: self.notify, }; - self.store.send(msg).await?; + self.tx.send_async(msg).await?; + Ok(()) + } + pub fn notify_sync(&self) -> anyhow::Result<()> { + // let notify = YieldReason::ChannelPending(self.channel, self.direction); + let msg = ToActor::Resume { + peer: self.peer, + notify: self.notify, + }; + 
self.tx.send(msg)?; Ok(()) } } @@ -81,6 +115,7 @@ impl Notifier { impl StoreHandle { pub fn spawn(store: S, me: NodeId) -> StoreHandle { let (tx, rx) = flume::bounded(CHANNEL_CAP); + let actor_tx = tx.clone(); let join_handle = std::thread::Builder::new() .name("sync-actor".to_string()) .spawn(move || { @@ -88,9 +123,10 @@ impl StoreHandle { let _enter = span.enter(); let mut actor = StorageThread { - store, + store: Rc::new(RefCell::new(store)), sessions: Default::default(), actor_rx: rx, + actor_tx, }; if let Err(error) = actor.run() { error!(?error, "storage thread failed"); @@ -104,17 +140,36 @@ impl StoreHandle { self.tx.send_async(action).await?; Ok(()) } - pub fn notifier( + pub fn send_blocking(&self, action: ToActor) -> anyhow::Result<()> { + self.tx.send(action)?; + Ok(()) + } + pub fn notifier_channel( &self, channel: LogicalChannel, direction: Interest, peer: NodeId, ) -> Notifier { + let notify = Yield::ChannelPending(channel, direction); + Notifier { + tx: self.tx.clone(), + peer, + notify, + } + } + pub fn notifier_resource(&self, peer: NodeId, handle: ResourceHandle) -> Notifier { + let notify = Yield::ResourceMissing(handle); + Notifier { + tx: self.tx.clone(), + notify, + peer, + } + } + pub fn notifier(&self, peer: NodeId, notify: Yield) -> Notifier { Notifier { - store: self.clone(), + tx: self.tx.clone(), peer, - channel, - direction, + notify, } } } @@ -144,14 +199,18 @@ pub enum ToActor { DropSession { peer: NodeId, }, - ResumeSend { - peer: NodeId, - channel: LogicalChannel, - }, - ResumeRecv { + Resume { peer: NodeId, - channel: LogicalChannel, + notify: Yield, }, + // ResumeSend { + // peer: NodeId, + // channel: LogicalChannel, + // }, + // ResumeRecv { + // peer: NodeId, + // channel: LogicalChannel, + // }, GetEntries { namespace: NamespaceId, #[debug(skip)] @@ -172,48 +231,41 @@ struct StorageSession { #[derive(derive_more::Debug, Default)] struct PendingCoroutines { - #[debug("{}", "on_control.len()")] - on_control: VecDeque, - 
#[debug("{}", "on_reconciliation.len()")] - on_reconciliation: VecDeque, + #[debug(skip)] + inner: HashMap>, // #[debug("{}", "on_control.len()")] + // on_control: VecDeque, + // #[debug("{}", "on_reconciliation.len()")] + // on_reconciliation: VecDeque, } impl PendingCoroutines { - fn get_mut(&mut self, channel: LogicalChannel) -> &mut VecDeque { - match channel { - LogicalChannel::Control => &mut self.on_control, - LogicalChannel::Reconciliation => &mut self.on_reconciliation, - } - } - fn get(&self, channel: LogicalChannel) -> &VecDeque { - match channel { - LogicalChannel::Control => &self.on_control, - LogicalChannel::Reconciliation => &self.on_reconciliation, - } + fn get_mut(&mut self, pending_on: Yield) -> &mut VecDeque { + self.inner.entry(pending_on).or_default() } - fn push_back(&mut self, channel: LogicalChannel, generator: ReconcileGen) { - self.get_mut(channel).push_back(generator); + fn push_back(&mut self, pending_on: Yield, generator: ReconcileGen) { + self.get_mut(pending_on).push_back(generator); } - fn push_front(&mut self, channel: LogicalChannel, generator: ReconcileGen) { - self.get_mut(channel).push_front(generator); + fn push_front(&mut self, pending_on: Yield, generator: ReconcileGen) { + self.get_mut(pending_on).push_front(generator); } - fn pop_front(&mut self, channel: LogicalChannel) -> Option { - self.get_mut(channel).pop_front() + fn pop_front(&mut self, pending_on: Yield) -> Option { + self.get_mut(pending_on).pop_front() } - fn len(&self, channel: LogicalChannel) -> usize { - self.get(channel).len() + fn len(&self, pending_on: &Yield) -> usize { + self.inner.get(pending_on).map(|v| v.len()).unwrap_or(0) } fn is_empty(&self) -> bool { - self.on_control.is_empty() && self.on_reconciliation.is_empty() + self.inner.values().any(|v| !v.is_empty()) } } #[derive(Debug)] pub struct StorageThread { - store: S, + store: Rc>, sessions: HashMap, actor_rx: flume::Receiver, + actor_tx: flume::Sender, } type ReconcileFut = LocalBoxFuture<'static, 
Result<(), Error>>; @@ -255,29 +307,20 @@ impl StorageThread { pending: Default::default(), }; self.sessions.insert(peer, session); - if let Some((our_handle, their_handle)) = start { - self.start_coroutine(peer, |routine| { - routine - .init_reconciliation(our_handle, their_handle) - .boxed_local() - })?; - } - self.resume_recv(peer, LogicalChannel::Reconciliation)?; - self.resume_send(peer, LogicalChannel::Reconciliation)?; - self.resume_send(peer, LogicalChannel::Control)?; + self.start_coroutine(peer, |routine| routine.run(start).boxed_local())?; } ToActor::DropSession { peer } => { self.sessions.remove(&peer); } - ToActor::ResumeSend { peer, channel } => { - self.resume_send(peer, channel)?; - } - ToActor::ResumeRecv { peer, channel } => { - self.resume_recv(peer, channel)?; + ToActor::Resume { peer, notify } => { + self.resume_yielded(peer, notify)?; } + // ToActor::ResumeRecv { peer, channel } => { + // self.resume_recv(peer, channel)?; + // } ToActor::GetEntries { namespace, reply } => { - let entries = self - .store + let store = self.store.borrow(); + let entries = store .get_entries(namespace, &ThreeDRange::full()) .filter_map(|r| r.ok()); for entry in entries { @@ -294,63 +337,27 @@ impl StorageThread { fn session(&mut self, peer: &NodeId) -> Result<&StorageSession, Error> { self.sessions.get(peer).ok_or(Error::SessionNotFound) } - fn on_message(&mut self, peer: NodeId, message: Message) -> Result<(), Error> { - info!(msg=%message, "recv"); - match message { - Message::ReconciliationSendFingerprint(message) => { - self.start_coroutine(peer, |routine| { - routine.on_send_fingerprint(message).boxed_local() - })?; - } - Message::ReconciliationAnnounceEntries(message) => { - self.start_coroutine(peer, |routine| { - routine.on_announce_entries(message).boxed_local() - })?; - } - Message::ReconciliationSendEntry(message) => { - let session = self.session_mut(&peer)?; - let authorised_entry = { - let mut state = session.state.lock().unwrap(); - let 
authorised_entry = state.authorize_send_entry(message)?; - state.trigger_notify_if_complete(); - authorised_entry - }; - self.store.ingest_entry(&authorised_entry)?; - debug!("ingested entry"); - } - _ => return Err(Error::UnsupportedMessage), - } - let session = self.session(&peer)?; - let state = session.state.lock().unwrap(); - let started = state.reconciliation_started; - let pending_ranges = &state.pending_ranges; - let pending_entries = &state.pending_entries; - let is_complete = state.is_complete(); - info!( - is_complete, - started, - ?pending_entries, - ?pending_ranges, - "handled" - ); - - Ok(()) - } - fn start_coroutine( &mut self, peer: NodeId, - producer: impl FnOnce(Coroutine) -> ReconcileFut, + producer: impl FnOnce(Coroutine) -> ReconcileFut, ) -> Result<(), Error> { let session = self.sessions.get_mut(&peer).ok_or(Error::SessionNotFound)?; - let snapshot = Arc::new(self.store.snapshot()?); + let snapshot = Arc::new(self.store.borrow_mut().snapshot()?); let channels = session.channels.clone(); let state = session.state.clone(); + let store_writer = Rc::clone(&self.store); + let notifier = CoroutineNotifier { + tx: self.actor_tx.clone(), + }; let generator = Gen::new(move |co| { let routine = Coroutine { - store: snapshot, + peer, + store_snapshot: snapshot, + store_writer, + notifier, channels, state, co, @@ -360,35 +367,35 @@ impl StorageThread { self.resume_coroutine(peer, generator) } - #[instrument(skip_all, fields(session=%peer.fmt_short(),ch=%channel.fmt_short()))] - fn resume_recv(&mut self, peer: NodeId, channel: LogicalChannel) -> Result<(), Error> { - let session = self.session(&peer)?; - debug!("resume"); - let channel = session.channels.receiver(channel).clone(); - loop { - match channel.read_message_or_set_notify()? 
{ - ReadOutcome::Closed => { - debug!("yield: Closed"); - break; - } - ReadOutcome::ReadBufferEmpty => { - debug!("yield: ReadBufferEmpty"); - break; - } - ReadOutcome::Item(message) => { - debug!(?message, "recv"); - self.on_message(peer, message)?; - } - } - } - Ok(()) - } + // #[instrument(skip_all, fields(session=%peer.fmt_short(),ch=%channel.fmt_short()))] + // fn resume_recv(&mut self, peer: NodeId, channel: LogicalChannel) -> Result<(), Error> { + // let session = self.session(&peer)?; + // debug!("resume"); + // let channel = session.channels.receiver(channel).clone(); + // loop { + // match channel.read_message_or_set_notify()? { + // ReadOutcome::Closed => { + // debug!("yield: Closed"); + // break; + // } + // ReadOutcome::ReadBufferEmpty => { + // debug!("yield: ReadBufferEmpty"); + // break; + // } + // ReadOutcome::Item(message) => { + // debug!(?message, "recv"); + // self.on_message(peer, message)?; + // } + // } + // } + // Ok(()) + // } - #[instrument(skip_all, fields(session=%peer.fmt_short(), ch=%channel.fmt_short()))] - fn resume_send(&mut self, peer: NodeId, channel: LogicalChannel) -> Result<(), Error> { + #[instrument(skip_all, fields(session=%peer.fmt_short()))] + fn resume_yielded(&mut self, peer: NodeId, notify: Yield) -> Result<(), Error> { let session = self.session_mut(&peer)?; - debug!(pending = session.pending.len(channel), "resume"); - let generator = session.pending.pop_front(channel); + debug!(pending = session.pending.len(¬ify), "resume"); + let generator = session.pending.pop_front(notify); match generator { Some(generator) => self.resume_coroutine(peer, generator), None => { @@ -402,18 +409,86 @@ impl StorageThread { debug!(session = peer.fmt_short(), "resume"); let session = self.session_mut(&peer)?; match generator.resume() { - GeneratorState::Yielded(why) => match why { - Yield::SendBufferFull(channel) => { - debug!("yield: SendBufferFull"); - session.pending.push_back(channel, generator); - Ok(()) - } - }, + 
GeneratorState::Yielded(reason) => { + info!(?reason, "yield"); + // match &reason { + // YieldReason::ResourceMissing(handle) => { + // // match handle.ty + // // self.actor_rx.s + // let notifier = Notifier { + // peer, + // tx, + // notify: YieldReason::ResourceMissing(*handle), + // }; + // session + // .state + // .lock() + // .unwrap() + // .their_resources + // .register_notify(*handle, notifier); + // } + // _ => {} + // } + session.pending.push_back(reason, generator); + Ok(()) + } GeneratorState::Complete(res) => { - debug!(?res, "done"); - session.state.lock().unwrap().trigger_notify_if_complete(); + info!(?res, "complete"); res } } } } + +// #[derive(Debug, Clone, Hash, Eq, PartialEq)] +// enum PendingOn { +// Channel { +// channel: LogicalChannel, +// interest: Interest, +// }, +// Resource { +// handle: ResourceHandle, +// }, +// } +// fn on_message(&mut self, peer: NodeId, message: Message) -> Result<(), Error> { +// info!(msg=%message, "recv"); +// match message { +// Message::ReconciliationSendFingerprint(message) => { +// self.start_coroutine(peer, |routine| { +// routine.on_send_fingerprint(message).boxed_local() +// })?; +// } +// Message::ReconciliationAnnounceEntries(message) => { +// self.start_coroutine(peer, |routine| { +// routine.on_announce_entries(message).boxed_local() +// })?; +// } +// Message::ReconciliationSendEntry(message) => { +// let session = self.session_mut(&peer)?; +// let authorised_entry = { +// let mut state = session.state.lock().unwrap(); +// let authorised_entry = state.authorize_send_entry(message)?; +// state.trigger_notify_if_complete(); +// authorised_entry +// }; +// self.store.ingest_entry(&authorised_entry)?; +// debug!("ingested entry"); +// } +// _ => return Err(Error::UnsupportedMessage), +// } +// let session = self.session(&peer)?; +// let state = session.state.lock().unwrap(); +// let started = state.reconciliation_started; +// let pending_ranges = &state.pending_ranges; +// let pending_entries = 
&state.pending_entries; +// let is_complete = state.is_complete(); +// info!( +// is_complete, +// started, +// ?pending_entries, +// ?pending_ranges, +// "handled" +// ); +// +// Ok(()) +// } diff --git a/iroh-willow/src/util.rs b/iroh-willow/src/util.rs index b49689ca99..7f3d2c55bc 100644 --- a/iroh-willow/src/util.rs +++ b/iroh-willow/src/util.rs @@ -21,5 +21,5 @@ pub trait Decoder: Sized { #[derive(Debug)] pub enum DecodeOutcome { NeedMoreData, - Decoded { item: T, consumed: usize } + Decoded { item: T, consumed: usize }, } diff --git a/iroh-willow/src/util/channel.rs b/iroh-willow/src/util/channel.rs index ece52bf8d4..46344a5aad 100644 --- a/iroh-willow/src/util/channel.rs +++ b/iroh-willow/src/util/channel.rs @@ -21,7 +21,7 @@ struct Shared { write_blocked: bool, need_read_notify: bool, need_write_notify: bool, - closed: bool + closed: bool, } impl Shared { @@ -34,11 +34,13 @@ impl Shared { write_blocked: false, need_read_notify: false, need_write_notify: false, - closed: false + closed: false, } } fn close(&mut self) { self.closed = true; + self.notify_writable.notify_waiters(); + self.notify_readable.notify_waiters(); } fn closed(&self) -> bool { self.closed @@ -100,11 +102,14 @@ impl Shared { fn read_message(&mut self) -> anyhow::Result> { let data = self.read_slice(); - if self.closed() { - return Ok(ReadOutcome::Closed); - } let res = match T::decode_from(data)? 
{ - DecodeOutcome::NeedMoreData => ReadOutcome::ReadBufferEmpty, + DecodeOutcome::NeedMoreData => { + if self.closed() { + ReadOutcome::Closed + } else { + ReadOutcome::ReadBufferEmpty + } + } DecodeOutcome::Decoded { item, consumed } => { self.read_advance(consumed); ReadOutcome::Item(item) @@ -162,12 +167,12 @@ impl Receiver { loop { let notify = { let mut shared = self.shared.lock().unwrap(); - if shared.closed() { - return None; - } if !shared.read_buf_empty() { return Some(shared.read_bytes()); } + if shared.closed() { + return None; + } shared.notify_readable.clone() }; notify.notified().await @@ -199,17 +204,20 @@ impl Receiver { notify.notified().await } - pub async fn read_message_async(&self) -> anyhow::Result> { + pub async fn read_message_async(&self) -> Option> { loop { let notify = { let mut shared = self.shared.lock().unwrap(); - match shared.read_message()? { - ReadOutcome::ReadBufferEmpty => shared.notify_readable.clone(), - ReadOutcome::Closed => return Ok(None), - ReadOutcome::Item(item) => { - // debug!("read_message_async read"); - return Ok(Some(item)); - } + match shared.read_message() { + Err(err) => return Some(Err(err)), + Ok(outcome) => match outcome { + ReadOutcome::ReadBufferEmpty => shared.notify_readable.clone(), + ReadOutcome::Closed => return None, + ReadOutcome::Item(item) => { + // debug!("read_message_async read"); + return Some(Ok(item)); + } + }, } }; // debug!("read_message_async NeedMoreData wait"); From 7c8761ee7c5c27ccae6e073b7ecc4d2fc6dac97f Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Wed, 8 May 2024 00:39:13 +0200 Subject: [PATCH 017/198] better --- iroh-willow/src/net.rs | 22 ++-- iroh-willow/src/session.rs | 185 ++++++--------------------- iroh-willow/src/session/coroutine.rs | 132 ++++++++++++++++--- iroh-willow/src/util/channel.rs | 2 +- 4 files changed, 160 insertions(+), 181 deletions(-) diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 026564ecca..d99f7d57fd 100644 --- 
a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -17,8 +17,8 @@ use crate::{ MAX_PAYLOAD_SIZE_POWER, }, session::{ - coroutine::{Channels, Yield}, - Role, Session, SessionInit, + coroutine::{Channels, SessionStateInner, Yield}, + ControlLoop, Role, SessionInit, }, store::actor::{Interest, Notifier, StoreHandle, ToActor}, util::{ @@ -102,20 +102,18 @@ pub async fn run( reconciliation_send, reconciliation_recv, }; - - let mut session = Session::new( - peer, + let state = SessionStateInner::new( our_role, + peer, our_nonce, - max_payload_size, received_commitment, - init, - channels.clone(), - store.clone(), + max_payload_size, ); + let on_complete = state.notify_complete(); + + let control_loop = ControlLoop::new(state, channels.clone(), store.clone(), init); - let on_complete = session.notify_complete(); - let session_fut = async move { session.run_control().await }; + let control_fut = control_loop.run(); let notified_fut = async move { on_complete.notified().await; @@ -123,7 +121,7 @@ pub async fn run( channels.close_send(); Ok(()) }; - join_set.spawn(session_fut.map_err(anyhow::Error::from)); + join_set.spawn(control_fut.map_err(anyhow::Error::from)); join_set.spawn(notified_fut); while let Some(res) = join_set.join_next().await { res??; diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 8199ba264b..68e6231c02 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -93,6 +93,12 @@ pub enum Role { Alfie, } +#[derive(Copy, Clone, Debug)] +pub enum Scope { + Ours, + Theirs, +} + #[derive(Debug)] pub struct SessionInit { pub user_secret_key: UserSecretKey, @@ -103,7 +109,7 @@ pub struct SessionInit { } #[derive(Debug)] -enum ChallengeState { +pub enum ChallengeState { Committed { our_nonce: AccessChallenge, received_commitment: ChallengeHash, @@ -164,188 +170,77 @@ impl ChallengeState { } #[derive(Debug)] -pub struct Session { - peer: NodeId, - our_role: Role, - _their_maximum_payload_size: usize, +pub struct 
ControlLoop { init: SessionInit, - challenge: ChallengeState, channels: Arc, state: SessionState, - our_current_aoi: Option, store_handle: StoreHandle, } -impl Session { +impl ControlLoop { pub fn new( - peer: NodeId, - our_role: Role, - our_nonce: AccessChallenge, - their_maximum_payload_size: usize, - received_commitment: ChallengeHash, - init: SessionInit, + state: SessionStateInner, channels: Channels, store_handle: StoreHandle, + init: SessionInit, ) -> Self { - let challenge_state = ChallengeState::Committed { - our_nonce, - received_commitment, - }; - let state = SessionStateInner::default(); - let this = Self { - peer, - our_role, - _their_maximum_payload_size: their_maximum_payload_size, - challenge: challenge_state, - our_current_aoi: None, // config + Self { init, channels: Arc::new(channels), state: Arc::new(Mutex::new(state)), store_handle, - }; - let msg = CommitmentReveal { nonce: our_nonce }; - this.channels - .sender(LogicalChannel::Control) - .send(&msg.into()) - .expect("channel not empty at start"); - this - } - - pub fn notify_complete(&self) -> Arc { - self.state.lock().unwrap().notify_complete() - } - - pub fn our_role(&self) -> Role { - self.our_role - } - - #[instrument(skip_all)] - pub async fn run_control(&mut self) -> Result<(), Error> { - loop { - info!("wait recv"); - let message = self - .channels - .receiver(LogicalChannel::Control) - .read_message_async() - .await; - match message { - None => break, - Some(message) => { - let message = message?; - info!(%message, "recv"); - self.process_control(message).await?; - let is_complete = self.state.lock().unwrap().is_complete(); - debug!(session=%self.peer.fmt_short(), is_complete, "handled"); - } - } } - debug!("run_control finished"); - Ok(()) } - async fn send_control(&self, message: impl Into) -> Result<(), Error> { - let message: Message = message.into(); + #[instrument(skip_all)] + pub async fn run(mut self) -> Result<(), Error> { + let reveal_message = 
self.state.lock().unwrap().commitment_reveal()?; self.channels - .sender(LogicalChannel::Control) - .send_async(&message) + .control_send + .send_async(&reveal_message) .await?; - info!(msg=%message, "sent"); - Ok(()) - } - - async fn setup(&mut self) -> Result<(), Error> { - let init = &self.init; - let area_of_interest = init.area_of_interest.clone(); - let capability = init.capability.clone(); - - debug!(?init, "init"); - if *capability.receiver() != init.user_secret_key.public_key() { - return Err(Error::WrongSecretKeyForCapability); + while let Some(message) = self.channels.control_recv.recv_async().await { + let message = message?; + info!(%message, "recv"); + self.on_control_message(message).await?; } - - // TODO: implement private area intersection - let intersection_handle = 0.into(); - - // register read capability - let signature = self.challenge.sign(&init.user_secret_key)?; - let our_capability_handle = self - .state - .lock() - .unwrap() - .our_resources - .capabilities - .bind(capability.clone()); - let msg = SetupBindReadCapability { - capability, - handle: intersection_handle, - signature, - }; - self.send_control(msg).await?; - - // register area of interest - let msg = SetupBindAreaOfInterest { - area_of_interest, - authorisation: our_capability_handle, - }; - self.send_control(msg.clone()).await?; - let our_aoi_handle = self - .state - .lock() - .unwrap() - .our_resources - .areas_of_interest - .bind(msg.clone()); - self.our_current_aoi = Some(our_aoi_handle); - + debug!("run_control finished"); Ok(()) } - async fn process_control(&mut self, message: Message) -> Result<(), Error> { + async fn on_control_message(&mut self, message: Message) -> Result<(), Error> { match message { Message::CommitmentReveal(msg) => { - self.challenge.reveal(self.our_role, msg.nonce)?; - self.setup().await?; + let setup_messages = self + .state + .lock() + .unwrap() + .on_commitment_reveal(msg, &self.init)?; + for message in setup_messages { + 
self.channels.control_send.send_async(&message).await?; + info!(%message, "sent"); + } } Message::SetupBindReadCapability(msg) => { - msg.capability.validate()?; - self.challenge - .verify(msg.capability.receiver(), &msg.signature)?; - // TODO: verify intersection handle self.state .lock() .unwrap() - .their_resources - .capabilities - .bind(msg.capability); + .on_setup_bind_read_capability(msg)?; } Message::SetupBindStaticToken(msg) => { - self.state - .lock() - .unwrap() - .their_resources - .static_tokens - .bind(msg.static_token); + self.state.lock().unwrap().on_setup_bind_static_token(msg); } Message::SetupBindAreaOfInterest(msg) => { - let their_handle = self + let (peer, start) = self .state .lock() .unwrap() - .setup_bind_area_of_interest(msg)?; - let start = if self.our_role == Role::Alfie { - let our_handle = self - .our_current_aoi - .clone() - .ok_or(Error::InvalidMessageInCurrentState)?; - Some((our_handle, their_handle)) - } else { - None - }; + .on_setup_bind_area_of_interest(msg)?; let message = ToActor::InitSession { - peer: self.peer, state: self.state.clone(), channels: self.channels.clone(), start, + peer, }; self.store_handle.send(message).await?; } @@ -358,12 +253,6 @@ impl Session { } } -#[derive(Copy, Clone, Debug)] -pub enum Scope { - Ours, - Theirs, -} - fn bitwise_xor(a: [u8; N], b: [u8; N]) -> [u8; N] { let mut res = [0u8; N]; for (i, (x1, x2)) in a.iter().zip(b.iter()).enumerate() { diff --git a/iroh-willow/src/session/coroutine.rs b/iroh-willow/src/session/coroutine.rs index 50e437ff52..580c2f0d86 100644 --- a/iroh-willow/src/session/coroutine.rs +++ b/iroh-willow/src/session/coroutine.rs @@ -18,10 +18,7 @@ use crate::{ grouping::ThreeDRange, keys::{NamespaceId, NamespacePublicKey}, wgps::{ - AreaOfInterestHandle, CapabilityHandle, Fingerprint, LengthyEntry, LogicalChannel, - Message, ReadCapability, ReconciliationAnnounceEntries, ReconciliationSendEntry, - ReconciliationSendFingerprint, ResourceHandle, SetupBindAreaOfInterest, - 
SetupBindStaticToken, StaticToken, StaticTokenHandle, + AccessChallenge, AreaOfInterestHandle, CapabilityHandle, ChallengeHash, CommitmentReveal, Fingerprint, LengthyEntry, LogicalChannel, Message, ReadCapability, ReconciliationAnnounceEntries, ReconciliationSendEntry, ReconciliationSendFingerprint, ResourceHandle, SetupBindAreaOfInterest, SetupBindReadCapability, SetupBindStaticToken, StaticToken, StaticTokenHandle }, willow::{AuthorisationToken, AuthorisedEntry}, }, @@ -32,7 +29,7 @@ use crate::{ util::channel::{ReadOutcome, Receiver, Sender, WriteOutcome}, }; -use super::{resource::ScopedResources, Error, Role, Scope}; +use super::{resource::ScopedResources, ChallengeState, Error, Role, Scope, SessionInit}; #[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] pub enum Yield { @@ -81,17 +78,45 @@ impl Channels { } } -#[derive(Debug, Default)] +#[derive(Debug)] pub struct SessionStateInner { - pub our_resources: ScopedResources, - pub their_resources: ScopedResources, - pub reconciliation_started: bool, - pub pending_ranges: HashSet<(AreaOfInterestHandle, ThreeDRange)>, - pub pending_entries: Option, - pub notify_complete: Arc, + our_role: Role, + peer: NodeId, + our_resources: ScopedResources, + their_resources: ScopedResources, + reconciliation_started: bool, + pending_ranges: HashSet<(AreaOfInterestHandle, ThreeDRange)>, + pending_entries: Option, + notify_complete: Arc, + challenge: ChallengeState, + our_current_aoi: Option, } impl SessionStateInner { + pub fn new( + our_role: Role, + peer: NodeId, + our_nonce: AccessChallenge, + received_commitment: ChallengeHash, + _their_maximum_payload_size: usize, + ) -> Self { + let challenge_state = ChallengeState::Committed { + our_nonce, + received_commitment, + }; + Self { + our_role, + peer, + challenge: challenge_state, + reconciliation_started: false, + our_resources: Default::default(), + their_resources: Default::default(), + pending_ranges: Default::default(), + pending_entries: Default::default(), + 
notify_complete: Default::default(), + our_current_aoi: Default::default(), + } + } fn resources(&self, scope: Scope) -> &ScopedResources { match scope { Scope::Ours => &self.our_resources, @@ -117,23 +142,90 @@ impl SessionStateInner { Arc::clone(&self.notify_complete) } - // fn get_resource(&self, scope: Scope, handle: impl Into) { - // match handle.into() { - // - // } - // } + pub fn commitment_reveal(&mut self) -> Result { + match self.challenge { + ChallengeState::Committed { our_nonce, .. } => { + Ok(CommitmentReveal { nonce: our_nonce }.into()) + } + _ => Err(Error::InvalidMessageInCurrentState), + } + // let msg = CommitmentReveal { nonce: our_nonce }; + } + + pub fn on_commitment_reveal( + &mut self, + msg: CommitmentReveal, + init: &SessionInit, + ) -> Result<[Message; 2], Error> { + self.challenge.reveal(self.our_role, msg.nonce)?; + self.setup(init) + } + + pub fn on_setup_bind_read_capability( + &mut self, + msg: SetupBindReadCapability, + ) -> Result<(), Error> { + // TODO: verify intersection handle + msg.capability.validate()?; + self.challenge + .verify(msg.capability.receiver(), &msg.signature)?; + self.their_resources.capabilities.bind(msg.capability); + Ok(()) + } + + pub fn on_setup_bind_static_token(&mut self, msg: SetupBindStaticToken) { + self.their_resources.static_tokens.bind(msg.static_token); + } + + fn setup(&mut self, init: &SessionInit) -> Result<[Message; 2], Error> { + let area_of_interest = init.area_of_interest.clone(); + let capability = init.capability.clone(); + + debug!(?init, "init"); + if *capability.receiver() != init.user_secret_key.public_key() { + return Err(Error::WrongSecretKeyForCapability); + } + + // TODO: implement private area intersection + let intersection_handle = 0.into(); + let signature = self.challenge.sign(&init.user_secret_key)?; - pub fn setup_bind_area_of_interest( + let our_capability_handle = self.our_resources.capabilities.bind(capability.clone()); + let msg1 = SetupBindReadCapability { + 
capability, + handle: intersection_handle, + signature, + }; + + let msg2 = SetupBindAreaOfInterest { + area_of_interest, + authorisation: our_capability_handle, + }; + let our_aoi_handle = self.our_resources.areas_of_interest.bind(msg2.clone()); + self.our_current_aoi = Some(our_aoi_handle); + Ok([msg1.into(), msg2.into()]) + } + + pub fn on_setup_bind_area_of_interest( &mut self, msg: SetupBindAreaOfInterest, - ) -> Result { + ) -> Result<(NodeId, Option<(AreaOfInterestHandle, AreaOfInterestHandle)>), Error> { let capability = self .resources(Scope::Theirs) .capabilities .get(&msg.authorisation)?; capability.try_granted_area(&msg.area_of_interest.area)?; let their_handle = self.their_resources.areas_of_interest.bind(msg); - Ok(their_handle) + let start = if self.our_role == Role::Alfie { + let our_handle = self + .our_current_aoi + .clone() + .ok_or(Error::InvalidMessageInCurrentState)?; + Some((our_handle, their_handle)) + } else { + None + }; + Ok((self.peer, start)) } pub fn authorize_send_entry( diff --git a/iroh-willow/src/util/channel.rs b/iroh-willow/src/util/channel.rs index 46344a5aad..43a6953cd7 100644 --- a/iroh-willow/src/util/channel.rs +++ b/iroh-willow/src/util/channel.rs @@ -204,7 +204,7 @@ impl Receiver { notify.notified().await } - pub async fn read_message_async(&self) -> Option> { + pub async fn recv_async(&self) -> Option> { loop { let notify = { let mut shared = self.shared.lock().unwrap(); From 788fe3575233b897dc45918b0d3e82124c187c34 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Wed, 8 May 2024 01:53:28 +0200 Subject: [PATCH 018/198] things work!! 
--- iroh-willow/src/net.rs | 30 +++- iroh-willow/src/proto.rs | 1 + iroh-willow/src/proto/challenge.rs | 90 ++++++++++ iroh-willow/src/session.rs | 250 +++++++++------------------ iroh-willow/src/session/coroutine.rs | 205 ++++++++++++---------- iroh-willow/src/session/resource.rs | 9 + iroh-willow/src/store/actor.rs | 200 +++++++-------------- 7 files changed, 376 insertions(+), 409 deletions(-) create mode 100644 iroh-willow/src/proto/challenge.rs diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index d99f7d57fd..a1409d6d22 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -1,4 +1,8 @@ -use std::{pin::Pin, sync::Arc, task::Poll}; +use std::{ + pin::Pin, + sync::{Arc, Mutex}, + task::Poll, +}; use anyhow::{anyhow, ensure, Context}; use futures::{FutureExt, SinkExt, Stream, TryFutureExt}; @@ -17,8 +21,8 @@ use crate::{ MAX_PAYLOAD_SIZE_POWER, }, session::{ - coroutine::{Channels, SessionStateInner, Yield}, - ControlLoop, Role, SessionInit, + coroutine::{Channels, Readyness, SessionStateInner, Yield}, + Role, SessionInit, }, store::actor::{Interest, Notifier, StoreHandle, ToActor}, util::{ @@ -111,9 +115,17 @@ pub async fn run( ); let on_complete = state.notify_complete(); - let control_loop = ControlLoop::new(state, channels.clone(), store.clone(), init); - - let control_fut = control_loop.run(); + // let control_loop = ControlLoop::new(state, channels.clone(), store.clone(), init); + // + // let control_fut = control_loop.run(); + store + .send(ToActor::InitSession { + peer, + state: Arc::new(Mutex::new(state)), + channels: channels.clone(), + init, + }) + .await?; let notified_fut = async move { on_complete.notified().await; @@ -121,7 +133,7 @@ pub async fn run( channels.close_send(); Ok(()) }; - join_set.spawn(control_fut.map_err(anyhow::Error::from)); + // join_set.spawn(control_fut.map_err(anyhow::Error::from)); join_set.spawn(notified_fut); while let Some(res) = join_set.join_next().await { res??; @@ -144,7 +156,7 @@ fn 
spawn_channel( let recv_fut = recv_loop( recv_stream, recv_tx, - store.notifier(peer, Yield::ChannelPending(ch, Interest::Recv)), + store.notifier(peer, Readyness::Channel(ch, Interest::Recv)), ) .instrument(error_span!("recv", peer=%peer.fmt_short(), ch=%ch.fmt_short())); @@ -153,7 +165,7 @@ fn spawn_channel( let send_fut = send_loop( send_stream, send_rx, - store.notifier(peer, Yield::ChannelPending(ch, Interest::Send)), + store.notifier(peer, Readyness::Channel(ch, Interest::Send)), ) .instrument(error_span!("send", peer=%peer.fmt_short(), ch=%ch.fmt_short())); diff --git a/iroh-willow/src/proto.rs b/iroh-willow/src/proto.rs index d3c5e179e2..cd3dc110f4 100644 --- a/iroh-willow/src/proto.rs +++ b/iroh-willow/src/proto.rs @@ -3,3 +3,4 @@ pub mod keys; pub mod meadowcap; pub mod wgps; pub mod willow; +pub mod challenge; diff --git a/iroh-willow/src/proto/challenge.rs b/iroh-willow/src/proto/challenge.rs new file mode 100644 index 0000000000..548ce32c68 --- /dev/null +++ b/iroh-willow/src/proto/challenge.rs @@ -0,0 +1,90 @@ +use iroh_base::hash::Hash; + +use crate::session::{Error, Role}; + +use super::{keys::{UserPublicKey, UserSecretKey, UserSignature}, wgps::{AccessChallenge, ChallengeHash}}; + +#[derive(Debug)] +pub enum ChallengeState { + Committed { + our_nonce: AccessChallenge, + received_commitment: ChallengeHash, + }, + Revealed { + ours: AccessChallenge, + theirs: AccessChallenge, + }, +} + +impl ChallengeState { + pub fn reveal(&mut self, our_role: Role, their_nonce: AccessChallenge) -> Result<(), Error> { + match self { + Self::Committed { + our_nonce, + received_commitment, + } => { + if Hash::new(&their_nonce).as_bytes() != received_commitment { + return Err(Error::BrokenCommittement); + } + let ours = match our_role { + Role::Alfie => bitwise_xor(*our_nonce, their_nonce), + Role::Betty => bitwise_xor_complement(*our_nonce, their_nonce), + }; + let theirs = bitwise_complement(ours); + *self = Self::Revealed { ours, theirs }; + Ok(()) + } + _ => 
Err(Error::InvalidMessageInCurrentState), + } + } + + pub fn sign(&self, secret_key: &UserSecretKey) -> Result { + let challenge = self.get_ours()?; + let signature = secret_key.sign(challenge); + Ok(signature) + } + + pub fn verify(&self, user_key: &UserPublicKey, signature: &UserSignature) -> Result<(), Error> { + let their_challenge = self.get_theirs()?; + user_key.verify(their_challenge, &signature)?; + Ok(()) + } + + fn get_ours(&self) -> Result<&AccessChallenge, Error> { + match self { + Self::Revealed { ours, .. } => Ok(&ours), + _ => Err(Error::InvalidMessageInCurrentState), + } + } + + fn get_theirs(&self) -> Result<&AccessChallenge, Error> { + match self { + Self::Revealed { theirs, .. } => Ok(&theirs), + _ => Err(Error::InvalidMessageInCurrentState), + } + } +} + +fn bitwise_xor(a: [u8; N], b: [u8; N]) -> [u8; N] { + let mut res = [0u8; N]; + for (i, (x1, x2)) in a.iter().zip(b.iter()).enumerate() { + res[i] = x1 ^ x2; + } + res +} + +fn bitwise_complement(a: [u8; N]) -> [u8; N] { + let mut res = [0u8; N]; + for (i, x) in a.iter().enumerate() { + res[i] = !x; + } + res +} + +fn bitwise_xor_complement(a: [u8; N], b: [u8; N]) -> [u8; N] { + let mut res = [0u8; N]; + for (i, (x1, x2)) in a.iter().zip(b.iter()).enumerate() { + res[i] = !(x1 ^ x2); + } + res +} diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 68e6231c02..1bef828780 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -108,171 +108,87 @@ pub struct SessionInit { pub area_of_interest: AreaOfInterest, } -#[derive(Debug)] -pub enum ChallengeState { - Committed { - our_nonce: AccessChallenge, - received_commitment: ChallengeHash, - }, - Revealed { - ours: AccessChallenge, - theirs: AccessChallenge, - }, -} +// #[derive(Debug)] +// pub struct ControlLoop { +// init: SessionInit, +// channels: Arc, +// state: SessionState, +// store_handle: StoreHandle, +// } +// +// impl ControlLoop { +// pub fn new( +// state: SessionStateInner, +// channels: 
Channels, +// store_handle: StoreHandle, +// init: SessionInit, +// ) -> Self { +// Self { +// init, +// channels: Arc::new(channels), +// state: Arc::new(Mutex::new(state)), +// store_handle, +// } +// } +// +// #[instrument(skip_all)] +// pub async fn run(mut self) -> Result<(), Error> { +// let reveal_message = self.state.lock().unwrap().commitment_reveal()?; +// self.channels +// .control_send +// .send_async(&reveal_message) +// .await?; +// while let Some(message) = self.channels.control_recv.recv_async().await { +// let message = message?; +// info!(%message, "recv"); +// self.on_control_message(message).await?; +// } +// debug!("run_control finished"); +// Ok(()) +// } +// +// async fn on_control_message(&mut self, message: Message) -> Result<(), Error> { +// match message { +// Message::CommitmentReveal(msg) => { +// let setup_messages = self +// .state +// .lock() +// .unwrap() +// .on_commitment_reveal(msg, &self.init)?; +// for message in setup_messages { +// self.channels.control_send.send_async(&message).await?; +// info!(%message, "sent"); +// } +// } +// Message::SetupBindReadCapability(msg) => { +// self.state +// .lock() +// .unwrap() +// .on_setup_bind_read_capability(msg)?; +// } +// Message::SetupBindStaticToken(msg) => { +// self.state.lock().unwrap().on_setup_bind_static_token(msg); +// } +// Message::SetupBindAreaOfInterest(msg) => { +// let (peer, start) = self +// .state +// .lock() +// .unwrap() +// .on_setup_bind_area_of_interest(msg)?; +// let message = ToActor::InitSession { +// state: self.state.clone(), +// channels: self.channels.clone(), +// start, +// peer, +// }; +// self.store_handle.send(message).await?; +// } +// Message::ControlFreeHandle(_msg) => { +// // TODO: Free handles +// } +// _ => return Err(Error::UnsupportedMessage), +// } +// Ok(()) +// } +// } -impl ChallengeState { - pub fn reveal(&mut self, our_role: Role, their_nonce: AccessChallenge) -> Result<(), Error> { - match self { - Self::Committed { - our_nonce, - 
received_commitment, - } => { - if Hash::new(&their_nonce).as_bytes() != received_commitment { - return Err(Error::BrokenCommittement); - } - let ours = match our_role { - Role::Alfie => bitwise_xor(*our_nonce, their_nonce), - Role::Betty => bitwise_xor_complement(*our_nonce, their_nonce), - }; - let theirs = bitwise_complement(ours); - *self = Self::Revealed { ours, theirs }; - Ok(()) - } - _ => Err(Error::InvalidMessageInCurrentState), - } - } - - pub fn sign(&self, secret_key: &UserSecretKey) -> Result { - let challenge = self.get_ours()?; - let signature = secret_key.sign(challenge); - Ok(signature) - } - - pub fn verify(&self, user_key: &UserPublicKey, signature: &UserSignature) -> Result<(), Error> { - let their_challenge = self.get_theirs()?; - user_key.verify(their_challenge, &signature)?; - Ok(()) - } - - fn get_ours(&self) -> Result<&AccessChallenge, Error> { - match self { - Self::Revealed { ours, .. } => Ok(&ours), - _ => Err(Error::InvalidMessageInCurrentState), - } - } - - fn get_theirs(&self) -> Result<&AccessChallenge, Error> { - match self { - Self::Revealed { theirs, .. 
} => Ok(&theirs), - _ => Err(Error::InvalidMessageInCurrentState), - } - } -} - -#[derive(Debug)] -pub struct ControlLoop { - init: SessionInit, - channels: Arc, - state: SessionState, - store_handle: StoreHandle, -} - -impl ControlLoop { - pub fn new( - state: SessionStateInner, - channels: Channels, - store_handle: StoreHandle, - init: SessionInit, - ) -> Self { - Self { - init, - channels: Arc::new(channels), - state: Arc::new(Mutex::new(state)), - store_handle, - } - } - - #[instrument(skip_all)] - pub async fn run(mut self) -> Result<(), Error> { - let reveal_message = self.state.lock().unwrap().commitment_reveal()?; - self.channels - .control_send - .send_async(&reveal_message) - .await?; - while let Some(message) = self.channels.control_recv.recv_async().await { - let message = message?; - info!(%message, "recv"); - self.on_control_message(message).await?; - } - debug!("run_control finished"); - Ok(()) - } - - async fn on_control_message(&mut self, message: Message) -> Result<(), Error> { - match message { - Message::CommitmentReveal(msg) => { - let setup_messages = self - .state - .lock() - .unwrap() - .on_commitment_reveal(msg, &self.init)?; - for message in setup_messages { - self.channels.control_send.send_async(&message).await?; - info!(%message, "sent"); - } - } - Message::SetupBindReadCapability(msg) => { - self.state - .lock() - .unwrap() - .on_setup_bind_read_capability(msg)?; - } - Message::SetupBindStaticToken(msg) => { - self.state.lock().unwrap().on_setup_bind_static_token(msg); - } - Message::SetupBindAreaOfInterest(msg) => { - let (peer, start) = self - .state - .lock() - .unwrap() - .on_setup_bind_area_of_interest(msg)?; - let message = ToActor::InitSession { - state: self.state.clone(), - channels: self.channels.clone(), - start, - peer, - }; - self.store_handle.send(message).await?; - } - Message::ControlFreeHandle(_msg) => { - // TODO: Free handles - } - _ => return Err(Error::UnsupportedMessage), - } - Ok(()) - } -} - -fn bitwise_xor(a: 
[u8; N], b: [u8; N]) -> [u8; N] { - let mut res = [0u8; N]; - for (i, (x1, x2)) in a.iter().zip(b.iter()).enumerate() { - res[i] = x1 ^ x2; - } - res -} - -fn bitwise_complement(a: [u8; N]) -> [u8; N] { - let mut res = [0u8; N]; - for (i, x) in a.iter().enumerate() { - res[i] = !x; - } - res -} - -fn bitwise_xor_complement(a: [u8; N], b: [u8; N]) -> [u8; N] { - let mut res = [0u8; N]; - for (i, (x1, x2)) in a.iter().zip(b.iter()).enumerate() { - res[i] = !(x1 ^ x2); - } - res -} diff --git a/iroh-willow/src/session/coroutine.rs b/iroh-willow/src/session/coroutine.rs index 580c2f0d86..eb2c584cef 100644 --- a/iroh-willow/src/session/coroutine.rs +++ b/iroh-willow/src/session/coroutine.rs @@ -15,10 +15,15 @@ use tracing::{debug, info, warn}; use crate::{ proto::{ + challenge::ChallengeState, grouping::ThreeDRange, keys::{NamespaceId, NamespacePublicKey}, wgps::{ - AccessChallenge, AreaOfInterestHandle, CapabilityHandle, ChallengeHash, CommitmentReveal, Fingerprint, LengthyEntry, LogicalChannel, Message, ReadCapability, ReconciliationAnnounceEntries, ReconciliationSendEntry, ReconciliationSendFingerprint, ResourceHandle, SetupBindAreaOfInterest, SetupBindReadCapability, SetupBindStaticToken, StaticToken, StaticTokenHandle + AccessChallenge, AreaOfInterestHandle, CapabilityHandle, ChallengeHash, + CommitmentReveal, Fingerprint, LengthyEntry, LogicalChannel, Message, ReadCapability, + ReconciliationAnnounceEntries, ReconciliationSendEntry, ReconciliationSendFingerprint, + ResourceHandle, SetupBindAreaOfInterest, SetupBindReadCapability, SetupBindStaticToken, + StaticToken, StaticTokenHandle, }, willow::{AuthorisationToken, AuthorisedEntry}, }, @@ -29,20 +34,32 @@ use crate::{ util::channel::{ReadOutcome, Receiver, Sender, WriteOutcome}, }; -use super::{resource::ScopedResources, ChallengeState, Error, Role, Scope, SessionInit}; +use super::{resource::ScopedResources, Error, Role, Scope, SessionInit}; #[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] pub enum Yield { - 
ChannelPending(LogicalChannel, Interest), - ResourceMissing(ResourceHandle), + Pending(Readyness), + StartReconciliation(Option<(AreaOfInterestHandle, AreaOfInterestHandle)>), +} +#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] +pub enum Readyness { + Channel(LogicalChannel, Interest), + Resource(ResourceHandle), } +// #[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] +// pub enum NotifyCoroutine { +// StartReconciliation(Option<(AreaOfInterestHandle, AreaOfInterestHandle)>), +// ChannelReady(LogicalChannel, Interest), +// ResourceReady(ResourceHandle), +// } + #[derive(derive_more::Debug)] pub struct Coroutine { pub peer: NodeId, pub store_snapshot: Arc, pub store_writer: Rc>, - pub channels: Arc, + pub channels: Channels, pub state: SessionState, pub notifier: CoroutineNotifier, #[debug(skip)] @@ -256,22 +273,6 @@ impl SessionStateInner { Ok(authorised_entry) } - // async fn get_static_token_or_yield( - // &mut self, - // handle: &StaticTokenHandle, - // ) -> Result { - // // loop { - // // match self - // // .their_resources - // // .static_tokens - // // .get(&static_token_handle) { - // // Ok(token) => return Ok(token.clone()), - // // Err(_)=> {} - // // } - // // } - // todo!() - // } - fn clear_pending_range_if_some( &mut self, our_handle: AreaOfInterestHandle, @@ -345,11 +346,11 @@ impl SessionStateInner { // Note that all async methods yield to the owner of the coroutine. They are not running in a tokio // context. You may not perform regular async operations in them. 
impl Coroutine { - pub async fn run( + pub async fn run_reconciliation( mut self, - init: Option<(AreaOfInterestHandle, AreaOfInterestHandle)>, + start: Option<(AreaOfInterestHandle, AreaOfInterestHandle)>, ) -> Result<(), Error> { - if let Some((our_handle, their_handle)) = init { + if let Some((our_handle, their_handle)) = start { self.init_reconciliation(our_handle, their_handle).await?; } @@ -364,7 +365,75 @@ impl Coroutine { Ok(()) } - pub async fn on_message(&mut self, message: Message) -> Result<(), Error> { + pub async fn run_control(mut self, init: SessionInit) -> Result<(), Error> { + let reveal_message = self.state.lock().unwrap().commitment_reveal()?; + self.send_control(reveal_message).await?; + + while let Some(message) = self.recv(LogicalChannel::Control).await { + let message = message?; + info!(%message, "run_control recv"); + self.on_control_message(message, &init).await?; + if self.state.lock().unwrap().trigger_notify_if_complete() { + break; + } + } + + Ok(()) + } + + async fn on_control_message( + &mut self, + message: Message, + init: &SessionInit, + ) -> Result<(), Error> { + match message { + Message::CommitmentReveal(msg) => { + let setup_messages = self + .state + .lock() + .unwrap() + .on_commitment_reveal(msg, &init)?; + for message in setup_messages { + self.channels.control_send.send_async(&message).await?; + info!(%message, "sent"); + } + } + Message::SetupBindReadCapability(msg) => { + self.state + .lock() + .unwrap() + .on_setup_bind_read_capability(msg)?; + } + Message::SetupBindStaticToken(msg) => { + info!("A"); + self.state.lock().unwrap().on_setup_bind_static_token(msg); + info!("B"); + } + Message::SetupBindAreaOfInterest(msg) => { + let (_peer, start) = self + .state + .lock() + .unwrap() + .on_setup_bind_area_of_interest(msg)?; + self.co.yield_(Yield::StartReconciliation(start)).await; + // self.notifier.notify_sync(self.peer, notify) + // let message = ToActor::InitSession { + // state: self.state.clone(), + // channels: 
self.channels.clone(), + // start, + // peer, + // }; + // self.store_handle.send(message).await?; + } + Message::ControlFreeHandle(_msg) => { + // TODO: Free handles + } + _ => return Err(Error::UnsupportedMessage), + } + Ok(()) + } + + async fn on_message(&mut self, message: Message) -> Result<(), Error> { info!(%message, "recv"); match message { Message::ReconciliationSendFingerprint(message) => { @@ -379,7 +448,7 @@ impl Coroutine { Ok(()) } - pub async fn init_reconciliation( + async fn init_reconciliation( &mut self, our_handle: AreaOfInterestHandle, their_handle: AreaOfInterestHandle, @@ -409,7 +478,7 @@ impl Coroutine { Ok(()) } - pub async fn on_send_fingerprint( + async fn on_send_fingerprint( &mut self, message: ReconciliationSendFingerprint, ) -> Result<(), Error> { @@ -466,7 +535,7 @@ impl Coroutine { debug!("on_send_fingerprint done"); Ok(()) } - pub async fn on_announce_entries( + async fn on_announce_entries( &mut self, message: ReconciliationAnnounceEntries, ) -> Result<(), Error> { @@ -483,7 +552,6 @@ impl Coroutine { let namespace = { let mut state = self.state.lock().unwrap(); - debug!(?state, "STATE"); state.clear_pending_range_if_some(our_handle, is_final_reply_for_range)?; if state.pending_entries.is_some() { return Err(Error::InvalidMessageInCurrentState); @@ -517,7 +585,6 @@ impl Coroutine { static_token_handle, dynamic_token, } = message; - info!("on_send_entry"); let mut state = self.state.lock().unwrap(); @@ -525,7 +592,6 @@ impl Coroutine { .pending_entries .as_mut() .ok_or(Error::InvalidMessageInCurrentState)?; - info!(?remaining, "on_send_entry"); *remaining -= 1; if *remaining == 0 { state.pending_entries = None; @@ -534,43 +600,27 @@ impl Coroutine { let static_token = loop { let mut state = self.state.lock().unwrap(); - let token = state + match state .their_resources .static_tokens - .get(&static_token_handle); - info!(?token, "loop get_static_token"); - // let token = token.clone(); - match token { - Ok(token) => break 
token.clone(), - Err(Error::MissingResource(handle)) => { - state.their_resources.register_notify( - handle, - self.notifier - .notifier(self.peer, Yield::ResourceMissing(handle)), - ); + .get_or_notify(&static_token_handle, || { + self.notifier + .notifier(self.peer, Readyness::Resource(static_token_handle.into())) + }) { + Some(token) => break token.clone(), + None => { drop(state); - self.co.yield_(Yield::ResourceMissing(handle)).await; - continue; + self.co + .yield_(Yield::Pending(Readyness::Resource( + static_token_handle.into(), + ))) + .await } - Err(err) => return Err(err), } }; - // .clone() {} let authorisation_token = AuthorisationToken::from_parts(static_token, dynamic_token); let authorised_entry = AuthorisedEntry::try_from_parts(entry.entry, authorisation_token)?; - // Ok(authorised_entry) - - // TODO: Remove clone - // match state.authorize_send_entry(message.clone()) { - // Ok(entry) => break entry, - // Err(Error::MissingResource(handle)) => { - // // state.their_resources.register_notify(handle, notify) - // self.co.yield_(YieldReason::ResourceMissing(handle)).await - // } - // Err(err) => return Err(err), - // } - // }; self.store_writer .borrow_mut() .ingest_entry(&authorised_entry)?; @@ -578,38 +628,6 @@ impl Coroutine { Ok(()) } - // fn on_send_entry(&self, message: ReconciliationSendEntry) -> Result<(), Error> { - // // Message::ReconciliationSendEntry(message) => { - // // let ReconciliationSendEntry { - // // entry, - // // static_token_handle, - // // dynamic_token, - // // } = message; - // // let static_token = { - // // let mut state = self.state.lock().unwrap(); - // // let mut remaining = state - // // .pending_entries - // // .clone() - // // .ok_or(Error::InvalidMessageInCurrentState)?; - // // remaining -= 1; - // // if remaining == 0 { - // // state.pending_entries = None; - // // } - // // state - // // .their_resources - // // .static_tokens - // // .get(&static_token_handle)? 
- // // .clone() - // // }; - // // - // // let authorisation_token = - // // AuthorisationToken::from_parts(static_token, dynamic_token); - // // let authorised_entry = - // // AuthorisedEntry::try_from_parts(entry.entry, authorisation_token)?; - // // self.store.ingest_entry(&authorised_entry)?; - // Ok(()) - // } - async fn send_reconciliation(&self, msg: impl Into) -> anyhow::Result<()> { self.send(msg).await } @@ -621,6 +639,7 @@ impl Coroutine { async fn send(&self, message: impl Into) -> anyhow::Result<()> { let message: Message = message.into(); let channel = message.logical_channel(); + debug!(%message, ?channel, "send"); let sender = self.channels.sender(message.logical_channel()); loop { @@ -631,7 +650,7 @@ impl Coroutine { } WriteOutcome::BufferFull => { self.co - .yield_(Yield::ChannelPending(channel, Interest::Send)) + .yield_(Yield::Pending(Readyness::Channel(channel, Interest::Send))) .await; } } @@ -650,7 +669,7 @@ impl Coroutine { } ReadOutcome::ReadBufferEmpty => { self.co - .yield_(Yield::ChannelPending(channel, Interest::Recv)) + .yield_(Yield::Pending(Readyness::Channel(channel, Interest::Recv))) .await; } ReadOutcome::Item(message) => { diff --git a/iroh-willow/src/session/resource.rs b/iroh-willow/src/session/resource.rs index 3a80e939ed..dbfb259714 100644 --- a/iroh-willow/src/session/resource.rs +++ b/iroh-willow/src/session/resource.rs @@ -101,6 +101,15 @@ where .map(|r| &r.value) .ok_or_else(|| Error::MissingResource((*handle).into())) } + + pub fn get_or_notify(&mut self, handle: &H, notify: impl FnOnce() -> Notifier) -> Option<&R> { + if let Some(resource) = self.map.get(handle).as_ref().map(|r| &r.value) { + Some(resource) + } else { + self.notify.entry(*handle).or_default().push_back((notify)()); + None + } + } } // #[derive(Debug)] diff --git a/iroh-willow/src/store/actor.rs b/iroh-willow/src/store/actor.rs index 7ccca39e04..9d25fe09f0 100644 --- a/iroh-willow/src/store/actor.rs +++ b/iroh-willow/src/store/actor.rs @@ -30,8 +30,8 @@ 
use crate::{ willow::{AuthorisedEntry, Entry}, }, session::{ - coroutine::{Channels, Coroutine, SessionState, Yield}, - Error, + coroutine::{Channels, Coroutine, Readyness, SessionState, Yield}, + Error, SessionInit, }, util::channel::{self, ReadOutcome, Receiver}, }; @@ -60,17 +60,17 @@ pub struct CoroutineNotifier { tx: flume::Sender, } impl CoroutineNotifier { - pub async fn notify(&self, peer: NodeId, notify: Yield) -> anyhow::Result<()> { + pub async fn notify(&self, peer: NodeId, notify: Readyness) -> anyhow::Result<()> { let msg = ToActor::Resume { peer, notify }; self.tx.send_async(msg).await?; Ok(()) } - pub fn notify_sync(&self, peer: NodeId, notify: Yield) -> anyhow::Result<()> { + pub fn notify_sync(&self, peer: NodeId, notify: Readyness) -> anyhow::Result<()> { let msg = ToActor::Resume { peer, notify }; self.tx.send(msg)?; Ok(()) } - pub fn notifier(&self, peer: NodeId, notify: Yield) -> Notifier { + pub fn notifier(&self, peer: NodeId, notify: Readyness) -> Notifier { Notifier { tx: self.tx.clone(), peer, @@ -82,7 +82,7 @@ impl CoroutineNotifier { #[derive(Debug, Clone)] pub struct Notifier { tx: flume::Sender, - notify: Yield, + notify: Readyness, peer: NodeId, // channel: LogicalChannel, // direction: Interest, @@ -144,28 +144,7 @@ impl StoreHandle { self.tx.send(action)?; Ok(()) } - pub fn notifier_channel( - &self, - channel: LogicalChannel, - direction: Interest, - peer: NodeId, - ) -> Notifier { - let notify = Yield::ChannelPending(channel, direction); - Notifier { - tx: self.tx.clone(), - peer, - notify, - } - } - pub fn notifier_resource(&self, peer: NodeId, handle: ResourceHandle) -> Notifier { - let notify = Yield::ResourceMissing(handle); - Notifier { - tx: self.tx.clone(), - notify, - peer, - } - } - pub fn notifier(&self, peer: NodeId, notify: Yield) -> Notifier { + pub fn notifier(&self, peer: NodeId, notify: Readyness) -> Notifier { Notifier { tx: self.tx.clone(), peer, @@ -193,24 +172,17 @@ pub enum ToActor { #[debug(skip)] state: 
SessionState, #[debug(skip)] - channels: Arc, - start: Option<(AreaOfInterestHandle, AreaOfInterestHandle)>, + channels: Channels, + // start: Option<(AreaOfInterestHandle, AreaOfInterestHandle)>, + init: SessionInit, }, DropSession { peer: NodeId, }, Resume { peer: NodeId, - notify: Yield, + notify: Readyness, }, - // ResumeSend { - // peer: NodeId, - // channel: LogicalChannel, - // }, - // ResumeRecv { - // peer: NodeId, - // channel: LogicalChannel, - // }, GetEntries { namespace: NamespaceId, #[debug(skip)] @@ -225,33 +197,30 @@ pub enum ToActor { #[derive(Debug)] struct StorageSession { state: SessionState, - channels: Arc, + channels: Channels, pending: PendingCoroutines, } #[derive(derive_more::Debug, Default)] struct PendingCoroutines { #[debug(skip)] - inner: HashMap>, // #[debug("{}", "on_control.len()")] - // on_control: VecDeque, - // #[debug("{}", "on_reconciliation.len()")] - // on_reconciliation: VecDeque, + inner: HashMap>, } impl PendingCoroutines { - fn get_mut(&mut self, pending_on: Yield) -> &mut VecDeque { + fn get_mut(&mut self, pending_on: Readyness) -> &mut VecDeque { self.inner.entry(pending_on).or_default() } - fn push_back(&mut self, pending_on: Yield, generator: ReconcileGen) { + fn push_back(&mut self, pending_on: Readyness, generator: ReconcileGen) { self.get_mut(pending_on).push_back(generator); } - fn push_front(&mut self, pending_on: Yield, generator: ReconcileGen) { + fn push_front(&mut self, pending_on: Readyness, generator: ReconcileGen) { self.get_mut(pending_on).push_front(generator); } - fn pop_front(&mut self, pending_on: Yield) -> Option { + fn pop_front(&mut self, pending_on: Readyness) -> Option { self.get_mut(pending_on).pop_front() } - fn len(&self, pending_on: &Yield) -> usize { + fn len(&self, pending_on: &Readyness) -> usize { self.inner.get(pending_on).map(|v| v.len()).unwrap_or(0) } @@ -269,7 +238,7 @@ pub struct StorageThread { } type ReconcileFut = LocalBoxFuture<'static, Result<(), Error>>; -type ReconcileGen = 
Gen; +type ReconcileGen = (&'static str, Gen); impl StorageThread { pub fn run(&mut self) -> anyhow::Result<()> { @@ -299,7 +268,7 @@ impl StorageThread { peer, state, channels, - start, + init, // start, } => { let session = StorageSession { state, @@ -307,13 +276,14 @@ impl StorageThread { pending: Default::default(), }; self.sessions.insert(peer, session); - self.start_coroutine(peer, |routine| routine.run(start).boxed_local())?; + info!("start coroutine control"); + self.start_coroutine(peer, |routine| routine.run_control(init).boxed_local(), "control")?; } ToActor::DropSession { peer } => { self.sessions.remove(&peer); } ToActor::Resume { peer, notify } => { - self.resume_yielded(peer, notify)?; + self.resume_next(peer, notify)?; } // ToActor::ResumeRecv { peer, channel } => { // self.resume_recv(peer, channel)?; @@ -341,6 +311,7 @@ impl StorageThread { &mut self, peer: NodeId, producer: impl FnOnce(Coroutine) -> ReconcileFut, + label: &'static str, ) -> Result<(), Error> { let session = self.sessions.get_mut(&peer).ok_or(Error::SessionNotFound)?; let snapshot = Arc::new(self.store.borrow_mut().snapshot()?); @@ -364,7 +335,7 @@ impl StorageThread { }; (producer)(routine) }); - self.resume_coroutine(peer, generator) + self.resume_coroutine(peer, (label, generator)) } // #[instrument(skip_all, fields(session=%peer.fmt_short(),ch=%channel.fmt_short()))] @@ -392,103 +363,52 @@ impl StorageThread { // } #[instrument(skip_all, fields(session=%peer.fmt_short()))] - fn resume_yielded(&mut self, peer: NodeId, notify: Yield) -> Result<(), Error> { - let session = self.session_mut(&peer)?; - debug!(pending = session.pending.len(¬ify), "resume"); + fn resume_next(&mut self, peer: NodeId, notify: Readyness) -> Result<(), Error> { + // debug!(pending = session.pending.len(¬ify), "resume"); + // while let Some(generator) = session.pending.pop_front(notify) { + // self.resume_coroutine(peer, generator); + // } + // Ok(()) + // loop { + let session = 
self.sessions.get_mut(&peer).ok_or(Error::SessionNotFound)?; let generator = session.pending.pop_front(notify); match generator { Some(generator) => self.resume_coroutine(peer, generator), None => { debug!("nothing to resume"); + // return Ok(()); Ok(()) } } + // } } - fn resume_coroutine(&mut self, peer: NodeId, mut generator: ReconcileGen) -> Result<(), Error> { - debug!(session = peer.fmt_short(), "resume"); - let session = self.session_mut(&peer)?; - match generator.resume() { - GeneratorState::Yielded(reason) => { - info!(?reason, "yield"); - // match &reason { - // YieldReason::ResourceMissing(handle) => { - // // match handle.ty - // // self.actor_rx.s - // let notifier = Notifier { - // peer, - // tx, - // notify: YieldReason::ResourceMissing(*handle), - // }; - // session - // .state - // .lock() - // .unwrap() - // .their_resources - // .register_notify(*handle, notifier); - // } - // _ => {} - // } - session.pending.push_back(reason, generator); - Ok(()) - } - GeneratorState::Complete(res) => { - info!(?res, "complete"); - res + fn resume_coroutine(&mut self, peer: NodeId, generator: ReconcileGen) -> Result<(), Error> { + let (routine, mut generator) = generator; + debug!(session = %peer.fmt_short(), %routine, "resume"); + loop { + match generator.resume() { + GeneratorState::Yielded(yielded) => { + info!(?yielded, %routine, "yield"); + match yielded { + Yield::Pending(notify) => { + let session = self.session_mut(&peer)?; + session.pending.push_back(notify, (routine, generator)); + break Ok(()); + } + Yield::StartReconciliation(start) => { + info!("start coroutine reconciliation"); + self.start_coroutine(peer, |routine| { + routine.run_reconciliation(start).boxed_local() + }, "reconcile")?; + } + } + } + GeneratorState::Complete(res) => { + info!(?res, "complete"); + break res; + } } } } } - -// #[derive(Debug, Clone, Hash, Eq, PartialEq)] -// enum PendingOn { -// Channel { -// channel: LogicalChannel, -// interest: Interest, -// }, -// Resource { -// 
handle: ResourceHandle, -// }, -// } -// fn on_message(&mut self, peer: NodeId, message: Message) -> Result<(), Error> { -// info!(msg=%message, "recv"); -// match message { -// Message::ReconciliationSendFingerprint(message) => { -// self.start_coroutine(peer, |routine| { -// routine.on_send_fingerprint(message).boxed_local() -// })?; -// } -// Message::ReconciliationAnnounceEntries(message) => { -// self.start_coroutine(peer, |routine| { -// routine.on_announce_entries(message).boxed_local() -// })?; -// } -// Message::ReconciliationSendEntry(message) => { -// let session = self.session_mut(&peer)?; -// let authorised_entry = { -// let mut state = session.state.lock().unwrap(); -// let authorised_entry = state.authorize_send_entry(message)?; -// state.trigger_notify_if_complete(); -// authorised_entry -// }; -// self.store.ingest_entry(&authorised_entry)?; -// debug!("ingested entry"); -// } -// _ => return Err(Error::UnsupportedMessage), -// } -// let session = self.session(&peer)?; -// let state = session.state.lock().unwrap(); -// let started = state.reconciliation_started; -// let pending_ranges = &state.pending_ranges; -// let pending_entries = &state.pending_entries; -// let is_complete = state.is_complete(); -// info!( -// is_complete, -// started, -// ?pending_entries, -// ?pending_ranges, -// "handled" -// ); -// -// Ok(()) -// } From 0009e1b232ab8a987657c8cc683fe469d4674f69 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Wed, 8 May 2024 12:16:42 +0200 Subject: [PATCH 019/198] much much better now --- iroh-willow/src/net.rs | 214 +++------------------------ iroh-willow/src/session/coroutine.rs | 110 ++++++-------- iroh-willow/src/session/resource.rs | 6 +- iroh-willow/src/store/actor.rs | 48 +++--- iroh-willow/src/util/channel.rs | 20 +-- 5 files changed, 112 insertions(+), 286 deletions(-) diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index a1409d6d22..8432aeea09 100644 --- a/iroh-willow/src/net.rs +++ 
b/iroh-willow/src/net.rs @@ -13,7 +13,7 @@ use tokio::{ }; // use tokio_stream::StreamExt; // use tokio_util::codec::{FramedRead, FramedWrite}; -use tracing::{debug, error_span, info, instrument, Instrument, Span}; +use tracing::{debug, error_span, info, instrument, trace, warn, Instrument, Span}; use crate::{ proto::wgps::{ @@ -35,6 +35,8 @@ use self::codec::WillowCodec; pub mod codec; +const CHANNEL_CAP: usize = 1024 * 64; + // /// Read the next frame from a [`FramedRead`] but only if it is available without waiting on IO. // async fn next_if_ready( // mut reader: &mut FramedRead, @@ -86,7 +88,7 @@ pub async fn run( &store, peer, LogicalChannel::Control, - 1024, + CHANNEL_CAP, control_send_stream, control_recv_stream, ); @@ -95,7 +97,7 @@ pub async fn run( &store, peer, LogicalChannel::Reconciliation, - 1024, + CHANNEL_CAP, reconciliation_send_stream, reconciliation_recv_stream, ); @@ -121,7 +123,7 @@ pub async fn run( store .send(ToActor::InitSession { peer, - state: Arc::new(Mutex::new(state)), + state, channels: channels.clone(), init, }) @@ -181,12 +183,12 @@ async fn recv_loop( notifier: Notifier, ) -> anyhow::Result<()> { loop { - let buf = recv_stream.read_chunk(1024 * 16, true).await?; + let buf = recv_stream.read_chunk(CHANNEL_CAP, true).await?; if let Some(buf) = buf { channel_sender.write_slice_async(&buf.bytes[..]).await; - debug!(len = buf.bytes.len(), "recv"); + trace!(len = buf.bytes.len(), "recv"); if channel_sender.is_receivable_notify_set() { - debug!("notify"); + trace!("notify"); notifier.notify().await?; } } else { @@ -213,7 +215,9 @@ async fn send_loop( notifier.notify().await?; } } - send_stream.finish().await?; + send_stream.flush().await?; + // send_stream.stopped().await?; + send_stream.finish().await.ok(); debug!("send_loop close"); Ok(()) } @@ -269,8 +273,8 @@ mod tests { async fn smoke() -> anyhow::Result<()> { iroh_test::logging::setup_multithreaded(); let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(1); - let n_betty = 1; - let 
n_alfie = 2; + let n_betty: usize = std::env::var("N_BETTY").as_deref().unwrap_or("1000").parse().unwrap(); + let n_alfie: usize = std::env::var("N_ALFIE").as_deref().unwrap_or("1000").parse().unwrap(); let ep_alfie = MagicEndpoint::builder() .secret_key(SecretKey::generate_with_rng(&mut rng)) @@ -402,14 +406,14 @@ mod tests { info!("alfie res {:?}", res_alfie); info!("betty res {:?}", res_betty); - info!( - "alfie store {:?}", - get_entries_debug(&handle_alfie, namespace_id).await? - ); - info!( - "betty store {:?}", - get_entries_debug(&handle_betty, namespace_id).await? - ); + // info!( + // "alfie store {:?}", + // get_entries_debug(&handle_alfie, namespace_id).await? + // ); + // info!( + // "betty store {:?}", + // get_entries_debug(&handle_betty, namespace_id).await? + // ); assert!(res_alfie.is_ok()); assert!(res_betty.is_ok()); @@ -452,177 +456,3 @@ mod tests { Ok(entries) } } - -// let mut join_set = JoinSet::new(); -// join_set.spawn( -// session_fut -// .map(|r| ("session", r.map_err(|e| anyhow::Error::from(e)))) -// .instrument(Span::current()), -// ); -// join_set.spawn( -// control_recv_fut -// .map(|r| ("control_recv", r)) -// .instrument(Span::current()), -// ); -// join_set.spawn( -// reconciliation_recv_fut -// .map(|r| ("reconciliation_recv", r)) -// .instrument(Span::current()), -// ); -// join_set.spawn( -// control_send_fut -// .map(|r| ("control_send", r)) -// .instrument(Span::current()), -// ); -// join_set.spawn( -// reconciliation_send_fut -// .map(|r| ("reconciliation_send", r)) -// .instrument(Span::current()), -// ); -// -// let finish_tasks_fut = async { -// let mut failed: Option = None; -// while let Some(res) = join_set.join_next().await { -// match res { -// Ok((label, Err(err))) => { -// debug!(?err, "task {label} failed"); -// if failed.is_none() { -// failed = Some(err); -// join_set.abort_all(); -// } -// } -// Ok((label, Ok(()))) => { -// debug!("task {label} finished"); -// } -// Err(err) if err.is_cancelled() => { -// 
debug!("task cancelled"); -// } -// Err(err) => { -// debug!(?err, "task failed"); -// if failed.is_none() { -// failed = Some(err.into()); -// join_set.abort_all(); -// } -// } -// } -// } -// match failed { -// None => Ok(()), -// Some(err) => Err(err), -// } -// }; -// tracing::info!("COMPLETE"); -// channels.close_send(); -// completed = true; -// } - -// let channel_futs = [control_send_fut, reconciliation_send_fut, control_recv_fut, reconciliation_recv_fut]; -// let channel_futs = tokio::join!(control_send_ft); -// -// let channel_fut = async move { -// tokio::join!( -// session_fut, -// control_send_fut, -// reconciliation_send_fut, -// control_recv_fut, -// reconciliation_recv_fut -// ) -// }; -// tokio::pin!(channel_fut); -// let channel_fut = async move { -// let -// // res = &mut session_fut => res.context("session")?, -// // res = &mut control_recv_fut => res.context("control_recv")?, -// // res = &mut control_send_fut => res.context("control_send")?, -// // res = &mut reconciliation_recv_fut => res.context("reconciliation_recv")?, -// // res = &mut reconciliation_send_fut => res.context("reconciliation_send")?, -// } -// tokio::pin!(channel_fut); -// let mut completed = false; -// tokio::select! 
{ -// biased; -// _ = on_complete.notified() => { -// tracing::info!("COMPLETE"); -// channels.close_send(); -// completed = true; -// } -// // res = &mut channel_fut => { -// // res.0?; -// // res.1?; -// // res.2?; -// // res.3?; -// // res.4?; -// // } -// res = &mut session_fut => res.context("session")?, -// res = &mut control_recv_fut => res.context("control_recv")?, -// res = &mut control_send_fut => res.context("control_send")?, -// res = &mut reconciliation_recv_fut => res.context("reconciliation_recv")?, -// res = &mut reconciliation_send_fut => res.context("reconciliation_send")?, -// } -// tracing::info!(?completed, "!CLOSED!"); -// if completed { -// let res = tokio::join!( -// session_fut, -// control_send_fut, -// reconciliation_send_fut, -// control_recv_fut, -// reconciliation_recv_fut -// ); -// // let res = channel_fut.await; -// res.0?; -// res.1?; -// res.2?; -// res.3?; -// res.4?; -// -// // control_send_fut.await?; -// // info!("control_send down"); -// // reconciliation_send_fut.await?; -// // info!("reconciliation_send down"); -// // -// // session_fut.await?; -// // info!("session down"); -// // -// // control_recv_fut.await?; -// // info!("control_recv down"); -// // reconciliation_recv_fut.await?; -// // info!("reconciliation_recv down"); -// // control_send.finish().await?; -// Ok(()) -// } else { -// Err(anyhow!( -// "All tasks finished but reconciliation did not complete" -// )) -// } -// tokio::pin!(finish_tasks_fut); -// let res = tokio::select! 
{ -// res = &mut finish_tasks_fut => { -// match res { -// // we completed before on_complete was triggered: no success -// Ok(()) => Err(anyhow!("all tasks finished but reconciliation was not completed")), -// Err(err) => Err(err), -// } -// } -// _ = on_complete.notified()=> { -// // finish_tasks_fut.abort(); -// // join_set.abort_all(); -// Ok(()) -// } -// }; -// res -// tokio::pin!(session_fut); -// tokio::pin!(control_send_fut); -// tokio::pin!(reconciliation_send_fut); -// tokio::pin!(control_recv_fut); -// tokio::pin!(reconciliation_recv_fut); -// tokio::pin!(notified_fut); -// let res = tokio::join!( -// session_fut, -// control_send_fut, -// reconciliation_send_fut, -// control_recv_fut, -// reconciliation_recv_fut, -// notified_fut -// ); -// tracing::warn!("RES {res:?}"); -// Ok(()) diff --git a/iroh-willow/src/session/coroutine.rs b/iroh-willow/src/session/coroutine.rs index eb2c584cef..9efc084595 100644 --- a/iroh-willow/src/session/coroutine.rs +++ b/iroh-willow/src/session/coroutine.rs @@ -1,5 +1,6 @@ use std::{ - cell::RefCell, + borrow::BorrowMut, + cell::{RefCell, RefMut}, collections::HashSet, rc::Rc, sync::{Arc, Mutex}, @@ -11,7 +12,7 @@ use genawaiter::{ }; use iroh_net::NodeId; use tokio::sync::Notify; -use tracing::{debug, info, warn}; +use tracing::{debug, info, trace, warn}; use crate::{ proto::{ @@ -66,7 +67,7 @@ pub struct Coroutine { pub co: Co, } -pub type SessionState = Arc>; +pub type SessionState = Rc>; #[derive(Debug, Clone)] pub struct Channels { @@ -141,9 +142,16 @@ impl SessionStateInner { } } pub fn is_complete(&self) -> bool { - self.reconciliation_started + let is_complete = self.reconciliation_started && self.pending_ranges.is_empty() - && self.pending_entries.is_none() + && self.pending_entries.is_none(); + trace!( + started = self.reconciliation_started, + pending_ranges = self.pending_ranges.len(), + pending_entries = ?self.pending_entries, + "is_complete {is_complete}" + ); + is_complete } pub fn 
trigger_notify_if_complete(&mut self) -> bool { @@ -357,7 +365,7 @@ impl Coroutine { while let Some(message) = self.recv(LogicalChannel::Reconciliation).await { let message = message?; self.on_message(message).await?; - if self.state.lock().unwrap().trigger_notify_if_complete() { + if self.state_mut().trigger_notify_if_complete() { break; } } @@ -365,15 +373,19 @@ impl Coroutine { Ok(()) } + fn state_mut(&mut self) -> RefMut { + RefCell::borrow_mut(&mut self.state) + } + pub async fn run_control(mut self, init: SessionInit) -> Result<(), Error> { - let reveal_message = self.state.lock().unwrap().commitment_reveal()?; + let reveal_message = self.state_mut().commitment_reveal()?; self.send_control(reveal_message).await?; while let Some(message) = self.recv(LogicalChannel::Control).await { let message = message?; - info!(%message, "run_control recv"); + debug!(%message, "run_control recv"); self.on_control_message(message, &init).await?; - if self.state.lock().unwrap().trigger_notify_if_complete() { + if self.state_mut().trigger_notify_if_complete() { break; } } @@ -388,42 +400,21 @@ impl Coroutine { ) -> Result<(), Error> { match message { Message::CommitmentReveal(msg) => { - let setup_messages = self - .state - .lock() - .unwrap() - .on_commitment_reveal(msg, &init)?; + let setup_messages = self.state_mut().on_commitment_reveal(msg, &init)?; for message in setup_messages { - self.channels.control_send.send_async(&message).await?; - info!(%message, "sent"); + debug!(%message, "send"); + self.send_control(message).await?; } } Message::SetupBindReadCapability(msg) => { - self.state - .lock() - .unwrap() - .on_setup_bind_read_capability(msg)?; + self.state_mut().on_setup_bind_read_capability(msg)?; } Message::SetupBindStaticToken(msg) => { - info!("A"); - self.state.lock().unwrap().on_setup_bind_static_token(msg); - info!("B"); + self.state_mut().on_setup_bind_static_token(msg); } Message::SetupBindAreaOfInterest(msg) => { - let (_peer, start) = self - .state - .lock() 
- .unwrap() - .on_setup_bind_area_of_interest(msg)?; + let (_peer, start) = self.state_mut().on_setup_bind_area_of_interest(msg)?; self.co.yield_(Yield::StartReconciliation(start)).await; - // self.notifier.notify_sync(self.peer, notify) - // let message = ToActor::InitSession { - // state: self.state.clone(), - // channels: self.channels.clone(), - // start, - // peer, - // }; - // self.store_handle.send(message).await?; } Message::ControlFreeHandle(_msg) => { // TODO: Free handles @@ -434,7 +425,7 @@ impl Coroutine { } async fn on_message(&mut self, message: Message) -> Result<(), Error> { - info!(%message, "recv"); + trace!(%message, "recv"); match message { Message::ReconciliationSendFingerprint(message) => { self.on_send_fingerprint(message).await? @@ -454,7 +445,7 @@ impl Coroutine { their_handle: AreaOfInterestHandle, ) -> Result<(), Error> { debug!("init reconciliation"); - let mut state = self.state.lock().unwrap(); + let mut state = self.state_mut(); let our_aoi = state.our_resources.areas_of_interest.get(&our_handle)?; let their_aoi = state.their_resources.areas_of_interest.get(&their_handle)?; @@ -482,7 +473,7 @@ impl Coroutine { &mut self, message: ReconciliationSendFingerprint, ) -> Result<(), Error> { - debug!("on_send_fingerprint start"); + trace!("on_send_fingerprint start"); let ReconciliationSendFingerprint { range, fingerprint: their_fingerprint, @@ -492,7 +483,7 @@ impl Coroutine { } = message; let namespace = { - let mut state = self.state.lock().unwrap(); + let mut state = self.state_mut(); state.reconciliation_started = true; state.clear_pending_range_if_some(our_handle, is_final_reply_for_range)?; state.range_is_authorised(&range, &our_handle, &their_handle)? 
@@ -532,14 +523,14 @@ impl Coroutine { self.split_range_and_send_parts(namespace, &range, our_handle, their_handle) .await?; } - debug!("on_send_fingerprint done"); + trace!("on_send_fingerprint done"); Ok(()) } async fn on_announce_entries( &mut self, message: ReconciliationAnnounceEntries, ) -> Result<(), Error> { - debug!("on_announce_entries start"); + trace!("on_announce_entries start"); let ReconciliationAnnounceEntries { range, count, @@ -551,12 +542,11 @@ impl Coroutine { } = message; let namespace = { - let mut state = self.state.lock().unwrap(); + let mut state = self.state_mut(); state.clear_pending_range_if_some(our_handle, is_final_reply_for_range)?; if state.pending_entries.is_some() { return Err(Error::InvalidMessageInCurrentState); } - debug!("after"); let namespace = state.range_is_authorised(&range, &our_handle, &their_handle)?; if count != 0 { state.pending_entries = Some(count); @@ -575,7 +565,7 @@ impl Coroutine { ) .await?; } - debug!("on_announce_entries done"); + trace!("on_announce_entries done"); Ok(()) } @@ -586,20 +576,21 @@ impl Coroutine { dynamic_token, } = message; - let mut state = self.state.lock().unwrap(); + let mut state = self.state_mut(); let remaining = state .pending_entries .as_mut() .ok_or(Error::InvalidMessageInCurrentState)?; *remaining -= 1; + info!(%remaining, "ingest entry"); if *remaining == 0 { state.pending_entries = None; } drop(state); let static_token = loop { - let mut state = self.state.lock().unwrap(); + let mut state = RefCell::borrow_mut(&mut self.state); match state .their_resources .static_tokens @@ -621,10 +612,7 @@ impl Coroutine { let authorisation_token = AuthorisationToken::from_parts(static_token, dynamic_token); let authorised_entry = AuthorisedEntry::try_from_parts(entry.entry, authorisation_token)?; - self.store_writer - .borrow_mut() - .ingest_entry(&authorised_entry)?; - debug!("ingested entry"); + RefCell::borrow_mut(&mut self.store_writer).ingest_entry(&authorised_entry)?; Ok(()) } @@ -639,16 
+627,17 @@ impl Coroutine { async fn send(&self, message: impl Into) -> anyhow::Result<()> { let message: Message = message.into(); let channel = message.logical_channel(); - debug!(%message, ?channel, "send"); - let sender = self.channels.sender(message.logical_channel()); + // debug!(%message, ?channel, "send"); + let sender = self.channels.sender(channel); loop { match sender.send_or_set_notify(&message)? { WriteOutcome::Ok => { - info!(msg=%message, "sent"); + debug!(msg=%message, ch=%channel.fmt_short(), "sent"); break Ok(()); } WriteOutcome::BufferFull => { + debug!(msg=%message, ch=%channel.fmt_short(), "sent buf full, yield"); self.co .yield_(Yield::Pending(Readyness::Channel(channel, Interest::Send))) .await; @@ -673,7 +662,7 @@ impl Coroutine { .await; } ReadOutcome::Item(message) => { - debug!(?message, "recv"); + debug!(%message, "recv"); return Some(Ok(message)); } }, @@ -690,7 +679,7 @@ impl Coroutine { is_final_reply_for_range: Option, ) -> anyhow::Result<()> { { - let mut state = self.state.lock().unwrap(); + let mut state = self.state_mut(); state.pending_ranges.insert((our_handle, range.clone())); } let msg = ReconciliationSendFingerprint { @@ -715,7 +704,7 @@ impl Coroutine { our_count: Option, ) -> Result<(), Error> { if want_response { - let mut state = self.state.lock().unwrap(); + let mut state = self.state_mut(); state.pending_ranges.insert((our_handle, range.clone())); } let our_count = match our_count { @@ -742,11 +731,8 @@ impl Coroutine { // TODO: partial payloads let available = entry.payload_length; // TODO avoid such frequent locking - let (static_token_handle, static_token_bind_msg) = self - .state - .lock() - .unwrap() - .bind_our_static_token(static_token)?; + let (static_token_handle, static_token_bind_msg) = + RefCell::borrow_mut(&mut self.state).bind_our_static_token(static_token)?; if let Some(msg) = static_token_bind_msg { self.send_control(msg).await?; } diff --git a/iroh-willow/src/session/resource.rs 
b/iroh-willow/src/session/resource.rs index dbfb259714..86f31ebd6d 100644 --- a/iroh-willow/src/session/resource.rs +++ b/iroh-willow/src/session/resource.rs @@ -18,7 +18,7 @@ pub struct ScopedResources { } impl ScopedResources { pub fn register_notify(&mut self, handle: ResourceHandle, notify: Notifier) { - tracing::info!(?handle, "register_notify"); + tracing::debug!(?handle, "register_notify"); match handle { ResourceHandle::AreaOfInterest(h) => self.areas_of_interest.register_notify(h, notify), ResourceHandle::Capability(h) => self.capabilities.register_notify(h, notify), @@ -64,9 +64,9 @@ where self.next_handle += 1; let resource = Resource::new(resource); self.map.insert(handle, resource); - tracing::info!(?handle, "bind"); + tracing::debug!(?handle, "bind"); if let Some(mut notify) = self.notify.remove(&handle) { - tracing::info!(?handle, "notify {}", notify.len()); + tracing::debug!(?handle, "notify {}", notify.len()); for notify in notify.drain(..) { if let Err(err) = notify.notify_sync() { tracing::warn!(?err, "notify failed for {handle:?}"); diff --git a/iroh-willow/src/store/actor.rs b/iroh-willow/src/store/actor.rs index 9d25fe09f0..3f96633ae8 100644 --- a/iroh-willow/src/store/actor.rs +++ b/iroh-willow/src/store/actor.rs @@ -15,7 +15,7 @@ use genawaiter::{ GeneratorState, }; use tokio::sync::oneshot; -use tracing::{debug, error, error_span, info, instrument, warn}; +use tracing::{debug, error, error_span, info, instrument, warn, Span}; // use iroh_net::NodeId; use super::Store; @@ -30,7 +30,7 @@ use crate::{ willow::{AuthorisedEntry, Entry}, }, session::{ - coroutine::{Channels, Coroutine, Readyness, SessionState, Yield}, + coroutine::{Channels, Coroutine, Readyness, SessionState, SessionStateInner, Yield}, Error, SessionInit, }, util::channel::{self, ReadOutcome, Receiver}, @@ -165,12 +165,12 @@ impl Drop for StoreHandle { } } } -#[derive(derive_more::Debug)] +#[derive(derive_more::Debug, strum::Display)] pub enum ToActor { InitSession { peer: 
NodeId, #[debug(skip)] - state: SessionState, + state: SessionStateInner, #[debug(skip)] channels: Channels, // start: Option<(AreaOfInterestHandle, AreaOfInterestHandle)>, @@ -238,7 +238,7 @@ pub struct StorageThread { } type ReconcileFut = LocalBoxFuture<'static, Result<(), Error>>; -type ReconcileGen = (&'static str, Gen); +type ReconcileGen = (Span, Gen); impl StorageThread { pub fn run(&mut self) -> anyhow::Result<()> { @@ -261,7 +261,7 @@ impl StorageThread { } fn handle_message(&mut self, message: ToActor) -> Result<(), Error> { - debug!(?message, "tick: handle_message"); + debug!(%message, "tick: handle_message"); match message { ToActor::Shutdown { .. } => unreachable!("handled in run"), ToActor::InitSession { @@ -271,13 +271,17 @@ impl StorageThread { init, // start, } => { let session = StorageSession { - state, + state: Rc::new(RefCell::new(state)), channels, pending: Default::default(), }; self.sessions.insert(peer, session); - info!("start coroutine control"); - self.start_coroutine(peer, |routine| routine.run_control(init).boxed_local(), "control")?; + debug!("start coroutine control"); + self.start_coroutine( + peer, + |routine| routine.run_control(init).boxed_local(), + error_span!("control", peer=%peer.fmt_short()), + )?; } ToActor::DropSession { peer } => { self.sessions.remove(&peer); @@ -311,7 +315,7 @@ impl StorageThread { &mut self, peer: NodeId, producer: impl FnOnce(Coroutine) -> ReconcileFut, - label: &'static str, + span: Span ) -> Result<(), Error> { let session = self.sessions.get_mut(&peer).ok_or(Error::SessionNotFound)?; let snapshot = Arc::new(self.store.borrow_mut().snapshot()?); @@ -335,7 +339,7 @@ impl StorageThread { }; (producer)(routine) }); - self.resume_coroutine(peer, (label, generator)) + self.resume_coroutine(peer, (span, generator)) } // #[instrument(skip_all, fields(session=%peer.fmt_short(),ch=%channel.fmt_short()))] @@ -384,28 +388,32 @@ impl StorageThread { } fn resume_coroutine(&mut self, peer: NodeId, generator: 
ReconcileGen) -> Result<(), Error> { - let (routine, mut generator) = generator; - debug!(session = %peer.fmt_short(), %routine, "resume"); + let (span, mut generator) = generator; + let _guard = span.enter(); + debug!("resume"); loop { match generator.resume() { GeneratorState::Yielded(yielded) => { - info!(?yielded, %routine, "yield"); + debug!(?yielded, "yield"); match yielded { Yield::Pending(notify) => { let session = self.session_mut(&peer)?; - session.pending.push_back(notify, (routine, generator)); + drop(_guard); + session.pending.push_back(notify, (span, generator)); break Ok(()); } Yield::StartReconciliation(start) => { - info!("start coroutine reconciliation"); - self.start_coroutine(peer, |routine| { - routine.run_reconciliation(start).boxed_local() - }, "reconcile")?; + debug!("start coroutine reconciliation"); + self.start_coroutine( + peer, + |routine| routine.run_reconciliation(start).boxed_local(), + error_span!("reconcile"), + )?; } } } GeneratorState::Complete(res) => { - info!(?res, "complete"); + debug!(?res, "complete"); break res; } } diff --git a/iroh-willow/src/util/channel.rs b/iroh-willow/src/util/channel.rs index 43a6953cd7..a678fecf0d 100644 --- a/iroh-willow/src/util/channel.rs +++ b/iroh-willow/src/util/channel.rs @@ -6,7 +6,7 @@ use std::{ use bytes::{Buf, Bytes, BytesMut}; use tokio::sync::Notify; -use tracing::{debug, info}; +use tracing::{debug, info, trace}; use crate::proto::wgps::Message; @@ -55,17 +55,17 @@ impl Shared { fn read_advance(&mut self, cnt: usize) { self.buf.advance(cnt); - if cnt > 0 && self.write_blocked { - self.write_blocked = false; - self.notify_writable.notify_one(); + if cnt > 0 { + // self.write_blocked = false; + self.notify_writable.notify_waiters(); } } fn read_bytes(&mut self) -> Bytes { let len = self.buf.len(); - if len > 0 && self.write_blocked { - self.write_blocked = false; - self.notify_writable.notify_one(); + if len > 0 { + // self.write_blocked = false; + 
self.notify_writable.notify_waiters(); } self.buf.split_to(len).freeze() } @@ -102,6 +102,7 @@ impl Shared { fn read_message(&mut self) -> anyhow::Result> { let data = self.read_slice(); + trace!("read, remaining {}", data.len()); let res = match T::decode_from(data)? { DecodeOutcome::NeedMoreData => { if self.closed() { @@ -118,7 +119,7 @@ impl Shared { Ok(res) } - // fn receiver_want_notify(&mut self) { + // fn receiver_want_notify(&mut self::) { // self.need_read_notify = true; // } // fn need_write_notify(&mut self) { @@ -280,7 +281,7 @@ impl Sender { } else { let out = shared.write_slice(data.len()).expect("just checked"); out.copy_from_slice(data); - shared.notify_readable.notify_one(); + shared.notify_readable.notify_waiters(); break; // return true; } @@ -309,6 +310,7 @@ impl Sender { if matches!(outcome, WriteOutcome::BufferFull) { shared.need_write_notify = true; } + debug!("send buf remaining: {}", shared.remaining_write_capacity()); Ok(outcome) } From c921217697a6133665953d8a181af24a1b6be810 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Wed, 8 May 2024 13:59:46 +0200 Subject: [PATCH 020/198] little cleanup --- Cargo.lock | 4 + Cargo.toml | 3 + iroh-willow/Cargo.toml | 4 + iroh-willow/src/net.rs | 17 ++- iroh-willow/src/proto.rs | 2 +- iroh-willow/src/proto/challenge.rs | 5 +- iroh-willow/src/proto/keys.rs | 4 +- iroh-willow/src/proto/meadowcap.rs | 1 + iroh-willow/src/proto/wgps.rs | 6 ++ iroh-willow/src/proto/willow.rs | 5 +- iroh-willow/src/session.rs | 85 --------------- iroh-willow/src/session/coroutine.rs | 152 ++++++++++++++------------- iroh-willow/src/session/resource.rs | 5 +- iroh-willow/src/store.rs | 8 +- iroh-willow/src/store/actor.rs | 6 +- 15 files changed, 133 insertions(+), 174 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 67941e7175..a2fcc04d4f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2933,7 +2933,9 @@ dependencies = [ "iroh-metrics", "iroh-net", "iroh-test", + "itertools 0.12.1", "num_enum", + 
"once_cell", "parking_lot", "postcard", "proptest", @@ -2941,10 +2943,12 @@ dependencies = [ "rand", "rand_chacha", "rand_core", + "rayon", "redb 2.0.0", "rtrb", "self_cell", "serde", + "smallvec", "strum 0.25.0", "tempfile", "test-strategy", diff --git a/Cargo.toml b/Cargo.toml index 2c23fef057..3eae79edb2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,6 +18,9 @@ resolver = "2" [profile.release] debug = true +[profile.bench] +debug = true + [profile.optimized-release] inherits = 'release' debug = false diff --git a/iroh-willow/Cargo.toml b/iroh-willow/Cargo.toml index 1c4d2861f7..cb711c5dda 100644 --- a/iroh-willow/Cargo.toml +++ b/iroh-willow/Cargo.toml @@ -48,6 +48,10 @@ zerocopy = { version = "0.8.0-alpha.7", features = ["derive"] } genawaiter = "0.99.1" rtrb = "0.3.0" parking_lot = "0.12.2" +once_cell = "1.19.0" +rayon = "1.10.0" +smallvec = "1.13.2" +itertools = "0.12.1" [dev-dependencies] iroh-test = { path = "../iroh-test" } diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 8432aeea09..33bcd19cb8 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -269,12 +269,20 @@ mod tests { const ALPN: &[u8] = b"iroh-willow/0"; - #[tokio::test] + #[tokio::test(flavor = "multi_thread")] async fn smoke() -> anyhow::Result<()> { iroh_test::logging::setup_multithreaded(); let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(1); - let n_betty: usize = std::env::var("N_BETTY").as_deref().unwrap_or("1000").parse().unwrap(); - let n_alfie: usize = std::env::var("N_ALFIE").as_deref().unwrap_or("1000").parse().unwrap(); + let n_betty: usize = std::env::var("N_BETTY") + .as_deref() + .unwrap_or("1000") + .parse() + .unwrap(); + let n_alfie: usize = std::env::var("N_ALFIE") + .as_deref() + .unwrap_or("1000") + .parse() + .unwrap(); let ep_alfie = MagicEndpoint::builder() .secret_key(SecretKey::generate_with_rng(&mut rng)) @@ -381,6 +389,8 @@ mod tests { }; debug!("init constructed"); + println!("init took {:?}", start.elapsed()); + let start = 
Instant::now(); let handle_alfie = StoreHandle::spawn(store_alfie, node_id_alfie); let handle_betty = StoreHandle::spawn(store_betty, node_id_betty); @@ -403,6 +413,7 @@ mod tests { ), ); info!(time=?start.elapsed(), "reconciliation finished!"); + println!("reconciliation took {:?}", start.elapsed()); info!("alfie res {:?}", res_alfie); info!("betty res {:?}", res_betty); diff --git a/iroh-willow/src/proto.rs b/iroh-willow/src/proto.rs index cd3dc110f4..168a8df3cd 100644 --- a/iroh-willow/src/proto.rs +++ b/iroh-willow/src/proto.rs @@ -1,6 +1,6 @@ +pub mod challenge; pub mod grouping; pub mod keys; pub mod meadowcap; pub mod wgps; pub mod willow; -pub mod challenge; diff --git a/iroh-willow/src/proto/challenge.rs b/iroh-willow/src/proto/challenge.rs index 548ce32c68..7b4aec6ac9 100644 --- a/iroh-willow/src/proto/challenge.rs +++ b/iroh-willow/src/proto/challenge.rs @@ -2,7 +2,10 @@ use iroh_base::hash::Hash; use crate::session::{Error, Role}; -use super::{keys::{UserPublicKey, UserSecretKey, UserSignature}, wgps::{AccessChallenge, ChallengeHash}}; +use super::{ + keys::{UserPublicKey, UserSecretKey, UserSignature}, + wgps::{AccessChallenge, ChallengeHash}, +}; #[derive(Debug)] pub enum ChallengeState { diff --git a/iroh-willow/src/proto/keys.rs b/iroh-willow/src/proto/keys.rs index de69a5e163..2d343116dc 100644 --- a/iroh-willow/src/proto/keys.rs +++ b/iroh-willow/src/proto/keys.rs @@ -327,13 +327,13 @@ impl From<&UserSecretKey> for UserPublicKey { } /// The signature obtained by signing a message with a [`NamespaceSecretKey`]. -#[derive(Serialize, Deserialize, Clone, PartialEq, Eq, Deref)] +#[derive(Serialize, Deserialize, Clone, From, PartialEq, Eq, Deref)] pub struct NamespaceSignature(ed25519_dalek::Signature); bytestring!(NamespaceSignature, SIGNATURE_LENGTH); /// The signature obtained by signing a message with a [`UserSecretKey`]. 
-#[derive(Serialize, Deserialize, Clone, PartialEq, Eq, Deref)] +#[derive(Serialize, Deserialize, Clone, From, PartialEq, Eq, Deref)] pub struct UserSignature(ed25519_dalek::Signature); bytestring!(UserSignature, SIGNATURE_LENGTH); diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index 22bac8f7bc..795fcb893f 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -1,3 +1,4 @@ +use once_cell::sync::Lazy; use serde::{Deserialize, Serialize}; use crate::util::Encoder; diff --git a/iroh-willow/src/proto/wgps.rs b/iroh-willow/src/proto/wgps.rs index dc3ba49aad..a282d7772c 100644 --- a/iroh-willow/src/proto/wgps.rs +++ b/iroh-willow/src/proto/wgps.rs @@ -213,6 +213,12 @@ pub enum Message { ControlFreeHandle(ControlFreeHandle), } +impl Message { + pub fn same_kind(&self, other: &Self) -> bool { + std::mem::discriminant(self) == std::mem::discriminant(other) + } +} + impl Encoder for Message { fn encoded_len(&self) -> usize { let data_len = postcard::experimental::serialized_size(&self).unwrap(); diff --git a/iroh-willow/src/proto/willow.rs b/iroh-willow/src/proto/willow.rs index e299f6b99f..89bbc7c462 100644 --- a/iroh-willow/src/proto/willow.rs +++ b/iroh-willow/src/proto/willow.rs @@ -7,6 +7,7 @@ use serde::{Deserialize, Serialize}; use super::{ keys::{self, UserSecretKey}, meadowcap::{self, attach_authorisation, is_authorised_write, InvalidParams, McCapability}, + wgps::{DynamicToken, StaticToken}, }; /// A type for identifying namespaces. 
@@ -229,8 +230,10 @@ pub struct AuthorisedEntry(Entry, AuthorisationToken); impl AuthorisedEntry { pub fn try_from_parts( entry: Entry, - authorisation_token: AuthorisationToken, + static_token: StaticToken, + dynamic_token: DynamicToken, ) -> Result { + let authorisation_token = AuthorisationToken::from_parts(static_token, dynamic_token); PossiblyAuthorisedEntry::new(entry, authorisation_token).authorise() } diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 1bef828780..94e1557850 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -107,88 +107,3 @@ pub struct SessionInit { // TODO: allow multiple areas of interest? pub area_of_interest: AreaOfInterest, } - -// #[derive(Debug)] -// pub struct ControlLoop { -// init: SessionInit, -// channels: Arc, -// state: SessionState, -// store_handle: StoreHandle, -// } -// -// impl ControlLoop { -// pub fn new( -// state: SessionStateInner, -// channels: Channels, -// store_handle: StoreHandle, -// init: SessionInit, -// ) -> Self { -// Self { -// init, -// channels: Arc::new(channels), -// state: Arc::new(Mutex::new(state)), -// store_handle, -// } -// } -// -// #[instrument(skip_all)] -// pub async fn run(mut self) -> Result<(), Error> { -// let reveal_message = self.state.lock().unwrap().commitment_reveal()?; -// self.channels -// .control_send -// .send_async(&reveal_message) -// .await?; -// while let Some(message) = self.channels.control_recv.recv_async().await { -// let message = message?; -// info!(%message, "recv"); -// self.on_control_message(message).await?; -// } -// debug!("run_control finished"); -// Ok(()) -// } -// -// async fn on_control_message(&mut self, message: Message) -> Result<(), Error> { -// match message { -// Message::CommitmentReveal(msg) => { -// let setup_messages = self -// .state -// .lock() -// .unwrap() -// .on_commitment_reveal(msg, &self.init)?; -// for message in setup_messages { -// self.channels.control_send.send_async(&message).await?; 
-// info!(%message, "sent"); -// } -// } -// Message::SetupBindReadCapability(msg) => { -// self.state -// .lock() -// .unwrap() -// .on_setup_bind_read_capability(msg)?; -// } -// Message::SetupBindStaticToken(msg) => { -// self.state.lock().unwrap().on_setup_bind_static_token(msg); -// } -// Message::SetupBindAreaOfInterest(msg) => { -// let (peer, start) = self -// .state -// .lock() -// .unwrap() -// .on_setup_bind_area_of_interest(msg)?; -// let message = ToActor::InitSession { -// state: self.state.clone(), -// channels: self.channels.clone(), -// start, -// peer, -// }; -// self.store_handle.send(message).await?; -// } -// Message::ControlFreeHandle(_msg) => { -// // TODO: Free handles -// } -// _ => return Err(Error::UnsupportedMessage), -// } -// Ok(()) -// } -// } - diff --git a/iroh-willow/src/session/coroutine.rs b/iroh-willow/src/session/coroutine.rs index 9efc084595..62d5c8434b 100644 --- a/iroh-willow/src/session/coroutine.rs +++ b/iroh-willow/src/session/coroutine.rs @@ -1,5 +1,4 @@ use std::{ - borrow::BorrowMut, cell::{RefCell, RefMut}, collections::HashSet, rc::Rc, @@ -11,6 +10,7 @@ use genawaiter::{ GeneratorState, }; use iroh_net::NodeId; +use smallvec::SmallVec; use tokio::sync::Notify; use tracing::{debug, info, trace, warn}; @@ -19,6 +19,7 @@ use crate::{ challenge::ChallengeState, grouping::ThreeDRange, keys::{NamespaceId, NamespacePublicKey}, + meadowcap::McCapability, wgps::{ AccessChallenge, AreaOfInterestHandle, CapabilityHandle, ChallengeHash, CommitmentReveal, Fingerprint, LengthyEntry, LogicalChannel, Message, ReadCapability, @@ -48,17 +49,10 @@ pub enum Readyness { Resource(ResourceHandle), } -// #[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] -// pub enum NotifyCoroutine { -// StartReconciliation(Option<(AreaOfInterestHandle, AreaOfInterestHandle)>), -// ChannelReady(LogicalChannel, Interest), -// ResourceReady(ResourceHandle), -// } - #[derive(derive_more::Debug)] pub struct Coroutine { pub peer: NodeId, - pub store_snapshot: 
Arc, + pub store_snapshot: Rc, pub store_writer: Rc>, pub channels: Channels, pub state: SessionState, @@ -253,15 +247,7 @@ impl SessionStateInner { Ok((self.peer, start)) } - pub fn authorize_send_entry( - &mut self, - message: ReconciliationSendEntry, - ) -> Result { - let ReconciliationSendEntry { - entry, - static_token_handle, - dynamic_token, - } = message; + pub fn on_send_entry(&mut self) -> Result<(), Error> { let remaining = self .pending_entries .as_mut() @@ -270,15 +256,7 @@ impl SessionStateInner { if *remaining == 0 { self.pending_entries = None; } - let static_token = self - .their_resources - .static_tokens - .get(&static_token_handle)? - .clone(); - - let authorisation_token = AuthorisationToken::from_parts(static_token, dynamic_token); - let authorised_entry = AuthorisedEntry::try_from_parts(entry.entry, authorisation_token)?; - Ok(authorised_entry) + Ok(()) } fn clear_pending_range_if_some( @@ -364,7 +342,7 @@ impl Coroutine { while let Some(message) = self.recv(LogicalChannel::Reconciliation).await { let message = message?; - self.on_message(message).await?; + self.on_reconciliation_message(message).await?; if self.state_mut().trigger_notify_if_complete() { break; } @@ -373,10 +351,6 @@ impl Coroutine { Ok(()) } - fn state_mut(&mut self) -> RefMut { - RefCell::borrow_mut(&mut self.state) - } - pub async fn run_control(mut self, init: SessionInit) -> Result<(), Error> { let reveal_message = self.state_mut().commitment_reveal()?; self.send_control(reveal_message).await?; @@ -424,7 +398,7 @@ impl Coroutine { Ok(()) } - async fn on_message(&mut self, message: Message) -> Result<(), Error> { + async fn on_reconciliation_message(&mut self, message: Message) -> Result<(), Error> { trace!(%message, "recv"); match message { Message::ReconciliationSendFingerprint(message) => { @@ -570,50 +544,40 @@ impl Coroutine { } async fn on_send_entry(&mut self, message: ReconciliationSendEntry) -> Result<(), Error> { - let ReconciliationSendEntry { - entry, - 
static_token_handle, - dynamic_token, - } = message; - - let mut state = self.state_mut(); - - let remaining = state - .pending_entries - .as_mut() - .ok_or(Error::InvalidMessageInCurrentState)?; - *remaining -= 1; - info!(%remaining, "ingest entry"); - if *remaining == 0 { - state.pending_entries = None; - } - drop(state); + let static_token = self.get_static_token(message.static_token_handle).await; + + self.state_mut().on_send_entry()?; + + let authorised_entry = AuthorisedEntry::try_from_parts( + message.entry.entry, + static_token, + message.dynamic_token, + )?; + self.store_writer + .borrow_mut() + .ingest_entry(&authorised_entry)?; + Ok(()) + } - let static_token = loop { - let mut state = RefCell::borrow_mut(&mut self.state); + async fn get_static_token(&mut self, handle: StaticTokenHandle) -> StaticToken { + loop { + let mut state = self.state.borrow_mut(); match state .their_resources .static_tokens - .get_or_notify(&static_token_handle, || { + .get_or_notify(&handle, || { self.notifier - .notifier(self.peer, Readyness::Resource(static_token_handle.into())) + .notifier(self.peer, Readyness::Resource(handle.into())) }) { Some(token) => break token.clone(), None => { drop(state); self.co - .yield_(Yield::Pending(Readyness::Resource( - static_token_handle.into(), - ))) + .yield_(Yield::Pending(Readyness::Resource(handle.into()))) .await } } - }; - - let authorisation_token = AuthorisationToken::from_parts(static_token, dynamic_token); - let authorised_entry = AuthorisedEntry::try_from_parts(entry.entry, authorisation_token)?; - RefCell::borrow_mut(&mut self.store_writer).ingest_entry(&authorised_entry)?; - Ok(()) + } } async fn send_reconciliation(&self, msg: impl Into) -> anyhow::Result<()> { @@ -646,6 +610,44 @@ impl Coroutine { } } + async fn recv_bulk( + &self, + channel: LogicalChannel, + ) -> Option>> { + let receiver = self.channels.receiver(channel); + let mut buf = SmallVec::<[Message; N]>::new(); + loop { + match 
receiver.read_message_or_set_notify() { + Err(err) => return Some(Err(err)), + Ok(outcome) => match outcome { + ReadOutcome::Closed => { + if buf.is_empty() { + debug!("recv: closed"); + return None; + } else { + return Some(Ok(buf)); + } + } + ReadOutcome::ReadBufferEmpty => { + if buf.is_empty() { + self.co + .yield_(Yield::Pending(Readyness::Channel(channel, Interest::Recv))) + .await; + } else { + return Some(Ok(buf)); + } + } + ReadOutcome::Item(message) => { + debug!(%message, "recv"); + buf.push(message); + if buf.len() == N { + return Some(Ok(buf)); + } + } + }, + } + } + } async fn recv(&self, channel: LogicalChannel) -> Option> { let receiver = self.channels.receiver(channel); loop { @@ -730,9 +732,10 @@ impl Coroutine { let (static_token, dynamic_token) = token.into_parts(); // TODO: partial payloads let available = entry.payload_length; - // TODO avoid such frequent locking - let (static_token_handle, static_token_bind_msg) = - RefCell::borrow_mut(&mut self.state).bind_our_static_token(static_token)?; + let (static_token_handle, static_token_bind_msg) = self + .state + .borrow_mut() + .bind_our_static_token(static_token)?; if let Some(msg) = static_token_bind_msg { self.send_control(msg).await?; } @@ -755,11 +758,11 @@ impl Coroutine { ) -> Result<(), Error> { // TODO: expose this config let config = SyncConfig::default(); - { - let iter = self.store_snapshot.split(namespace, &range, &config)?; - // TODO: avoid collect - let iter = iter.collect::>().into_iter(); - let mut iter = iter.peekable(); + // clone to avoid borrow checker trouble + let store_snapshot = Rc::clone(&self.store_snapshot); + let mut iter = store_snapshot + .split_range(namespace, &range, &config)? 
+ .peekable(); while let Some(res) = iter.next() { let (subrange, action) = res?; let is_last = iter.peek().is_none(); @@ -789,7 +792,10 @@ impl Coroutine { } } } - } Ok(()) } + + fn state_mut(&mut self) -> RefMut { + self.state.borrow_mut() + } } diff --git a/iroh-willow/src/session/resource.rs b/iroh-willow/src/session/resource.rs index 86f31ebd6d..b27ab07da3 100644 --- a/iroh-willow/src/session/resource.rs +++ b/iroh-willow/src/session/resource.rs @@ -106,7 +106,10 @@ where if let Some(resource) = self.map.get(handle).as_ref().map(|r| &r.value) { Some(resource) } else { - self.notify.entry(*handle).or_default().push_back((notify)()); + self.notify + .entry(*handle) + .or_default() + .push_back((notify)()); None } } diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index 1d798c2329..25a5b6209c 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -30,7 +30,7 @@ impl Default for SyncConfig { pub trait ReadonlyStore: Send + 'static { fn fingerprint(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result; - fn split( + fn split_range( &self, namespace: NamespaceId, range: &ThreeDRange, @@ -78,7 +78,7 @@ impl ReadonlyStore for MemoryStore { Ok(fingerprint) } - fn split( + fn split_range( &self, namespace: NamespaceId, range: &ThreeDRange, @@ -174,13 +174,13 @@ impl ReadonlyStore for Arc { MemoryStore::fingerprint(&self, namespace, range) } - fn split( + fn split_range( &self, namespace: NamespaceId, range: &ThreeDRange, config: &SyncConfig, ) -> Result>> { - MemoryStore::split(&self, namespace, range, config) + MemoryStore::split_range(&self, namespace, range, config) } fn count(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result { diff --git a/iroh-willow/src/store/actor.rs b/iroh-willow/src/store/actor.rs index 3f96633ae8..3f477e97c7 100644 --- a/iroh-willow/src/store/actor.rs +++ b/iroh-willow/src/store/actor.rs @@ -315,10 +315,10 @@ impl StorageThread { &mut self, peer: NodeId, producer: impl FnOnce(Coroutine) -> 
ReconcileFut, - span: Span + span: Span, ) -> Result<(), Error> { let session = self.sessions.get_mut(&peer).ok_or(Error::SessionNotFound)?; - let snapshot = Arc::new(self.store.borrow_mut().snapshot()?); + let store_snapshot = Rc::new(self.store.borrow_mut().snapshot()?); let channels = session.channels.clone(); let state = session.state.clone(); @@ -330,7 +330,7 @@ impl StorageThread { let generator = Gen::new(move |co| { let routine = Coroutine { peer, - store_snapshot: snapshot, + store_snapshot, store_writer, notifier, channels, From ca433795c2f0a100411a1f3792a0a1e371292938 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Wed, 8 May 2024 15:27:41 +0200 Subject: [PATCH 021/198] big cleanup round --- iroh-willow/src/lib.rs | 2 +- iroh-willow/src/net.rs | 37 +--- iroh-willow/src/net/codec.rs | 55 ----- iroh-willow/src/session.rs | 55 +---- iroh-willow/src/session/coroutine.rs | 311 +++------------------------ iroh-willow/src/session/error.rs | 50 +++++ iroh-willow/src/session/state.rs | 279 ++++++++++++++++++++++++ iroh-willow/src/store/actor.rs | 8 +- 8 files changed, 380 insertions(+), 417 deletions(-) delete mode 100644 iroh-willow/src/net/codec.rs create mode 100644 iroh-willow/src/session/error.rs create mode 100644 iroh-willow/src/session/state.rs diff --git a/iroh-willow/src/lib.rs b/iroh-willow/src/lib.rs index 8aef848b16..1375ed16a4 100644 --- a/iroh-willow/src/lib.rs +++ b/iroh-willow/src/lib.rs @@ -1,6 +1,6 @@ //! 
Implementation of willow -#![allow(missing_docs, unused_imports, dead_code)] +#![allow(missing_docs)] pub mod net; pub mod proto; diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 33bcd19cb8..9d946573ac 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -1,19 +1,10 @@ -use std::{ - pin::Pin, - sync::{Arc, Mutex}, - task::Poll, -}; - -use anyhow::{anyhow, ensure, Context}; -use futures::{FutureExt, SinkExt, Stream, TryFutureExt}; +use anyhow::ensure; use iroh_base::{hash::Hash, key::NodeId}; use tokio::{ io::{AsyncReadExt, AsyncWriteExt}, task::JoinSet, }; -// use tokio_stream::StreamExt; -// use tokio_util::codec::{FramedRead, FramedWrite}; -use tracing::{debug, error_span, info, instrument, trace, warn, Instrument, Span}; +use tracing::{debug, error_span, instrument, trace, Instrument}; use crate::{ proto::wgps::{ @@ -21,8 +12,8 @@ use crate::{ MAX_PAYLOAD_SIZE_POWER, }, session::{ - coroutine::{Channels, Readyness, SessionStateInner, Yield}, - Role, SessionInit, + coroutine::{Channels, Readyness}, + Role, SessionInit, SessionState, }, store::actor::{Interest, Notifier, StoreHandle, ToActor}, util::{ @@ -31,24 +22,8 @@ use crate::{ }, }; -use self::codec::WillowCodec; - -pub mod codec; - const CHANNEL_CAP: usize = 1024 * 64; -// /// Read the next frame from a [`FramedRead`] but only if it is available without waiting on IO. 
-// async fn next_if_ready( -// mut reader: &mut FramedRead, -// ) -> Option> { -// futures::future::poll_fn(|cx| match Pin::new(&mut reader).poll_next(cx) { -// Poll::Ready(r) => Poll::Ready(r), -// Poll::Pending => Poll::Ready(None), -// }) -// .await -// } - -// #[instrument(skip_all, fields(me=%me.fmt_short(), role=?our_role, peer=%peer.fmt_short()))] #[instrument(skip_all, fields(me=%me.fmt_short(), role=?our_role))] pub async fn run( me: NodeId, @@ -108,7 +83,7 @@ pub async fn run( reconciliation_send, reconciliation_recv, }; - let state = SessionStateInner::new( + let state = SessionState::new( our_role, peer, our_nonce, @@ -255,7 +230,7 @@ mod tests { use crate::{ net::run, proto::{ - grouping::{AreaOfInterest, ThreeDRange}, + grouping::{AreaOfInterest}, keys::{NamespaceId, NamespaceKind, NamespaceSecretKey, UserSecretKey}, meadowcap::{AccessMode, McCapability, OwnedCapability}, willow::{Entry, Path, SubspaceId}, diff --git a/iroh-willow/src/net/codec.rs b/iroh-willow/src/net/codec.rs deleted file mode 100644 index ba4018c3ec..0000000000 --- a/iroh-willow/src/net/codec.rs +++ /dev/null @@ -1,55 +0,0 @@ -use anyhow::ensure; -use bytes::{Buf, BufMut, BytesMut}; -use tokio_util::codec::{Decoder, Encoder}; - -use crate::proto::wgps::Message; - -#[derive(Debug, Default)] -pub struct WillowCodec; - -const MAX_MESSAGE_SIZE: usize = 1024 * 1024 * 1024; // This is likely too large, but lets have some restrictions - -impl Decoder for WillowCodec { - type Item = Message; - type Error = anyhow::Error; - fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { - if src.len() < 4 { - return Ok(None); - } - let bytes: [u8; 4] = src[..4].try_into().unwrap(); - let frame_len = u32::from_be_bytes(bytes) as usize; - ensure!( - frame_len <= MAX_MESSAGE_SIZE, - "received message that is too large: {}", - frame_len - ); - if src.len() < 4 + frame_len { - return Ok(None); - } - - let message: Message = postcard::from_bytes(&src[4..4 + frame_len])?; - src.advance(4 + 
frame_len); - Ok(Some(message)) - } -} - -impl Encoder for WillowCodec { - type Error = anyhow::Error; - - fn encode(&mut self, item: Message, dst: &mut BytesMut) -> Result<(), Self::Error> { - let len = postcard::experimental::serialized_size(&item)?; - ensure!( - len <= MAX_MESSAGE_SIZE, - "attempting to send message that is too large {}", - len - ); - - dst.put_u32(u32::try_from(len).expect("already checked")); - if dst.len() < 4 + len { - dst.resize(4 + len, 0u8); - } - postcard::to_slice(&item, &mut dst[4..])?; - - Ok(()) - } -} diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 94e1557850..599da2c3db 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -31,61 +31,16 @@ use crate::{ util::channel::ReadOutcome, }; -use self::{ - coroutine::{Channels, SessionState, SessionStateInner}, - resource::ScopedResources, -}; +use self::{coroutine::Channels, resource::ScopedResources}; pub mod coroutine; +mod error; pub mod resource; +mod state; mod util; -#[derive(Debug, thiserror::Error)] -pub enum Error { - #[error("local store failed")] - Store(#[from] anyhow::Error), - #[error("wrong secret key for capability")] - WrongSecretKeyForCapability, - #[error("missing resource {0:?}")] - MissingResource(ResourceHandle), - #[error("received capability is invalid")] - InvalidCapability, - #[error("received capability has an invalid signature")] - InvalidSignature, - #[error("missing resource")] - RangeOutsideCapability, - #[error("received a message that is not valid in the current session state")] - InvalidMessageInCurrentState, - #[error("our and their area of interests refer to different namespaces")] - AreaOfInterestNamespaceMismatch, - #[error("our and their area of interests do not overlap")] - AreaOfInterestDoesNotOverlap, - #[error("received an entry which is not authorised")] - UnauthorisedEntryReceived, - #[error("received an unsupported message type")] - UnsupportedMessage, - #[error("the received nonce does not match 
the received committment")] - BrokenCommittement, - #[error("received an actor message for unknown session")] - SessionNotFound, -} - -impl From for Error { - fn from(_value: Unauthorised) -> Self { - Self::UnauthorisedEntryReceived - } -} -impl From for Error { - fn from(_value: InvalidCapability) -> Self { - Self::InvalidCapability - } -} - -impl From for Error { - fn from(_value: SignatureError) -> Self { - Self::InvalidSignature - } -} +pub use self::error::Error; +pub use self::state::{SharedSessionState, SessionState}; #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum Role { diff --git a/iroh-willow/src/session/coroutine.rs b/iroh-willow/src/session/coroutine.rs index 62d5c8434b..c7647a57e6 100644 --- a/iroh-willow/src/session/coroutine.rs +++ b/iroh-willow/src/session/coroutine.rs @@ -36,7 +36,7 @@ use crate::{ util::channel::{ReadOutcome, Receiver, Sender, WriteOutcome}, }; -use super::{resource::ScopedResources, Error, Role, Scope, SessionInit}; +use super::{resource::ScopedResources, Error, Role, Scope, SessionInit, SharedSessionState, SessionState}; #[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] pub enum Yield { @@ -55,14 +55,12 @@ pub struct Coroutine { pub store_snapshot: Rc, pub store_writer: Rc>, pub channels: Channels, - pub state: SessionState, + pub state: SharedSessionState, pub notifier: CoroutineNotifier, #[debug(skip)] pub co: Co, } -pub type SessionState = Rc>; - #[derive(Debug, Clone)] pub struct Channels { pub control_send: Sender, @@ -90,245 +88,6 @@ impl Channels { } } -#[derive(Debug)] -pub struct SessionStateInner { - our_role: Role, - peer: NodeId, - our_resources: ScopedResources, - their_resources: ScopedResources, - reconciliation_started: bool, - pending_ranges: HashSet<(AreaOfInterestHandle, ThreeDRange)>, - pending_entries: Option, - notify_complete: Arc, - challenge: ChallengeState, - our_current_aoi: Option, -} - -impl SessionStateInner { - pub fn new( - our_role: Role, - peer: NodeId, - our_nonce: AccessChallenge, - 
received_commitment: ChallengeHash, - _their_maximum_payload_size: usize, - ) -> Self { - let challenge_state = ChallengeState::Committed { - our_nonce, - received_commitment, - }; - Self { - our_role, - peer, - challenge: challenge_state, - reconciliation_started: false, - our_resources: Default::default(), - their_resources: Default::default(), - pending_ranges: Default::default(), - pending_entries: Default::default(), - notify_complete: Default::default(), - our_current_aoi: Default::default(), - } - } - fn resources(&self, scope: Scope) -> &ScopedResources { - match scope { - Scope::Ours => &self.our_resources, - Scope::Theirs => &self.their_resources, - } - } - pub fn is_complete(&self) -> bool { - let is_complete = self.reconciliation_started - && self.pending_ranges.is_empty() - && self.pending_entries.is_none(); - trace!( - started = self.reconciliation_started, - pending_ranges = self.pending_ranges.len(), - pending_entries = ?self.pending_entries, - "is_complete {is_complete}" - ); - is_complete - } - - pub fn trigger_notify_if_complete(&mut self) -> bool { - if self.is_complete() { - self.notify_complete.notify_waiters(); - true - } else { - false - } - } - - pub fn notify_complete(&self) -> Arc { - Arc::clone(&self.notify_complete) - } - - pub fn commitment_reveal(&mut self) -> Result { - match self.challenge { - ChallengeState::Committed { our_nonce, .. 
} => { - Ok(CommitmentReveal { nonce: our_nonce }.into()) - } - _ => Err(Error::InvalidMessageInCurrentState), - } - // let msg = CommitmentReveal { nonce: our_nonce }; - } - - pub fn on_commitment_reveal( - &mut self, - msg: CommitmentReveal, - init: &SessionInit, - ) -> Result<[Message; 2], Error> { - self.challenge.reveal(self.our_role, msg.nonce)?; - self.setup(init) - } - - pub fn on_setup_bind_read_capability( - &mut self, - msg: SetupBindReadCapability, - ) -> Result<(), Error> { - // TODO: verify intersection handle - msg.capability.validate()?; - self.challenge - .verify(msg.capability.receiver(), &msg.signature)?; - self.their_resources.capabilities.bind(msg.capability); - Ok(()) - } - - pub fn on_setup_bind_static_token(&mut self, msg: SetupBindStaticToken) { - self.their_resources.static_tokens.bind(msg.static_token); - } - - fn setup(&mut self, init: &SessionInit) -> Result<[Message; 2], Error> { - let area_of_interest = init.area_of_interest.clone(); - let capability = init.capability.clone(); - - debug!(?init, "init"); - if *capability.receiver() != init.user_secret_key.public_key() { - return Err(Error::WrongSecretKeyForCapability); - } - - // TODO: implement private area intersection - let intersection_handle = 0.into(); - let signature = self.challenge.sign(&init.user_secret_key)?; - - let our_capability_handle = self.our_resources.capabilities.bind(capability.clone()); - let msg1 = SetupBindReadCapability { - capability, - handle: intersection_handle, - signature, - }; - - let msg2 = SetupBindAreaOfInterest { - area_of_interest, - authorisation: our_capability_handle, - }; - let our_aoi_handle = self.our_resources.areas_of_interest.bind(msg2.clone()); - self.our_current_aoi = Some(our_aoi_handle); - Ok([msg1.into(), msg2.into()]) - } - - pub fn on_setup_bind_area_of_interest( - &mut self, - msg: SetupBindAreaOfInterest, - ) -> Result<(NodeId, Option<(AreaOfInterestHandle, AreaOfInterestHandle)>), Error> { - let capability = self - 
.resources(Scope::Theirs) - .capabilities - .get(&msg.authorisation)?; - capability.try_granted_area(&msg.area_of_interest.area)?; - let their_handle = self.their_resources.areas_of_interest.bind(msg); - let start = if self.our_role == Role::Alfie { - let our_handle = self - .our_current_aoi - .clone() - .ok_or(Error::InvalidMessageInCurrentState)?; - Some((our_handle, their_handle)) - } else { - None - }; - Ok((self.peer, start)) - } - - pub fn on_send_entry(&mut self) -> Result<(), Error> { - let remaining = self - .pending_entries - .as_mut() - .ok_or(Error::InvalidMessageInCurrentState)?; - *remaining -= 1; - if *remaining == 0 { - self.pending_entries = None; - } - Ok(()) - } - - fn clear_pending_range_if_some( - &mut self, - our_handle: AreaOfInterestHandle, - pending_range: Option, - ) -> Result<(), Error> { - if let Some(range) = pending_range { - // TODO: avoid clone - if !self.pending_ranges.remove(&(our_handle, range.clone())) { - warn!("received duplicate final reply for range marker"); - Err(Error::InvalidMessageInCurrentState) - } else { - Ok(()) - } - } else { - Ok(()) - } - } - - fn bind_our_static_token( - &mut self, - static_token: StaticToken, - ) -> anyhow::Result<(StaticTokenHandle, Option)> { - let (handle, is_new) = self - .our_resources - .static_tokens - .bind_if_new(static_token.clone()); - let msg = is_new.then(|| SetupBindStaticToken { static_token }); - Ok((handle, msg)) - } - - fn handle_to_namespace_id( - &self, - scope: Scope, - handle: &AreaOfInterestHandle, - ) -> Result { - let aoi = self.resources(scope).areas_of_interest.get(handle)?; - let capability = self.resources(scope).capabilities.get(&aoi.authorisation)?; - let namespace_id = capability.granted_namespace().into(); - Ok(namespace_id) - } - - fn range_is_authorised( - &self, - range: &ThreeDRange, - receiver_handle: &AreaOfInterestHandle, - sender_handle: &AreaOfInterestHandle, - ) -> Result { - let our_namespace = self.handle_to_namespace_id(Scope::Ours, 
receiver_handle)?; - let their_namespace = self.handle_to_namespace_id(Scope::Theirs, sender_handle)?; - if our_namespace != their_namespace { - return Err(Error::AreaOfInterestNamespaceMismatch); - } - let our_aoi = self.handle_to_aoi(Scope::Ours, receiver_handle)?; - let their_aoi = self.handle_to_aoi(Scope::Theirs, sender_handle)?; - - if !our_aoi.area().includes_range(&range) || !their_aoi.area().includes_range(&range) { - return Err(Error::RangeOutsideCapability); - } - Ok(our_namespace.into()) - } - - fn handle_to_aoi( - &self, - scope: Scope, - handle: &AreaOfInterestHandle, - ) -> Result<&SetupBindAreaOfInterest, Error> { - self.resources(scope).areas_of_interest.get(handle) - } -} - // Note that all async methods yield to the owner of the coroutine. They are not running in a tokio // context. You may not perform regular async operations in them. impl Coroutine { @@ -758,44 +517,44 @@ impl Coroutine { ) -> Result<(), Error> { // TODO: expose this config let config = SyncConfig::default(); - // clone to avoid borrow checker trouble - let store_snapshot = Rc::clone(&self.store_snapshot); - let mut iter = store_snapshot - .split_range(namespace, &range, &config)? - .peekable(); - while let Some(res) = iter.next() { - let (subrange, action) = res?; - let is_last = iter.peek().is_none(); - let is_final_reply = is_last.then(|| range.clone()); - match action { - SplitAction::SendEntries(count) => { - self.announce_and_send_entries( - namespace, - &subrange, - our_handle, - their_handle, - true, - is_final_reply, - Some(count), - ) - .await?; - } - SplitAction::SendFingerprint(fingerprint) => { - self.send_fingerprint( - subrange, - fingerprint, - our_handle, - their_handle, - is_final_reply, - ) - .await?; - } + // clone to avoid borrow checker trouble + let store_snapshot = Rc::clone(&self.store_snapshot); + let mut iter = store_snapshot + .split_range(namespace, &range, &config)? 
+ .peekable(); + while let Some(res) = iter.next() { + let (subrange, action) = res?; + let is_last = iter.peek().is_none(); + let is_final_reply = is_last.then(|| range.clone()); + match action { + SplitAction::SendEntries(count) => { + self.announce_and_send_entries( + namespace, + &subrange, + our_handle, + their_handle, + true, + is_final_reply, + Some(count), + ) + .await?; + } + SplitAction::SendFingerprint(fingerprint) => { + self.send_fingerprint( + subrange, + fingerprint, + our_handle, + their_handle, + is_final_reply, + ) + .await?; } } + } Ok(()) } - fn state_mut(&mut self) -> RefMut { + fn state_mut(&mut self) -> RefMut { self.state.borrow_mut() } } diff --git a/iroh-willow/src/session/error.rs b/iroh-willow/src/session/error.rs new file mode 100644 index 0000000000..da918d3c0a --- /dev/null +++ b/iroh-willow/src/session/error.rs @@ -0,0 +1,50 @@ +use ed25519_dalek::SignatureError; + +use crate::proto::{meadowcap::InvalidCapability, wgps::ResourceHandle, willow::Unauthorised}; + +#[derive(Debug, thiserror::Error)] +pub enum Error { + #[error("local store failed")] + Store(#[from] anyhow::Error), + #[error("wrong secret key for capability")] + WrongSecretKeyForCapability, + #[error("missing resource {0:?}")] + MissingResource(ResourceHandle), + #[error("received capability is invalid")] + InvalidCapability, + #[error("received capability has an invalid signature")] + InvalidSignature, + #[error("missing resource")] + RangeOutsideCapability, + #[error("received a message that is not valid in the current session state")] + InvalidMessageInCurrentState, + #[error("our and their area of interests refer to different namespaces")] + AreaOfInterestNamespaceMismatch, + #[error("our and their area of interests do not overlap")] + AreaOfInterestDoesNotOverlap, + #[error("received an entry which is not authorised")] + UnauthorisedEntryReceived, + #[error("received an unsupported message type")] + UnsupportedMessage, + #[error("the received nonce does not match the 
received committment")] + BrokenCommittement, + #[error("received an actor message for unknown session")] + SessionNotFound, +} + +impl From for Error { + fn from(_value: Unauthorised) -> Self { + Self::UnauthorisedEntryReceived + } +} +impl From for Error { + fn from(_value: InvalidCapability) -> Self { + Self::InvalidCapability + } +} + +impl From for Error { + fn from(_value: SignatureError) -> Self { + Self::InvalidSignature + } +} diff --git a/iroh-willow/src/session/state.rs b/iroh-willow/src/session/state.rs new file mode 100644 index 0000000000..53db1b08c2 --- /dev/null +++ b/iroh-willow/src/session/state.rs @@ -0,0 +1,279 @@ +use std::{ + cell::{RefCell, RefMut}, + collections::HashSet, + rc::Rc, + sync::{Arc, Mutex}, +}; + +use genawaiter::{ + sync::{Co, Gen}, + GeneratorState, +}; +use iroh_net::NodeId; +use smallvec::SmallVec; +use tokio::sync::Notify; +use tracing::{debug, info, trace, warn}; + +use crate::{ + proto::{ + challenge::ChallengeState, + grouping::ThreeDRange, + keys::{NamespaceId, NamespacePublicKey}, + meadowcap::McCapability, + wgps::{ + AccessChallenge, AreaOfInterestHandle, CapabilityHandle, ChallengeHash, + CommitmentReveal, Fingerprint, LengthyEntry, LogicalChannel, Message, ReadCapability, + ReconciliationAnnounceEntries, ReconciliationSendEntry, ReconciliationSendFingerprint, + ResourceHandle, SetupBindAreaOfInterest, SetupBindReadCapability, SetupBindStaticToken, + StaticToken, StaticTokenHandle, + }, + willow::{AuthorisationToken, AuthorisedEntry}, + }, + store::{ + actor::{CoroutineNotifier, Interest}, + ReadonlyStore, SplitAction, Store, SyncConfig, + }, + util::channel::{ReadOutcome, Receiver, Sender, WriteOutcome}, +}; + +use super::{resource::ScopedResources, Error, Role, Scope, SessionInit}; +pub type SharedSessionState = Rc>; + +#[derive(Debug)] +pub struct SessionState { + pub our_role: Role, + peer: NodeId, + pub our_resources: ScopedResources, + pub their_resources: ScopedResources, + pub reconciliation_started: bool, + 
pub pending_ranges: HashSet<(AreaOfInterestHandle, ThreeDRange)>, + pub pending_entries: Option, + notify_complete: Arc, + challenge: ChallengeState, + our_current_aoi: Option, +} + +impl SessionState { + pub fn new( + our_role: Role, + peer: NodeId, + our_nonce: AccessChallenge, + received_commitment: ChallengeHash, + _their_maximum_payload_size: usize, + ) -> Self { + let challenge_state = ChallengeState::Committed { + our_nonce, + received_commitment, + }; + Self { + our_role, + peer, + challenge: challenge_state, + reconciliation_started: false, + our_resources: Default::default(), + their_resources: Default::default(), + pending_ranges: Default::default(), + pending_entries: Default::default(), + notify_complete: Default::default(), + our_current_aoi: Default::default(), + } + } + fn resources(&self, scope: Scope) -> &ScopedResources { + match scope { + Scope::Ours => &self.our_resources, + Scope::Theirs => &self.their_resources, + } + } + pub fn is_complete(&self) -> bool { + let is_complete = self.reconciliation_started + && self.pending_ranges.is_empty() + && self.pending_entries.is_none(); + trace!( + started = self.reconciliation_started, + pending_ranges = self.pending_ranges.len(), + pending_entries = ?self.pending_entries, + "is_complete {is_complete}" + ); + is_complete + } + + pub fn trigger_notify_if_complete(&mut self) -> bool { + if self.is_complete() { + self.notify_complete.notify_waiters(); + true + } else { + false + } + } + + pub fn notify_complete(&self) -> Arc { + Arc::clone(&self.notify_complete) + } + + pub fn commitment_reveal(&mut self) -> Result { + match self.challenge { + ChallengeState::Committed { our_nonce, .. 
} => { + Ok(CommitmentReveal { nonce: our_nonce }.into()) + } + _ => Err(Error::InvalidMessageInCurrentState), + } + // let msg = CommitmentReveal { nonce: our_nonce }; + } + + pub fn on_commitment_reveal( + &mut self, + msg: CommitmentReveal, + init: &SessionInit, + ) -> Result<[Message; 2], Error> { + self.challenge.reveal(self.our_role, msg.nonce)?; + self.setup(init) + } + + pub fn on_setup_bind_read_capability( + &mut self, + msg: SetupBindReadCapability, + ) -> Result<(), Error> { + // TODO: verify intersection handle + msg.capability.validate()?; + self.challenge + .verify(msg.capability.receiver(), &msg.signature)?; + self.their_resources.capabilities.bind(msg.capability); + Ok(()) + } + + pub fn on_setup_bind_static_token(&mut self, msg: SetupBindStaticToken) { + self.their_resources.static_tokens.bind(msg.static_token); + } + + fn setup(&mut self, init: &SessionInit) -> Result<[Message; 2], Error> { + let area_of_interest = init.area_of_interest.clone(); + let capability = init.capability.clone(); + + debug!(?init, "init"); + if *capability.receiver() != init.user_secret_key.public_key() { + return Err(Error::WrongSecretKeyForCapability); + } + + // TODO: implement private area intersection + let intersection_handle = 0.into(); + let signature = self.challenge.sign(&init.user_secret_key)?; + + let our_capability_handle = self.our_resources.capabilities.bind(capability.clone()); + let msg1 = SetupBindReadCapability { + capability, + handle: intersection_handle, + signature, + }; + + let msg2 = SetupBindAreaOfInterest { + area_of_interest, + authorisation: our_capability_handle, + }; + let our_aoi_handle = self.our_resources.areas_of_interest.bind(msg2.clone()); + self.our_current_aoi = Some(our_aoi_handle); + Ok([msg1.into(), msg2.into()]) + } + + pub fn on_setup_bind_area_of_interest( + &mut self, + msg: SetupBindAreaOfInterest, + ) -> Result<(NodeId, Option<(AreaOfInterestHandle, AreaOfInterestHandle)>), Error> { + let capability = self + 
.resources(Scope::Theirs) + .capabilities + .get(&msg.authorisation)?; + capability.try_granted_area(&msg.area_of_interest.area)?; + let their_handle = self.their_resources.areas_of_interest.bind(msg); + let start = if self.our_role == Role::Alfie { + let our_handle = self + .our_current_aoi + .clone() + .ok_or(Error::InvalidMessageInCurrentState)?; + Some((our_handle, their_handle)) + } else { + None + }; + Ok((self.peer, start)) + } + + pub fn on_send_entry(&mut self) -> Result<(), Error> { + let remaining = self + .pending_entries + .as_mut() + .ok_or(Error::InvalidMessageInCurrentState)?; + *remaining -= 1; + if *remaining == 0 { + self.pending_entries = None; + } + Ok(()) + } + + pub fn clear_pending_range_if_some( + &mut self, + our_handle: AreaOfInterestHandle, + pending_range: Option, + ) -> Result<(), Error> { + if let Some(range) = pending_range { + // TODO: avoid clone + if !self.pending_ranges.remove(&(our_handle, range.clone())) { + warn!("received duplicate final reply for range marker"); + Err(Error::InvalidMessageInCurrentState) + } else { + Ok(()) + } + } else { + Ok(()) + } + } + + pub fn bind_our_static_token( + &mut self, + static_token: StaticToken, + ) -> anyhow::Result<(StaticTokenHandle, Option)> { + let (handle, is_new) = self + .our_resources + .static_tokens + .bind_if_new(static_token.clone()); + let msg = is_new.then(|| SetupBindStaticToken { static_token }); + Ok((handle, msg)) + } + + pub fn handle_to_namespace_id( + &self, + scope: Scope, + handle: &AreaOfInterestHandle, + ) -> Result { + let aoi = self.resources(scope).areas_of_interest.get(handle)?; + let capability = self.resources(scope).capabilities.get(&aoi.authorisation)?; + let namespace_id = capability.granted_namespace().into(); + Ok(namespace_id) + } + + pub fn range_is_authorised( + &self, + range: &ThreeDRange, + receiver_handle: &AreaOfInterestHandle, + sender_handle: &AreaOfInterestHandle, + ) -> Result { + let our_namespace = self.handle_to_namespace_id(Scope::Ours, 
receiver_handle)?; + let their_namespace = self.handle_to_namespace_id(Scope::Theirs, sender_handle)?; + if our_namespace != their_namespace { + return Err(Error::AreaOfInterestNamespaceMismatch); + } + let our_aoi = self.handle_to_aoi(Scope::Ours, receiver_handle)?; + let their_aoi = self.handle_to_aoi(Scope::Theirs, sender_handle)?; + + if !our_aoi.area().includes_range(&range) || !their_aoi.area().includes_range(&range) { + return Err(Error::RangeOutsideCapability); + } + Ok(our_namespace.into()) + } + + fn handle_to_aoi( + &self, + scope: Scope, + handle: &AreaOfInterestHandle, + ) -> Result<&SetupBindAreaOfInterest, Error> { + self.resources(scope).areas_of_interest.get(handle) + } +} diff --git a/iroh-willow/src/store/actor.rs b/iroh-willow/src/store/actor.rs index 3f477e97c7..35df7bafe2 100644 --- a/iroh-willow/src/store/actor.rs +++ b/iroh-willow/src/store/actor.rs @@ -30,8 +30,8 @@ use crate::{ willow::{AuthorisedEntry, Entry}, }, session::{ - coroutine::{Channels, Coroutine, Readyness, SessionState, SessionStateInner, Yield}, - Error, SessionInit, + coroutine::{Channels, Coroutine, Readyness, Yield}, + Error, SessionInit, SharedSessionState, SessionState, }, util::channel::{self, ReadOutcome, Receiver}, }; @@ -170,7 +170,7 @@ pub enum ToActor { InitSession { peer: NodeId, #[debug(skip)] - state: SessionStateInner, + state: SessionState, #[debug(skip)] channels: Channels, // start: Option<(AreaOfInterestHandle, AreaOfInterestHandle)>, @@ -196,7 +196,7 @@ pub enum ToActor { #[derive(Debug)] struct StorageSession { - state: SessionState, + state: SharedSessionState, channels: Channels, pending: PendingCoroutines, } From fff00fff03db7a42aac555abffacf90bbc960e0d Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Wed, 8 May 2024 15:29:55 +0200 Subject: [PATCH 022/198] chore: fix & fmt --- iroh-willow/src/net.rs | 28 ++-- iroh-willow/src/proto/meadowcap.rs | 1 - iroh-willow/src/session.rs | 37 +---- iroh-willow/src/session/channel.rs | 0 
iroh-willow/src/session/coroutine.rs | 210 ++++++++++++-------------- iroh-willow/src/session/reconciler.rs | 161 -------------------- iroh-willow/src/session/state.rs | 42 ++---- iroh-willow/src/store/actor.rs | 99 +++--------- iroh-willow/src/util/channel.rs | 10 +- 9 files changed, 148 insertions(+), 440 deletions(-) delete mode 100644 iroh-willow/src/session/channel.rs delete mode 100644 iroh-willow/src/session/reconciler.rs diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 9d946573ac..ffe9cdef81 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -230,10 +230,10 @@ mod tests { use crate::{ net::run, proto::{ - grouping::{AreaOfInterest}, + grouping::AreaOfInterest, keys::{NamespaceId, NamespaceKind, NamespaceSecretKey, UserSecretKey}, meadowcap::{AccessMode, McCapability, OwnedCapability}, - willow::{Entry, Path, SubspaceId}, + willow::{Entry, Path}, }, session::{Role, SessionInit}, store::{ @@ -429,16 +429,16 @@ mod tests { Ok(entries) } - async fn get_entries_debug( - store: &StoreHandle, - namespace: NamespaceId, - ) -> anyhow::Result> { - let entries = get_entries(store, namespace).await?; - let mut entries: Vec<_> = entries - .into_iter() - .map(|e| (e.subspace_id, e.path)) - .collect(); - entries.sort(); - Ok(entries) - } + // async fn get_entries_debug( + // store: &StoreHandle, + // namespace: NamespaceId, + // ) -> anyhow::Result> { + // let entries = get_entries(store, namespace).await?; + // let mut entries: Vec<_> = entries + // .into_iter() + // .map(|e| (e.subspace_id, e.path)) + // .collect(); + // entries.sort(); + // Ok(entries) + // } } diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index 795fcb893f..22bac8f7bc 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -1,4 +1,3 @@ -use once_cell::sync::Lazy; use serde::{Deserialize, Serialize}; use crate::util::Encoder; diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs 
index 599da2c3db..247c9d6df4 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -1,37 +1,4 @@ -use std::{ - collections::{HashSet, VecDeque}, - fmt, - sync::{Arc, Mutex}, -}; - -use ed25519_dalek::SignatureError; - -use iroh_base::{hash::Hash, key::NodeId}; -use tokio::sync::Notify; -use tracing::{debug, info, instrument, trace, warn}; - -use crate::{ - proto::{ - grouping::{AreaOfInterest, NamespacedRange, ThreeDRange}, - keys::{NamespaceId, NamespacePublicKey, UserPublicKey, UserSecretKey, UserSignature}, - meadowcap::InvalidCapability, - wgps::{ - AccessChallenge, AreaOfInterestHandle, CapabilityHandle, ChallengeHash, - CommitmentReveal, Fingerprint, HandleType, LengthyEntry, LogicalChannel, Message, - ReadCapability, ReconciliationAnnounceEntries, ReconciliationSendEntry, - ReconciliationSendFingerprint, ResourceHandle, SetupBindAreaOfInterest, - SetupBindReadCapability, SetupBindStaticToken, StaticToken, StaticTokenHandle, - }, - willow::{AuthorisationToken, AuthorisedEntry, Unauthorised}, - }, - store::{ - actor::{StoreHandle, ToActor}, - SplitAction, Store, SyncConfig, - }, - util::channel::ReadOutcome, -}; - -use self::{coroutine::Channels, resource::ScopedResources}; +use crate::proto::{grouping::AreaOfInterest, keys::UserSecretKey, wgps::ReadCapability}; pub mod coroutine; mod error; @@ -40,7 +7,7 @@ mod state; mod util; pub use self::error::Error; -pub use self::state::{SharedSessionState, SessionState}; +pub use self::state::{SessionState, SharedSessionState}; #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum Role { diff --git a/iroh-willow/src/session/channel.rs b/iroh-willow/src/session/channel.rs deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/iroh-willow/src/session/coroutine.rs b/iroh-willow/src/session/coroutine.rs index c7647a57e6..389d5e64a8 100644 --- a/iroh-willow/src/session/coroutine.rs +++ b/iroh-willow/src/session/coroutine.rs @@ -1,33 +1,23 @@ use std::{ cell::{RefCell, RefMut}, - 
collections::HashSet, rc::Rc, - sync::{Arc, Mutex}, }; -use genawaiter::{ - sync::{Co, Gen}, - GeneratorState, -}; +use genawaiter::sync::Co; use iroh_net::NodeId; -use smallvec::SmallVec; -use tokio::sync::Notify; -use tracing::{debug, info, trace, warn}; + +use tracing::{debug, trace}; use crate::{ proto::{ - challenge::ChallengeState, grouping::ThreeDRange, - keys::{NamespaceId, NamespacePublicKey}, - meadowcap::McCapability, + keys::NamespaceId, wgps::{ - AccessChallenge, AreaOfInterestHandle, CapabilityHandle, ChallengeHash, - CommitmentReveal, Fingerprint, LengthyEntry, LogicalChannel, Message, ReadCapability, + AreaOfInterestHandle, Fingerprint, LengthyEntry, LogicalChannel, Message, ReconciliationAnnounceEntries, ReconciliationSendEntry, ReconciliationSendFingerprint, - ResourceHandle, SetupBindAreaOfInterest, SetupBindReadCapability, SetupBindStaticToken, - StaticToken, StaticTokenHandle, + ResourceHandle, StaticToken, StaticTokenHandle, }, - willow::{AuthorisationToken, AuthorisedEntry}, + willow::AuthorisedEntry, }, store::{ actor::{CoroutineNotifier, Interest}, @@ -36,7 +26,7 @@ use crate::{ util::channel::{ReadOutcome, Receiver, Sender, WriteOutcome}, }; -use super::{resource::ScopedResources, Error, Role, Scope, SessionInit, SharedSessionState, SessionState}; +use super::{Error, SessionInit, SessionState, SharedSessionState}; #[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] pub enum Yield { @@ -339,98 +329,6 @@ impl Coroutine { } } - async fn send_reconciliation(&self, msg: impl Into) -> anyhow::Result<()> { - self.send(msg).await - } - - async fn send_control(&self, msg: impl Into) -> anyhow::Result<()> { - self.send(msg).await - } - - async fn send(&self, message: impl Into) -> anyhow::Result<()> { - let message: Message = message.into(); - let channel = message.logical_channel(); - // debug!(%message, ?channel, "send"); - let sender = self.channels.sender(channel); - - loop { - match sender.send_or_set_notify(&message)? 
{ - WriteOutcome::Ok => { - debug!(msg=%message, ch=%channel.fmt_short(), "sent"); - break Ok(()); - } - WriteOutcome::BufferFull => { - debug!(msg=%message, ch=%channel.fmt_short(), "sent buf full, yield"); - self.co - .yield_(Yield::Pending(Readyness::Channel(channel, Interest::Send))) - .await; - } - } - } - } - - async fn recv_bulk( - &self, - channel: LogicalChannel, - ) -> Option>> { - let receiver = self.channels.receiver(channel); - let mut buf = SmallVec::<[Message; N]>::new(); - loop { - match receiver.read_message_or_set_notify() { - Err(err) => return Some(Err(err)), - Ok(outcome) => match outcome { - ReadOutcome::Closed => { - if buf.is_empty() { - debug!("recv: closed"); - return None; - } else { - return Some(Ok(buf)); - } - } - ReadOutcome::ReadBufferEmpty => { - if buf.is_empty() { - self.co - .yield_(Yield::Pending(Readyness::Channel(channel, Interest::Recv))) - .await; - } else { - return Some(Ok(buf)); - } - } - ReadOutcome::Item(message) => { - debug!(%message, "recv"); - buf.push(message); - if buf.len() == N { - return Some(Ok(buf)); - } - } - }, - } - } - } - async fn recv(&self, channel: LogicalChannel) -> Option> { - let receiver = self.channels.receiver(channel); - loop { - match receiver.read_message_or_set_notify() { - Err(err) => return Some(Err(err)), - Ok(outcome) => match outcome { - ReadOutcome::Closed => { - debug!("recv: closed"); - return None; - } - ReadOutcome::ReadBufferEmpty => { - self.co - .yield_(Yield::Pending(Readyness::Channel(channel, Interest::Recv))) - .await; - } - ReadOutcome::Item(message) => { - debug!(%message, "recv"); - return Some(Ok(message)); - } - }, - } - } - } - async fn send_fingerprint( &mut self, range: ThreeDRange, @@ -557,4 +455,96 @@ impl Coroutine { fn state_mut(&mut self) -> RefMut { self.state.borrow_mut() } + + async fn recv(&self, channel: LogicalChannel) -> Option> { + let receiver = self.channels.receiver(channel); + loop { + match receiver.read_message_or_set_notify() { + Err(err) => 
return Some(Err(err)), + Ok(outcome) => match outcome { + ReadOutcome::Closed => { + debug!("recv: closed"); + return None; + } + ReadOutcome::ReadBufferEmpty => { + self.co + .yield_(Yield::Pending(Readyness::Channel(channel, Interest::Recv))) + .await; + } + ReadOutcome::Item(message) => { + debug!(%message, "recv"); + return Some(Ok(message)); + } + }, + } + } + } + + async fn send_reconciliation(&self, msg: impl Into) -> anyhow::Result<()> { + self.send(msg).await + } + + async fn send_control(&self, msg: impl Into) -> anyhow::Result<()> { + self.send(msg).await + } + + async fn send(&self, message: impl Into) -> anyhow::Result<()> { + let message: Message = message.into(); + let channel = message.logical_channel(); + // debug!(%message, ?channel, "send"); + let sender = self.channels.sender(channel); + + loop { + match sender.send_or_set_notify(&message)? { + WriteOutcome::Ok => { + debug!(msg=%message, ch=%channel.fmt_short(), "sent"); + break Ok(()); + } + WriteOutcome::BufferFull => { + debug!(msg=%message, ch=%channel.fmt_short(), "sent buf full, yield"); + self.co + .yield_(Yield::Pending(Readyness::Channel(channel, Interest::Send))) + .await; + } + } + } + } } +// async fn recv_bulk( +// &self, +// channel: LogicalChannel, +// ) -> Option>> { +// let receiver = self.channels.receiver(channel); +// let mut buf = SmallVec::<[Message; N]>::new(); +// loop { +// match receiver.read_message_or_set_notify() { +// Err(err) => return Some(Err(err)), +// Ok(outcome) => match outcome { +// ReadOutcome::Closed => { +// if buf.is_empty() { +// debug!("recv: closed"); +// return None; +// } else { +// return Some(Ok(buf)); +// } +// } +// ReadOutcome::ReadBufferEmpty => { +// if buf.is_empty() { +// self.co +// .yield_(Yield::Pending(Readyness::Channel(channel, Interest::Recv))) +// .await; +// } else { +// return Some(Ok(buf)); +// } +// } +// ReadOutcome::Item(message) => { +// debug!(%message, "recv"); +// buf.push(message); +// if buf.len() == N { +// return 
Some(Ok(buf)); +// } +// } +// }, +// } +// } +// } diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs deleted file mode 100644 index fd48d3baea..0000000000 --- a/iroh-willow/src/session/reconciler.rs +++ /dev/null @@ -1,161 +0,0 @@ -use std::collections::VecDeque; - -use super::Error; -use genawaiter::{ - sync::{Co, Gen}, - GeneratorState, -}; -use tracing::info; - -#[derive(Debug)] -pub enum Yield { - Init, - OutboxFull, - InboxEmpty, - AllDone(usize), -} - -// pub type Coroutine = genawaiter::Coroutine - -#[derive(Debug)] -pub struct Reconciler { - // gen: Option>, // co: genawaiter::sync::Co>, - state: State, -} - -#[derive(Debug)] -pub struct State { - outbox: VecDeque, - inbox: VecDeque, - count: usize, -} - -pub struct WorkerState { - inbox: flume::Receiver, - outbox: flume::Sender, - sum: usize, -} - -pub struct NetState { - outbox: flume::Receiver, - inbox: flume::Sender, -} - -fn create_state(cap: usize) -> (NetState, WorkerState) { - let (outbox_send, outbox_recv) = flume::bounded(cap); - let (inbox_send, inbox_recv) = flume::bounded(cap); - let ws = WorkerState { - inbox: inbox_recv, - outbox: outbox_send, - sum: 0, - }; - let ns = NetState { - inbox: inbox_send, - outbox: outbox_recv, - }; - (ns, ws) -} - -enum WorkerToNet { - MayResume, - Yield, - Finished, - Out(i32), -} - -async fn run_net( - ns: NetState, - recv: flume::Receiver, - send: flume::Sender, -) -> anyhow::Result<()> { - loop { - let mut pending_message = None; - // let mut yieled = true; - tokio::select! 
{ - next = recv.recv_async(), if pending_message.is_none( )=> { - let msg = next?; - // if yielded { - // yielded = false; - // notify_worker(); - // } - if let Err(msg) = ns.inbox.try_send(msg) { - pending_message.insert(msg.into_inner()); - } - } - out = ns.outbox.recv_async() => { - let out = out?; - match out { - WorkerToNet::MayResume => { - if let Some(msg) = pending_message.take() { - ns.inbox.send_async(msg).await?; - } - } - WorkerToNet::Out(msg) => { - send.send_async(msg).await?; - } - WorkerToNet::Finished => break, - WorkerToNet::Yield => { - // yielded = true; - } - } - } - } - } - Ok(()) -} - -// struct SharedState - -impl Reconciler { - pub fn run_worker(&mut self) { - let mut gen = Gen::new(|co| Self::producer(co)); - loop { - match gen.resume_with(&mut self.state) { - GeneratorState::Yielded(val) => { - info!("Yielded: {val:?}") - } - GeneratorState::Complete(res) => { - info!("Complete: {res:?}") - } - } - } - } - - pub fn push_inbox(&mut self, msg: i32) -> bool { - self.state.inbox.push_back(msg); - if self.state.inbox.len() == 2 { - false - } else { - true - } - } - - pub fn drain_outbox(&mut self) -> impl Iterator + '_ { - self.state.outbox.drain(..) 
- } - - async fn producer(co: Co) -> Result<(), Error> { - loop { - let state = co.yield_(Yield::Init).await; - // exit condition - if state.count > 6 { - co.yield_(Yield::AllDone(state.count)).await; - return Ok(()); - } - - let next = state.inbox.pop_front(); - match next { - None => { - co.yield_(Yield::InboxEmpty).await; - continue; - } - Some(msg) => { - state.outbox.push_back(msg * 17); - if state.outbox.len() == 3 { - co.yield_(Yield::OutboxFull).await; - } - } - } - } - } -} diff --git a/iroh-willow/src/session/state.rs b/iroh-willow/src/session/state.rs index 53db1b08c2..24fdd284b5 100644 --- a/iroh-willow/src/session/state.rs +++ b/iroh-willow/src/session/state.rs @@ -1,39 +1,19 @@ -use std::{ - cell::{RefCell, RefMut}, - collections::HashSet, - rc::Rc, - sync::{Arc, Mutex}, -}; +use std::{cell::RefCell, collections::HashSet, rc::Rc, sync::Arc}; -use genawaiter::{ - sync::{Co, Gen}, - GeneratorState, -}; use iroh_net::NodeId; -use smallvec::SmallVec; + use tokio::sync::Notify; -use tracing::{debug, info, trace, warn}; +use tracing::{debug, trace, warn}; -use crate::{ - proto::{ - challenge::ChallengeState, - grouping::ThreeDRange, - keys::{NamespaceId, NamespacePublicKey}, - meadowcap::McCapability, - wgps::{ - AccessChallenge, AreaOfInterestHandle, CapabilityHandle, ChallengeHash, - CommitmentReveal, Fingerprint, LengthyEntry, LogicalChannel, Message, ReadCapability, - ReconciliationAnnounceEntries, ReconciliationSendEntry, ReconciliationSendFingerprint, - ResourceHandle, SetupBindAreaOfInterest, SetupBindReadCapability, SetupBindStaticToken, - StaticToken, StaticTokenHandle, - }, - willow::{AuthorisationToken, AuthorisedEntry}, - }, - store::{ - actor::{CoroutineNotifier, Interest}, - ReadonlyStore, SplitAction, Store, SyncConfig, +use crate::proto::{ + challenge::ChallengeState, + grouping::ThreeDRange, + keys::NamespaceId, + wgps::{ + AccessChallenge, AreaOfInterestHandle, ChallengeHash, CommitmentReveal, Message, + SetupBindAreaOfInterest, 
SetupBindReadCapability, SetupBindStaticToken, StaticToken, + StaticTokenHandle, }, - util::channel::{ReadOutcome, Receiver, Sender, WriteOutcome}, }; use super::{resource::ScopedResources, Error, Role, Scope, SessionInit}; diff --git a/iroh-willow/src/store/actor.rs b/iroh-willow/src/store/actor.rs index 35df7bafe2..8c1c2dde6a 100644 --- a/iroh-willow/src/store/actor.rs +++ b/iroh-willow/src/store/actor.rs @@ -1,48 +1,29 @@ use std::{ cell::RefCell, - collections::{hash_map, HashMap, VecDeque}, + collections::{HashMap, VecDeque}, rc::Rc, - sync::{Arc, Mutex}, + sync::Arc, thread::JoinHandle, }; -use futures::{ - future::{BoxFuture, LocalBoxFuture}, - FutureExt, -}; -use genawaiter::{ - sync::{Co, Gen}, - GeneratorState, -}; +use futures::{future::LocalBoxFuture, FutureExt}; +use genawaiter::{sync::Gen, GeneratorState}; use tokio::sync::oneshot; -use tracing::{debug, error, error_span, info, instrument, warn, Span}; +use tracing::{debug, error, error_span, instrument, warn, Span}; // use iroh_net::NodeId; use super::Store; use crate::{ - proto::{ - grouping::{NamespacedRange, ThreeDRange}, - keys::NamespaceId, - wgps::{ - AreaOfInterestHandle, HandleType, LogicalChannel, Message, ReconciliationSendEntry, - ResourceHandle, - }, - willow::{AuthorisedEntry, Entry}, - }, + proto::{grouping::ThreeDRange, keys::NamespaceId, willow::Entry}, session::{ coroutine::{Channels, Coroutine, Readyness, Yield}, - Error, SessionInit, SharedSessionState, SessionState, + Error, SessionInit, SessionState, SharedSessionState, }, - util::channel::{self, ReadOutcome, Receiver}, }; use iroh_base::key::NodeId; pub const CHANNEL_CAP: usize = 1024; -// #[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)] -// pub struct SessionId(u64); -// pub type NodeId = SessionId; - #[derive(Debug, Clone)] pub struct StoreHandle { tx: flume::Sender, @@ -84,16 +65,10 @@ pub struct Notifier { tx: flume::Sender, notify: Readyness, peer: NodeId, - // channel: LogicalChannel, - // direction: Interest, } impl 
Notifier { - // pub fn channel(&self) -> LogicalChannel { - // self.channel - // } pub async fn notify(&self) -> anyhow::Result<()> { - // let notify = YieldReason::ChannelPending(self.channel, self.direction); let msg = ToActor::Resume { peer: self.peer, notify: self.notify, @@ -102,7 +77,6 @@ impl Notifier { Ok(()) } pub fn notify_sync(&self) -> anyhow::Result<()> { - // let notify = YieldReason::ChannelPending(self.channel, self.direction); let msg = ToActor::Resume { peer: self.peer, notify: self.notify, @@ -173,7 +147,6 @@ pub enum ToActor { state: SessionState, #[debug(skip)] channels: Channels, - // start: Option<(AreaOfInterestHandle, AreaOfInterestHandle)>, init: SessionInit, }, DropSession { @@ -214,19 +187,19 @@ impl PendingCoroutines { fn push_back(&mut self, pending_on: Readyness, generator: ReconcileGen) { self.get_mut(pending_on).push_back(generator); } - fn push_front(&mut self, pending_on: Readyness, generator: ReconcileGen) { - self.get_mut(pending_on).push_front(generator); - } fn pop_front(&mut self, pending_on: Readyness) -> Option { self.get_mut(pending_on).pop_front() } - fn len(&self, pending_on: &Readyness) -> usize { - self.inner.get(pending_on).map(|v| v.len()).unwrap_or(0) - } - - fn is_empty(&self) -> bool { - self.inner.values().any(|v| !v.is_empty()) - } + // fn push_front(&mut self, pending_on: Readyness, generator: ReconcileGen) { + // self.get_mut(pending_on).push_front(generator); + // } + // fn len(&self, pending_on: &Readyness) -> usize { + // self.inner.get(pending_on).map(|v| v.len()).unwrap_or(0) + // } + // + // fn is_empty(&self) -> bool { + // self.inner.values().any(|v| !v.is_empty()) + // } } #[derive(Debug)] @@ -289,9 +262,6 @@ impl StorageThread { ToActor::Resume { peer, notify } => { self.resume_next(peer, notify)?; } - // ToActor::ResumeRecv { peer, channel } => { - // self.resume_recv(peer, channel)?; - // } ToActor::GetEntries { namespace, reply } => { let store = self.store.borrow(); let entries = store @@ -308,9 
+278,6 @@ impl StorageThread { self.sessions.get_mut(peer).ok_or(Error::SessionNotFound) } - fn session(&mut self, peer: &NodeId) -> Result<&StorageSession, Error> { - self.sessions.get(peer).ok_or(Error::SessionNotFound) - } fn start_coroutine( &mut self, peer: NodeId, @@ -342,49 +309,17 @@ impl StorageThread { self.resume_coroutine(peer, (span, generator)) } - // #[instrument(skip_all, fields(session=%peer.fmt_short(),ch=%channel.fmt_short()))] - // fn resume_recv(&mut self, peer: NodeId, channel: LogicalChannel) -> Result<(), Error> { - // let session = self.session(&peer)?; - // debug!("resume"); - // let channel = session.channels.receiver(channel).clone(); - // loop { - // match channel.read_message_or_set_notify()? { - // ReadOutcome::Closed => { - // debug!("yield: Closed"); - // break; - // } - // ReadOutcome::ReadBufferEmpty => { - // debug!("yield: ReadBufferEmpty"); - // break; - // } - // ReadOutcome::Item(message) => { - // debug!(?message, "recv"); - // self.on_message(peer, message)?; - // } - // } - // } - // Ok(()) - // } - #[instrument(skip_all, fields(session=%peer.fmt_short()))] fn resume_next(&mut self, peer: NodeId, notify: Readyness) -> Result<(), Error> { - // debug!(pending = session.pending.len(¬ify), "resume"); - // while let Some(generator) = session.pending.pop_front(notify) { - // self.resume_coroutine(peer, generator); - // } - // Ok(()) - // loop { let session = self.sessions.get_mut(&peer).ok_or(Error::SessionNotFound)?; let generator = session.pending.pop_front(notify); match generator { Some(generator) => self.resume_coroutine(peer, generator), None => { debug!("nothing to resume"); - // return Ok(()); Ok(()) } } - // } } fn resume_coroutine(&mut self, peer: NodeId, generator: ReconcileGen) -> Result<(), Error> { diff --git a/iroh-willow/src/util/channel.rs b/iroh-willow/src/util/channel.rs index a678fecf0d..7990c16b77 100644 --- a/iroh-willow/src/util/channel.rs +++ b/iroh-willow/src/util/channel.rs @@ -6,9 +6,7 @@ use std::{ 
use bytes::{Buf, Bytes, BytesMut}; use tokio::sync::Notify; -use tracing::{debug, info, trace}; - -use crate::proto::wgps::Message; +use tracing::{debug, trace}; use super::{DecodeOutcome, Decoder, Encoder}; @@ -300,9 +298,9 @@ impl Sender { notify.notified().await; } - fn remaining_write_capacity(&self) -> usize { - self.shared.lock().unwrap().remaining_write_capacity() - } + // fn remaining_write_capacity(&self) -> usize { + // self.shared.lock().unwrap().remaining_write_capacity() + // } pub fn send_or_set_notify(&self, message: &T) -> anyhow::Result { let mut shared = self.shared.lock().unwrap(); From dcded7cba579e53e6aaac4dc1d391aaada9c9cda Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 9 May 2024 12:57:10 +0200 Subject: [PATCH 023/198] fixed the shutdown errors and other stuff --- iroh-willow/src/net.rs | 302 ++++++++++++------------ iroh-willow/src/proto/grouping.rs | 2 +- iroh-willow/src/proto/keys.rs | 12 + iroh-willow/src/proto/meadowcap.rs | 8 +- iroh-willow/src/proto/wgps.rs | 4 +- iroh-willow/src/proto/willow.rs | 3 + iroh-willow/src/session.rs | 31 ++- iroh-willow/src/session/coroutine.rs | 191 ++++++++------- iroh-willow/src/session/error.rs | 2 + iroh-willow/src/session/resource.rs | 65 +++--- iroh-willow/src/session/state.rs | 138 +++++------ iroh-willow/src/store/actor.rs | 252 +++++++++++++------- iroh-willow/src/util/channel.rs | 335 ++++++++------------------- 13 files changed, 661 insertions(+), 684 deletions(-) diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index ffe9cdef81..8751e477cf 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -1,21 +1,20 @@ use anyhow::ensure; +use futures::TryFutureExt; use iroh_base::{hash::Hash, key::NodeId}; use tokio::{ io::{AsyncReadExt, AsyncWriteExt}, + sync::oneshot, task::JoinSet, }; -use tracing::{debug, error_span, instrument, trace, Instrument}; +use tracing::{debug, error_span, instrument, trace, warn, Instrument}; use crate::{ proto::wgps::{ 
AccessChallenge, ChallengeHash, LogicalChannel, Message, CHALLENGE_HASH_LENGTH, MAX_PAYLOAD_SIZE_POWER, }, - session::{ - coroutine::{Channels, Readyness}, - Role, SessionInit, SessionState, - }, - store::actor::{Interest, Notifier, StoreHandle, ToActor}, + session::{coroutine::Channels, Role, SessionInit, SessionState}, + store::actor::{StoreHandle, ToActor}, util::{ channel::{channel, Receiver, Sender}, Decoder, Encoder, @@ -24,6 +23,8 @@ use crate::{ const CHANNEL_CAP: usize = 1024 * 64; +const ERROR_CODE_CLOSE_GRACEFUL: u16 = 1; + #[instrument(skip_all, fields(me=%me.fmt_short(), role=?our_role))] pub async fn run( me: NodeId, @@ -33,6 +34,7 @@ pub async fn run( our_role: Role, init: SessionInit, ) -> anyhow::Result<()> { + let mut join_set = JoinSet::new(); let (mut control_send_stream, mut control_recv_stream) = match our_role { Role::Alfie => conn.open_bi().await?, Role::Betty => conn.accept_bi().await?, @@ -49,33 +51,30 @@ pub async fn run( .await?; debug!("exchanged comittments"); - let (mut reconciliation_send_stream, mut reconciliation_recv_stream) = match our_role { - Role::Alfie => conn.open_bi().await?, - Role::Betty => conn.accept_bi().await?, - }; - reconciliation_send_stream.write_u8(0u8).await?; - reconciliation_recv_stream.read_u8().await?; - debug!("reconcile channel open"); - - let mut join_set = JoinSet::new(); let (control_send, control_recv) = spawn_channel( &mut join_set, - &store, peer, LogicalChannel::Control, CHANNEL_CAP, control_send_stream, control_recv_stream, ); + + let (mut reconciliation_send_stream, mut reconciliation_recv_stream) = match our_role { + Role::Alfie => conn.open_bi().await?, + Role::Betty => conn.accept_bi().await?, + }; + reconciliation_send_stream.write_u8(0u8).await?; + reconciliation_recv_stream.read_u8().await?; let (reconciliation_send, reconciliation_recv) = spawn_channel( &mut join_set, - &store, peer, LogicalChannel::Reconciliation, CHANNEL_CAP, reconciliation_send_stream, reconciliation_recv_stream, ); + 
debug!("reconcile channel open"); let channels = Channels { control_send, @@ -83,44 +82,46 @@ pub async fn run( reconciliation_send, reconciliation_recv, }; - let state = SessionState::new( - our_role, - peer, - our_nonce, - received_commitment, - max_payload_size, - ); - let on_complete = state.notify_complete(); + let state = SessionState::new(our_role, our_nonce, received_commitment, max_payload_size); - // let control_loop = ControlLoop::new(state, channels.clone(), store.clone(), init); - // - // let control_fut = control_loop.run(); + let (reply, reply_rx) = oneshot::channel(); store .send(ToActor::InitSession { peer, state, - channels: channels.clone(), + channels, init, + reply, }) .await?; - let notified_fut = async move { - on_complete.notified().await; - tracing::info!("reconciliation complete"); - channels.close_send(); + join_set.spawn(async move { + reply_rx.await??; Ok(()) - }; - // join_set.spawn(control_fut.map_err(anyhow::Error::from)); - join_set.spawn(notified_fut); + }); + + join_all(join_set).await +} + +async fn join_all(mut join_set: JoinSet>) -> anyhow::Result<()> { + let mut final_result = Ok(()); while let Some(res) = join_set.join_next().await { - res??; + let res = match res { + Ok(Ok(())) => Ok(()), + Ok(Err(err)) => Err(err), + Err(err) => Err(err.into()), + }; + if res.is_err() && final_result.is_ok() { + final_result = res; + } else if res.is_err() { + warn!("join error after initial error: {res:?}"); + } } - Ok(()) + final_result } fn spawn_channel( join_set: &mut JoinSet>, - store: &StoreHandle, peer: NodeId, ch: LogicalChannel, cap: usize, @@ -130,70 +131,50 @@ fn spawn_channel( let (send_tx, send_rx) = channel(cap); let (recv_tx, recv_rx) = channel(cap); - let recv_fut = recv_loop( - recv_stream, - recv_tx, - store.notifier(peer, Readyness::Channel(ch, Interest::Recv)), - ) - .instrument(error_span!("recv", peer=%peer.fmt_short(), ch=%ch.fmt_short())); + let recv_fut = recv_loop(recv_stream, recv_tx) + .map_err(move |e| 
e.context(format!("receive loop for {ch:?} failed"))) + .instrument(error_span!("recv", peer=%peer.fmt_short(), ch=%ch.fmt_short())); join_set.spawn(recv_fut); - let send_fut = send_loop( - send_stream, - send_rx, - store.notifier(peer, Readyness::Channel(ch, Interest::Send)), - ) - .instrument(error_span!("send", peer=%peer.fmt_short(), ch=%ch.fmt_short())); + let send_fut = send_loop(send_stream, send_rx) + .map_err(move |e| e.context(format!("send loop for {ch:?} failed"))) + .instrument(error_span!("send", peer=%peer.fmt_short(), ch=%ch.fmt_short())); join_set.spawn(send_fut); (send_tx, recv_rx) } -// #[instrument(skip_all, fields(ch=%notifier.channel().fmt_short()))] async fn recv_loop( mut recv_stream: quinn::RecvStream, - channel_sender: Sender, - notifier: Notifier, + channel_tx: Sender, ) -> anyhow::Result<()> { - loop { - let buf = recv_stream.read_chunk(CHANNEL_CAP, true).await?; - if let Some(buf) = buf { - channel_sender.write_slice_async(&buf.bytes[..]).await; - trace!(len = buf.bytes.len(), "recv"); - if channel_sender.is_receivable_notify_set() { - trace!("notify"); - notifier.notify().await?; - } - } else { - break; - } + while let Some(buf) = recv_stream.read_chunk(CHANNEL_CAP, true).await? 
{ + channel_tx.write_slice_async(&buf.bytes[..]).await?; + trace!(len = buf.bytes.len(), "recv"); } - channel_sender.close(); - debug!("recv_loop close"); + recv_stream.stop(ERROR_CODE_CLOSE_GRACEFUL.into()).ok(); + channel_tx.close(); Ok(()) } -// #[instrument(skip_all, fields(ch=%notifier.channel().fmt_short()))] async fn send_loop( mut send_stream: quinn::SendStream, - channel_receiver: Receiver, - notifier: Notifier, + channel_rx: Receiver, ) -> anyhow::Result<()> { - while let Some(data) = channel_receiver.read_bytes_async().await { + while let Some(data) = channel_rx.read_bytes_async().await { let len = data.len(); send_stream.write_chunk(data).await?; - debug!(len, "sent"); - if channel_receiver.is_sendable_notify_set() { - debug!("notify"); - notifier.notify().await?; - } + trace!(len, "sent"); + } + match send_stream.finish().await { + Ok(()) => {} + // If the other side closed gracefully, we are good. + Err(quinn::WriteError::Stopped(code)) + if code.into_inner() == ERROR_CODE_CLOSE_GRACEFUL as u64 => {} + Err(err) => return Err(err.into()), } - send_stream.flush().await?; - // send_stream.stopped().await?; - send_stream.finish().await.ok(); - debug!("send_loop close"); Ok(()) } @@ -225,20 +206,22 @@ mod tests { use iroh_base::{hash::Hash, key::SecretKey}; use iroh_net::MagicEndpoint; use rand::SeedableRng; + use rand_core::CryptoRngCore; use tracing::{debug, info}; use crate::{ net::run, proto::{ grouping::AreaOfInterest, - keys::{NamespaceId, NamespaceKind, NamespaceSecretKey, UserSecretKey}, + keys::{NamespaceId, NamespaceKind, NamespaceSecretKey, UserPublicKey, UserSecretKey}, meadowcap::{AccessMode, McCapability, OwnedCapability}, - willow::{Entry, Path}, + wgps::ReadCapability, + willow::{Entry, InvalidPath, Path, WriteCapability}, }, session::{Role, SessionInit}, store::{ actor::{StoreHandle, ToActor}, - MemoryStore, Store, + MemoryStore, }, }; @@ -291,84 +274,36 @@ mod tests { let start = Instant::now(); let mut expected_entries = HashSet::new(); 
- let mut store_alfie = MemoryStore::default(); - let init_alfie = { - let secret_key = UserSecretKey::generate(&mut rng); - let public_key = secret_key.public_key(); - let read_capability = McCapability::Owned(OwnedCapability::new( - &namespace_secret, - public_key, - AccessMode::Read, - )); - let write_capability = McCapability::Owned(OwnedCapability::new( - &namespace_secret, - public_key, - AccessMode::Write, - )); - for i in 0..n_alfie { - let p = format!("alfie{i}"); - let entry = Entry { - namespace_id, - subspace_id: public_key.into(), - path: Path::new(&[p.as_bytes()])?, - timestamp: 10, - payload_length: 2, - payload_digest: Hash::new("cool things"), - }; - expected_entries.insert(entry.clone()); - let entry = entry.attach_authorisation(write_capability.clone(), &secret_key)?; - store_alfie.ingest_entry(&entry)?; - } - let area_of_interest = AreaOfInterest::full(); - SessionInit { - user_secret_key: secret_key, - capability: read_capability, - area_of_interest, - } - }; - let mut store_betty = MemoryStore::default(); - let init_betty = { - let secret_key = UserSecretKey::generate(&mut rng); - let public_key = secret_key.public_key(); - let read_capability = McCapability::Owned(OwnedCapability::new( - &namespace_secret, - public_key, - AccessMode::Read, - )); - let write_capability = McCapability::Owned(OwnedCapability::new( - &namespace_secret, - public_key, - AccessMode::Write, - )); - for i in 0..n_betty { - let p = format!("betty{i}"); - let entry = Entry { - namespace_id, - subspace_id: public_key.into(), - path: Path::new(&[p.as_bytes()])?, - timestamp: 10, - payload_length: 2, - payload_digest: Hash::new("cool things"), - }; - expected_entries.insert(entry.clone()); - let entry = entry.attach_authorisation(write_capability.clone(), &secret_key)?; - store_betty.ingest_entry(&entry)?; - } - let area_of_interest = AreaOfInterest::full(); - SessionInit { - user_secret_key: secret_key, - capability: read_capability, - area_of_interest, - } - }; + let 
store_alfie = MemoryStore::default(); + let handle_alfie = StoreHandle::spawn(store_alfie, node_id_alfie); + + let store_betty = MemoryStore::default(); + let handle_betty = StoreHandle::spawn(store_betty, node_id_betty); + + let init_alfie = setup_and_insert( + &mut rng, + &handle_alfie, + &namespace_secret, + n_alfie, + &mut expected_entries, + |n| Path::new(&[b"alfie", n.to_string().as_bytes()]), + ) + .await?; + let init_betty = setup_and_insert( + &mut rng, + &handle_betty, + &namespace_secret, + n_betty, + &mut expected_entries, + |n| Path::new(&[b"betty", n.to_string().as_bytes()]), + ) + .await?; debug!("init constructed"); println!("init took {:?}", start.elapsed()); let start = Instant::now(); - let handle_alfie = StoreHandle::spawn(store_alfie, node_id_alfie); - let handle_betty = StoreHandle::spawn(store_betty, node_id_betty); let (res_alfie, res_betty) = tokio::join!( run( node_id_alfie, @@ -405,11 +340,13 @@ mod tests { assert!(res_betty.is_ok()); assert_eq!( get_entries(&handle_alfie, namespace_id).await?, - expected_entries + expected_entries, + "alfie expected entries" ); assert_eq!( get_entries(&handle_betty, namespace_id).await?, - expected_entries + expected_entries, + "bettyexpected entries" ); Ok(()) @@ -429,6 +366,55 @@ mod tests { Ok(entries) } + async fn setup_and_insert( + rng: &mut impl CryptoRngCore, + store: &StoreHandle, + namespace_secret: &NamespaceSecretKey, + count: usize, + track_entries: &mut impl Extend, + path_fn: impl Fn(usize) -> Result, + ) -> anyhow::Result { + let user_secret = UserSecretKey::generate(rng); + let (read_cap, write_cap) = create_capabilities(namespace_secret, user_secret.public_key()); + let subspace_id = user_secret.id(); + let namespace_id = namespace_secret.id(); + for i in 0..count { + let path = path_fn(i); + let entry = Entry { + namespace_id, + subspace_id, + path: path.expect("invalid path"), + timestamp: 10, + payload_length: 2, + payload_digest: Hash::new("cool things"), + }; + 
track_entries.extend([entry.clone()]); + let entry = entry.attach_authorisation(write_cap.clone(), &user_secret)?; + info!("INGEST {entry:?}"); + store.ingest_entry(entry).await?; + } + let init = SessionInit::with_interest(user_secret, read_cap, AreaOfInterest::full()); + Ok(init) + } + + fn create_capabilities( + namespace_secret: &NamespaceSecretKey, + user_public_key: UserPublicKey, + ) -> (ReadCapability, WriteCapability) { + let read_capability = McCapability::Owned(OwnedCapability::new( + &namespace_secret, + user_public_key, + AccessMode::Read, + )); + let write_capability = McCapability::Owned(OwnedCapability::new( + &namespace_secret, + user_public_key, + AccessMode::Write, + )); + (read_capability, write_capability) + // let init = SessionInit::with_interest(secret_key, read_capability, AreaOfInterest::full()) + } + // async fn get_entries_debug( // store: &StoreHandle, // namespace: NamespaceId, diff --git a/iroh-willow/src/proto/grouping.rs b/iroh-willow/src/proto/grouping.rs index e913e2012e..9ecaeb70ba 100644 --- a/iroh-willow/src/proto/grouping.rs +++ b/iroh-willow/src/proto/grouping.rs @@ -212,7 +212,7 @@ impl RangeEnd { } /// A grouping of Entries that are among the newest in some store. -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)] +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Hash)] pub struct AreaOfInterest { /// To be included in this AreaOfInterest, an Entry must be included in the area. pub area: Area, diff --git a/iroh-willow/src/proto/keys.rs b/iroh-willow/src/proto/keys.rs index 2d343116dc..9d43e75893 100644 --- a/iroh-willow/src/proto/keys.rs +++ b/iroh-willow/src/proto/keys.rs @@ -332,12 +332,24 @@ pub struct NamespaceSignature(ed25519_dalek::Signature); bytestring!(NamespaceSignature, SIGNATURE_LENGTH); +impl std::hash::Hash for NamespaceSignature { + fn hash(&self, state: &mut H) { + self.0.to_bytes().hash(state); + } +} + /// The signature obtained by signing a message with a [`UserSecretKey`]. 
#[derive(Serialize, Deserialize, Clone, From, PartialEq, Eq, Deref)] pub struct UserSignature(ed25519_dalek::Signature); bytestring!(UserSignature, SIGNATURE_LENGTH); +impl std::hash::Hash for UserSignature { + fn hash(&self, state: &mut H) { + self.0.to_bytes().hash(state); + } +} + /// [`UserPublicKey`] in bytes #[derive( Default, diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index 22bac8f7bc..3190158a0c 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -102,7 +102,7 @@ impl From<(McCapability, UserSignature)> for MeadowcapAuthorisationToken { } } -#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, derive_more::From)] +#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash, derive_more::From)] pub enum McCapability { Communal(CommunalCapability), Owned(OwnedCapability), @@ -158,14 +158,14 @@ impl McCapability { } } -#[derive(Debug, Serialize, Deserialize, Clone, Copy, Eq, PartialEq)] +#[derive(Debug, Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Hash)] pub enum AccessMode { Read, Write, } /// A capability that authorizes reads or writes in communal namespaces. -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash)] pub struct CommunalCapability { /// The kind of access this grants. access_mode: AccessMode, @@ -206,7 +206,7 @@ impl CommunalCapability { } /// A capability that authorizes reads or writes in owned namespaces. -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash)] pub struct OwnedCapability { /// The kind of access this grants. 
access_mode: AccessMode, diff --git a/iroh-willow/src/proto/wgps.rs b/iroh-willow/src/proto/wgps.rs index a282d7772c..054d213349 100644 --- a/iroh-willow/src/proto/wgps.rs +++ b/iroh-willow/src/proto/wgps.rs @@ -100,8 +100,8 @@ pub enum LogicalChannel { impl LogicalChannel { pub fn fmt_short(&self) -> &str { match self { - LogicalChannel::Control => "C", - LogicalChannel::Reconciliation => "R", + LogicalChannel::Control => "Ctl", + LogicalChannel::Reconciliation => "Rec", } } } diff --git a/iroh-willow/src/proto/willow.rs b/iroh-willow/src/proto/willow.rs index 89bbc7c462..b1dd9dc19f 100644 --- a/iroh-willow/src/proto/willow.rs +++ b/iroh-willow/src/proto/willow.rs @@ -16,6 +16,9 @@ pub type NamespaceId = keys::NamespaceId; /// A type for identifying subspaces. pub type SubspaceId = keys::UserId; +/// The capability type needed to authorize writes. +pub type WriteCapability = McCapability; + /// A Timestamp is a 64-bit unsigned integer, that is, a natural number between zero (inclusive) and 2^64 - 1 (exclusive). /// Timestamps are to be interpreted as a time in microseconds since the Unix epoch. pub type Timestamp = u64; diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 247c9d6df4..8bee846810 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -1,3 +1,5 @@ +use std::collections::{HashMap, HashSet}; + use crate::proto::{grouping::AreaOfInterest, keys::UserSecretKey, wgps::ReadCapability}; pub mod coroutine; @@ -9,23 +11,42 @@ mod util; pub use self::error::Error; pub use self::state::{SessionState, SharedSessionState}; +/// To break symmetry, we refer to the peer that initiated the synchronisation session as Alfie, +/// and the other peer as Betty. #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum Role { - Betty, + /// The peer that initiated the synchronisation session. Alfie, + /// The peer that accepted the synchronisation session. + Betty, } +/// The bind scope for resources. 
+/// +/// Resources are bound by either peer #[derive(Copy, Clone, Debug)] pub enum Scope { + /// Resources bound by ourselves. Ours, + /// Resources bound by the other peer. Theirs, } #[derive(Debug)] pub struct SessionInit { pub user_secret_key: UserSecretKey, - // TODO: allow multiple capabilities? - pub capability: ReadCapability, - // TODO: allow multiple areas of interest? - pub area_of_interest: AreaOfInterest, + pub interests: HashMap>, +} + +impl SessionInit { + pub fn with_interest( + user_secret_key: UserSecretKey, + capability: ReadCapability, + area_of_interest: AreaOfInterest, + ) -> Self { + Self { + user_secret_key, + interests: HashMap::from_iter([(capability, HashSet::from_iter([area_of_interest]))]), + } + } } diff --git a/iroh-willow/src/session/coroutine.rs b/iroh-willow/src/session/coroutine.rs index 389d5e64a8..6afbd53b74 100644 --- a/iroh-willow/src/session/coroutine.rs +++ b/iroh-willow/src/session/coroutine.rs @@ -3,6 +3,7 @@ use std::{ rc::Rc, }; +use anyhow::anyhow; use genawaiter::sync::Co; use iroh_net::NodeId; @@ -15,14 +16,11 @@ use crate::{ wgps::{ AreaOfInterestHandle, Fingerprint, LengthyEntry, LogicalChannel, Message, ReconciliationAnnounceEntries, ReconciliationSendEntry, ReconciliationSendFingerprint, - ResourceHandle, StaticToken, StaticTokenHandle, + ResourceHandle, SetupBindAreaOfInterest, StaticToken, StaticTokenHandle, }, willow::AuthorisedEntry, }, - store::{ - actor::{CoroutineNotifier, Interest}, - ReadonlyStore, SplitAction, Store, SyncConfig, - }, + store::{actor::Interest, ReadonlyStore, SplitAction, Store, SyncConfig}, util::channel::{ReadOutcome, Receiver, Sender, WriteOutcome}, }; @@ -46,7 +44,7 @@ pub struct Coroutine { pub store_writer: Rc>, pub channels: Channels, pub state: SharedSessionState, - pub notifier: CoroutineNotifier, + // pub waker: CoroutineWaker, #[debug(skip)] pub co: Co, } @@ -60,6 +58,12 @@ pub struct Channels { } impl Channels { + pub fn close_all(&self) { + self.control_send.close(); + 
self.control_recv.close(); + self.reconciliation_send.close(); + self.reconciliation_recv.close(); + } pub fn close_send(&self) { self.control_send.close(); self.reconciliation_send.close(); @@ -85,6 +89,7 @@ impl Coroutine { mut self, start: Option<(AreaOfInterestHandle, AreaOfInterestHandle)>, ) -> Result<(), Error> { + debug!(init = start.is_some(), "start reconciliation"); if let Some((our_handle, their_handle)) = start { self.init_reconciliation(our_handle, their_handle).await?; } @@ -92,8 +97,8 @@ impl Coroutine { while let Some(message) = self.recv(LogicalChannel::Reconciliation).await { let message = message?; self.on_reconciliation_message(message).await?; - if self.state_mut().trigger_notify_if_complete() { - break; + if self.state_mut().reconciliation_is_complete() { + self.channels.close_send(); } } @@ -104,46 +109,35 @@ impl Coroutine { let reveal_message = self.state_mut().commitment_reveal()?; self.send_control(reveal_message).await?; + let mut init = Some(init); while let Some(message) = self.recv(LogicalChannel::Control).await { let message = message?; - debug!(%message, "run_control recv"); - self.on_control_message(message, &init).await?; - if self.state_mut().trigger_notify_if_complete() { - break; - } - } - - Ok(()) - } - - async fn on_control_message( - &mut self, - message: Message, - init: &SessionInit, - ) -> Result<(), Error> { - match message { - Message::CommitmentReveal(msg) => { - let setup_messages = self.state_mut().on_commitment_reveal(msg, &init)?; - for message in setup_messages { - debug!(%message, "send"); - self.send_control(message).await?; + match message { + Message::CommitmentReveal(msg) => { + self.state_mut().on_commitment_reveal(msg)?; + let init = init + .take() + .ok_or_else(|| Error::InvalidMessageInCurrentState)?; + self.setup(init).await?; } + Message::SetupBindReadCapability(msg) => { + self.state_mut().on_setup_bind_read_capability(msg)?; + } + Message::SetupBindStaticToken(msg) => { + 
self.state_mut().on_setup_bind_static_token(msg); + } + Message::SetupBindAreaOfInterest(msg) => { + let start = self.state_mut().on_setup_bind_area_of_interest(msg)?; + // if let Some(start) = st + self.co.yield_(Yield::StartReconciliation(start)).await; + } + Message::ControlFreeHandle(_msg) => { + // TODO: Free handles + } + _ => return Err(Error::UnsupportedMessage), } - Message::SetupBindReadCapability(msg) => { - self.state_mut().on_setup_bind_read_capability(msg)?; - } - Message::SetupBindStaticToken(msg) => { - self.state_mut().on_setup_bind_static_token(msg); - } - Message::SetupBindAreaOfInterest(msg) => { - let (_peer, start) = self.state_mut().on_setup_bind_area_of_interest(msg)?; - self.co.yield_(Yield::StartReconciliation(start)).await; - } - Message::ControlFreeHandle(_msg) => { - // TODO: Free handles - } - _ => return Err(Error::UnsupportedMessage), } + Ok(()) } @@ -162,6 +156,44 @@ impl Coroutine { Ok(()) } + async fn setup(&mut self, init: SessionInit) -> Result<(), Error> { + debug!(?init, "init"); + for (capability, aois) in init.interests.into_iter() { + if *capability.receiver() != init.user_secret_key.public_key() { + return Err(Error::WrongSecretKeyForCapability); + } + + // TODO: implement private area intersection + let intersection_handle = 0.into(); + let (our_capability_handle, message) = self.state_mut().bind_and_sign_capability( + &init.user_secret_key, + intersection_handle, + capability, + )?; + if let Some(message) = message { + self.send_control(message).await?; + } + + for area_of_interest in aois { + // for area in areas_of_interest { + let msg = SetupBindAreaOfInterest { + area_of_interest, + authorisation: our_capability_handle, + }; + let (_our_handle, is_new) = self + .state_mut() + .our_resources + .areas_of_interest + // TODO: avoid clone + .bind_if_new(msg.clone()); + if is_new { + self.send_control(msg).await?; + } + } + } + Ok(()) + } + async fn init_reconciliation( &mut self, our_handle: AreaOfInterestHandle, @@ 
-169,13 +201,16 @@ impl Coroutine { ) -> Result<(), Error> { debug!("init reconciliation"); let mut state = self.state_mut(); - let our_aoi = state.our_resources.areas_of_interest.get(&our_handle)?; - let their_aoi = state.their_resources.areas_of_interest.get(&their_handle)?; + let our_aoi = state.our_resources.areas_of_interest.try_get(&our_handle)?; + let their_aoi = state + .their_resources + .areas_of_interest + .try_get(&their_handle)?; let our_capability = state .our_resources .capabilities - .get(&our_aoi.authorisation)?; + .try_get(&our_aoi.authorisation)?; let namespace: NamespaceId = our_capability.granted_namespace().into(); let common_aoi = &our_aoi @@ -293,7 +328,9 @@ impl Coroutine { } async fn on_send_entry(&mut self, message: ReconciliationSendEntry) -> Result<(), Error> { - let static_token = self.get_static_token(message.static_token_handle).await; + let static_token = self + .get_static_token_eventually(message.static_token_handle) + .await; self.state_mut().on_send_entry()?; @@ -308,16 +345,10 @@ impl Coroutine { Ok(()) } - async fn get_static_token(&mut self, handle: StaticTokenHandle) -> StaticToken { + async fn get_static_token_eventually(&mut self, handle: StaticTokenHandle) -> StaticToken { loop { - let mut state = self.state.borrow_mut(); - match state - .their_resources - .static_tokens - .get_or_notify(&handle, || { - self.notifier - .notifier(self.peer, Readyness::Resource(handle.into())) - }) { + let state = self.state.borrow_mut(); + match state.their_resources.static_tokens.get(&handle) { Some(token) => break token.clone(), None => { drop(state); @@ -459,7 +490,7 @@ impl Coroutine { async fn recv(&self, channel: LogicalChannel) -> Option> { let receiver = self.channels.receiver(channel); loop { - match receiver.read_message_or_set_notify() { + match receiver.read_message() { Err(err) => return Some(Err(err)), Ok(outcome) => match outcome { ReadOutcome::Closed => { @@ -472,7 +503,7 @@ impl Coroutine { .await; } 
ReadOutcome::Item(message) => { - debug!(%message, "recv"); + debug!(ch=%channel.fmt_short(), %message, "recv"); return Some(Ok(message)); } }, @@ -495,7 +526,11 @@ impl Coroutine { let sender = self.channels.sender(channel); loop { - match sender.send_or_set_notify(&message)? { + match sender.send(&message)? { + WriteOutcome::Closed => { + debug!("send: closed"); + return Err(anyhow!("channel closed")); + } WriteOutcome::Ok => { debug!(msg=%message, ch=%channel.fmt_short(), "sent"); break Ok(()); @@ -510,41 +545,3 @@ impl Coroutine { } } } -// async fn recv_bulk( -// &self, -// channel: LogicalChannel, -// ) -> Option>> { -// let receiver = self.channels.receiver(channel); -// let mut buf = SmallVec::<[Message; N]>::new(); -// loop { -// match receiver.read_message_or_set_notify() { -// Err(err) => return Some(Err(err)), -// Ok(outcome) => match outcome { -// ReadOutcome::Closed => { -// if buf.is_empty() { -// debug!("recv: closed"); -// return None; -// } else { -// return Some(Ok(buf)); -// } -// } -// ReadOutcome::ReadBufferEmpty => { -// if buf.is_empty() { -// self.co -// .yield_(Yield::Pending(Readyness::Channel(channel, Interest::Recv))) -// .await; -// } else { -// return Some(Ok(buf)); -// } -// } -// ReadOutcome::Item(message) => { -// debug!(%message, "recv"); -// buf.push(message); -// if buf.len() == N { -// return Some(Ok(buf)); -// } -// } -// }, -// } -// } -// } diff --git a/iroh-willow/src/session/error.rs b/iroh-willow/src/session/error.rs index da918d3c0a..7ceb6c7abe 100644 --- a/iroh-willow/src/session/error.rs +++ b/iroh-willow/src/session/error.rs @@ -30,6 +30,8 @@ pub enum Error { BrokenCommittement, #[error("received an actor message for unknown session")] SessionNotFound, + #[error("invalid parameters: {0}")] + InvalidParameters(&'static str) } impl From for Error { diff --git a/iroh-willow/src/session/resource.rs b/iroh-willow/src/session/resource.rs index b27ab07da3..06db355f1e 100644 --- a/iroh-willow/src/session/resource.rs +++ 
b/iroh-willow/src/session/resource.rs @@ -5,7 +5,7 @@ use crate::{ AreaOfInterestHandle, CapabilityHandle, IsHandle, ReadCapability, ResourceHandle, SetupBindAreaOfInterest, StaticToken, StaticTokenHandle, }, - store::actor::Notifier, + store::actor::AssignedWaker, }; use super::Error; @@ -17,12 +17,12 @@ pub struct ScopedResources { pub static_tokens: ResourceMap, } impl ScopedResources { - pub fn register_notify(&mut self, handle: ResourceHandle, notify: Notifier) { - tracing::debug!(?handle, "register_notify"); + pub fn register_waker(&mut self, handle: ResourceHandle, waker: AssignedWaker) { + tracing::trace!(?handle, "register_notify"); match handle { - ResourceHandle::AreaOfInterest(h) => self.areas_of_interest.register_notify(h, notify), - ResourceHandle::Capability(h) => self.capabilities.register_notify(h, notify), - ResourceHandle::StaticToken(h) => self.static_tokens.register_notify(h, notify), + ResourceHandle::AreaOfInterest(h) => self.areas_of_interest.register_waker(h, waker), + ResourceHandle::Capability(h) => self.capabilities.register_waker(h, waker), + ResourceHandle::StaticToken(h) => self.static_tokens.register_waker(h, waker), ResourceHandle::Intersection(_h) => unimplemented!(), } } @@ -41,7 +41,7 @@ impl ScopedResources { pub struct ResourceMap { next_handle: u64, map: HashMap>, - notify: HashMap>, + wakers: HashMap>, } impl Default for ResourceMap { @@ -49,7 +49,7 @@ impl Default for ResourceMap { Self { next_handle: 0, map: Default::default(), - notify: Default::default(), + wakers: Default::default(), } } } @@ -59,16 +59,20 @@ where H: IsHandle, R: Eq + PartialEq, { + pub fn iter(&self) -> impl Iterator + '_ { + self.map.iter().map(|(h, r)| (h, &r.value)) + } + pub fn bind(&mut self, resource: R) -> H { let handle: H = self.next_handle.into(); self.next_handle += 1; let resource = Resource::new(resource); self.map.insert(handle, resource); - tracing::debug!(?handle, "bind"); - if let Some(mut notify) = self.notify.remove(&handle) { - 
tracing::debug!(?handle, "notify {}", notify.len()); - for notify in notify.drain(..) { - if let Err(err) = notify.notify_sync() { + tracing::trace!(?handle, "bind"); + if let Some(mut wakers) = self.wakers.remove(&handle) { + tracing::trace!(?handle, "notify {}", wakers.len()); + for waker in wakers.drain(..) { + if let Err(err) = waker.wake() { tracing::warn!(?err, "notify failed for {handle:?}"); } } @@ -76,8 +80,8 @@ where handle } - pub fn register_notify(&mut self, handle: H, notifier: Notifier) { - self.notify.entry(handle).or_default().push_back(notifier) + pub fn register_waker(&mut self, handle: H, notifier: AssignedWaker) { + self.wakers.entry(handle).or_default().push_back(notifier) } pub fn bind_if_new(&mut self, resource: R) -> (H, bool) { @@ -94,7 +98,7 @@ where } } - pub fn get(&self, handle: &H) -> Result<&R, Error> { + pub fn try_get(&self, handle: &H) -> Result<&R, Error> { self.map .get(handle) .as_ref() @@ -102,17 +106,26 @@ where .ok_or_else(|| Error::MissingResource((*handle).into())) } - pub fn get_or_notify(&mut self, handle: &H, notify: impl FnOnce() -> Notifier) -> Option<&R> { - if let Some(resource) = self.map.get(handle).as_ref().map(|r| &r.value) { - Some(resource) - } else { - self.notify - .entry(*handle) - .or_default() - .push_back((notify)()); - None - } + pub fn get(&self, handle: &H) -> Option<&R> { + self.map.get(handle).as_ref().map(|r| &r.value) } + + // pub async fn get_eventually(&self, handle: &H) -> Result<&R, Error> { + // if let Some(resource) = self.map.get(handle).as_ref().map(|r| &r.value) { + // Some(resource) + // } else { + // // self.on_notify(handle) + // } + // } + + // pub fn get_or_notify(&mut self, handle: &H, notifier: CoroutineWaker) -> Option<&R> { + // if let Some(resource) = self.map.get(handle).as_ref().map(|r| &r.value) { + // Some(resource) + // } else { + // self.register_waker(*handle, notifier); + // None + // } + // } } // #[derive(Debug)] diff --git a/iroh-willow/src/session/state.rs 
b/iroh-willow/src/session/state.rs index 24fdd284b5..e860f7ab0f 100644 --- a/iroh-willow/src/session/state.rs +++ b/iroh-willow/src/session/state.rs @@ -1,42 +1,35 @@ -use std::{cell::RefCell, collections::HashSet, rc::Rc, sync::Arc}; +use std::{cell::RefCell, collections::HashSet, rc::Rc}; -use iroh_net::NodeId; - -use tokio::sync::Notify; -use tracing::{debug, trace, warn}; +use tracing::{trace, warn}; use crate::proto::{ challenge::ChallengeState, grouping::ThreeDRange, - keys::NamespaceId, + keys::{NamespaceId, UserSecretKey}, wgps::{ - AccessChallenge, AreaOfInterestHandle, ChallengeHash, CommitmentReveal, Message, - SetupBindAreaOfInterest, SetupBindReadCapability, SetupBindStaticToken, StaticToken, - StaticTokenHandle, + AccessChallenge, AreaOfInterestHandle, CapabilityHandle, ChallengeHash, CommitmentReveal, + IntersectionHandle, Message, ReadCapability, SetupBindAreaOfInterest, + SetupBindReadCapability, SetupBindStaticToken, StaticToken, StaticTokenHandle, }, }; -use super::{resource::ScopedResources, Error, Role, Scope, SessionInit}; +use super::{resource::ScopedResources, Error, Role, Scope}; pub type SharedSessionState = Rc>; #[derive(Debug)] pub struct SessionState { pub our_role: Role, - peer: NodeId, pub our_resources: ScopedResources, pub their_resources: ScopedResources, pub reconciliation_started: bool, pub pending_ranges: HashSet<(AreaOfInterestHandle, ThreeDRange)>, pub pending_entries: Option, - notify_complete: Arc, - challenge: ChallengeState, - our_current_aoi: Option, + pub challenge: ChallengeState, } impl SessionState { pub fn new( our_role: Role, - peer: NodeId, our_nonce: AccessChallenge, received_commitment: ChallengeHash, _their_maximum_payload_size: usize, @@ -47,15 +40,12 @@ impl SessionState { }; Self { our_role, - peer, challenge: challenge_state, reconciliation_started: false, our_resources: Default::default(), their_resources: Default::default(), pending_ranges: Default::default(), pending_entries: Default::default(), - 
notify_complete: Default::default(), - our_current_aoi: Default::default(), } } fn resources(&self, scope: Scope) -> &ScopedResources { @@ -64,7 +54,7 @@ impl SessionState { Scope::Theirs => &self.their_resources, } } - pub fn is_complete(&self) -> bool { + pub fn reconciliation_is_complete(&self) -> bool { let is_complete = self.reconciliation_started && self.pending_ranges.is_empty() && self.pending_entries.is_none(); @@ -77,19 +67,28 @@ impl SessionState { is_complete } - pub fn trigger_notify_if_complete(&mut self) -> bool { - if self.is_complete() { - self.notify_complete.notify_waiters(); - true - } else { - false - } - } + pub fn bind_and_sign_capability( + &mut self, + user_secret_key: &UserSecretKey, + our_intersection_handle: IntersectionHandle, + capability: ReadCapability, + ) -> Result<(CapabilityHandle, Option), Error> { + let signature = self.challenge.sign(user_secret_key)?; - pub fn notify_complete(&self) -> Arc { - Arc::clone(&self.notify_complete) + let (our_handle, is_new) = self + .our_resources + .capabilities + .bind_if_new(capability.clone()); + let maybe_message = is_new.then(|| SetupBindReadCapability { + capability, + handle: our_intersection_handle, + signature, + }); + Ok((our_handle, maybe_message)) } + // pub fn bind_aoi() + pub fn commitment_reveal(&mut self) -> Result { match self.challenge { ChallengeState::Committed { our_nonce, .. 
} => { @@ -100,13 +99,9 @@ impl SessionState { // let msg = CommitmentReveal { nonce: our_nonce }; } - pub fn on_commitment_reveal( - &mut self, - msg: CommitmentReveal, - init: &SessionInit, - ) -> Result<[Message; 2], Error> { + pub fn on_commitment_reveal(&mut self, msg: CommitmentReveal) -> Result<(), Error> { self.challenge.reveal(self.our_role, msg.nonce)?; - self.setup(init) + Ok(()) } pub fn on_setup_bind_read_capability( @@ -125,55 +120,43 @@ impl SessionState { self.their_resources.static_tokens.bind(msg.static_token); } - fn setup(&mut self, init: &SessionInit) -> Result<[Message; 2], Error> { - let area_of_interest = init.area_of_interest.clone(); - let capability = init.capability.clone(); - - debug!(?init, "init"); - if *capability.receiver() != init.user_secret_key.public_key() { - return Err(Error::WrongSecretKeyForCapability); - } - - // TODO: implement private area intersection - let intersection_handle = 0.into(); - let signature = self.challenge.sign(&init.user_secret_key)?; - - let our_capability_handle = self.our_resources.capabilities.bind(capability.clone()); - let msg1 = SetupBindReadCapability { - capability, - handle: intersection_handle, - signature, - }; - - let msg2 = SetupBindAreaOfInterest { - area_of_interest, - authorisation: our_capability_handle, - }; - let our_aoi_handle = self.our_resources.areas_of_interest.bind(msg2.clone()); - self.our_current_aoi = Some(our_aoi_handle); - Ok([msg1.into(), msg2.into()]) - } - pub fn on_setup_bind_area_of_interest( &mut self, msg: SetupBindAreaOfInterest, - ) -> Result<(NodeId, Option<(AreaOfInterestHandle, AreaOfInterestHandle)>), Error> { + ) -> Result, Error> { let capability = self - .resources(Scope::Theirs) + .their_resources .capabilities - .get(&msg.authorisation)?; + .try_get(&msg.authorisation)?; capability.try_granted_area(&msg.area_of_interest.area)?; let their_handle = self.their_resources.areas_of_interest.bind(msg); + + // only initiate reconciliation if we are alfie, and if we 
have a shared aoi + // TODO: abort if no shared aoi? let start = if self.our_role == Role::Alfie { - let our_handle = self - .our_current_aoi - .clone() - .ok_or(Error::InvalidMessageInCurrentState)?; - Some((our_handle, their_handle)) + self.find_shared_aoi(&their_handle)? + .map(|our_handle| (our_handle, their_handle)) } else { None }; - Ok((self.peer, start)) + Ok(start) + } + + pub fn find_shared_aoi( + &self, + their_handle: &AreaOfInterestHandle, + ) -> Result, Error> { + let their_aoi = self + .their_resources + .areas_of_interest + .try_get(their_handle)?; + let maybe_our_handle = self + .our_resources + .areas_of_interest + .iter() + .find(|(_handle, aoi)| aoi.area().intersection(their_aoi.area()).is_some()) + .map(|(handle, _aoi)| *handle); + Ok(maybe_our_handle) } pub fn on_send_entry(&mut self) -> Result<(), Error> { @@ -223,8 +206,11 @@ impl SessionState { scope: Scope, handle: &AreaOfInterestHandle, ) -> Result { - let aoi = self.resources(scope).areas_of_interest.get(handle)?; - let capability = self.resources(scope).capabilities.get(&aoi.authorisation)?; + let aoi = self.resources(scope).areas_of_interest.try_get(handle)?; + let capability = self + .resources(scope) + .capabilities + .try_get(&aoi.authorisation)?; let namespace_id = capability.granted_namespace().into(); Ok(namespace_id) } @@ -254,6 +240,6 @@ impl SessionState { scope: Scope, handle: &AreaOfInterestHandle, ) -> Result<&SetupBindAreaOfInterest, Error> { - self.resources(scope).areas_of_interest.get(handle) + self.resources(scope).areas_of_interest.try_get(handle) } } diff --git a/iroh-willow/src/store/actor.rs b/iroh-willow/src/store/actor.rs index 8c1c2dde6a..70797c40d6 100644 --- a/iroh-willow/src/store/actor.rs +++ b/iroh-willow/src/store/actor.rs @@ -9,12 +9,16 @@ use std::{ use futures::{future::LocalBoxFuture, FutureExt}; use genawaiter::{sync::Gen, GeneratorState}; use tokio::sync::oneshot; -use tracing::{debug, error, error_span, instrument, warn, Span}; +use tracing::{debug, 
error, error_span, instrument, trace, warn, Span}; // use iroh_net::NodeId; use super::Store; use crate::{ - proto::{grouping::ThreeDRange, keys::NamespaceId, willow::Entry}, + proto::{ + grouping::ThreeDRange, + keys::NamespaceId, + willow::{AuthorisedEntry, Entry}, + }, session::{ coroutine::{Channels, Coroutine, Readyness, Yield}, Error, SessionInit, SessionState, SharedSessionState, @@ -36,60 +40,68 @@ pub enum Interest { Recv, } -#[derive(Debug)] -pub struct CoroutineNotifier { - tx: flume::Sender, +// #[derive(Debug)] +// pub struct Notifier { +// tx: flume::Sender, +// } +// impl Notifier { +// pub async fn notify(&self, peer: NodeId, notify: Readyness) -> anyhow::Result<()> { +// let msg = ToActor::Resume { peer, notify }; +// self.tx.send_async(msg).await?; +// Ok(()) +// } +// pub fn notify_sync(&self, peer: NodeId, notify: Readyness) -> anyhow::Result<()> { +// let msg = ToActor::Resume { peer, notify }; +// self.tx.send(msg)?; +// Ok(()) +// } +// pub fn notifier(&self, peer: NodeId) -> Notifier { +// Notifier { +// tx: self.tx.clone(), +// } +// } +// } + +#[derive(Debug, Clone)] +pub struct AssignedWaker { + waker: CoroutineWaker, + peer: NodeId, + notify: Readyness, } -impl CoroutineNotifier { - pub async fn notify(&self, peer: NodeId, notify: Readyness) -> anyhow::Result<()> { - let msg = ToActor::Resume { peer, notify }; - self.tx.send_async(msg).await?; - Ok(()) - } - pub fn notify_sync(&self, peer: NodeId, notify: Readyness) -> anyhow::Result<()> { - let msg = ToActor::Resume { peer, notify }; - self.tx.send(msg)?; - Ok(()) - } - pub fn notifier(&self, peer: NodeId, notify: Readyness) -> Notifier { - Notifier { - tx: self.tx.clone(), - peer, - notify, - } + +impl AssignedWaker { + pub fn wake(&self) -> anyhow::Result<()> { + self.waker.wake(self.peer, self.notify) } } #[derive(Debug, Clone)] -pub struct Notifier { +pub struct CoroutineWaker { tx: flume::Sender, - notify: Readyness, - peer: NodeId, } -impl Notifier { - pub async fn notify(&self) 
-> anyhow::Result<()> { - let msg = ToActor::Resume { - peer: self.peer, - notify: self.notify, - }; - self.tx.send_async(msg).await?; - Ok(()) - } - pub fn notify_sync(&self) -> anyhow::Result<()> { - let msg = ToActor::Resume { - peer: self.peer, - notify: self.notify, - }; +impl CoroutineWaker { + pub fn wake(&self, peer: NodeId, notify: Readyness) -> anyhow::Result<()> { + let msg = ToActor::Resume { peer, notify }; + // TODO: deadlock self.tx.send(msg)?; Ok(()) } + + pub fn with_notify(&self, peer: NodeId, notify: Readyness) -> AssignedWaker { + AssignedWaker { + waker: self.clone(), + peer, + notify, + } + } } impl StoreHandle { pub fn spawn(store: S, me: NodeId) -> StoreHandle { let (tx, rx) = flume::bounded(CHANNEL_CAP); - let actor_tx = tx.clone(); + // let actor_tx = tx.clone(); + let waker = CoroutineWaker { tx: tx.clone() }; let join_handle = std::thread::Builder::new() .name("sync-actor".to_string()) .spawn(move || { @@ -100,7 +112,7 @@ impl StoreHandle { store: Rc::new(RefCell::new(store)), sessions: Default::default(), actor_rx: rx, - actor_tx, + waker, }; if let Err(error) = actor.run() { error!(?error, "storage thread failed"); @@ -118,13 +130,22 @@ impl StoreHandle { self.tx.send(action)?; Ok(()) } - pub fn notifier(&self, peer: NodeId, notify: Readyness) -> Notifier { - Notifier { + pub fn waker(&self) -> CoroutineWaker { + CoroutineWaker { tx: self.tx.clone(), - peer, - notify, } } + pub async fn ingest_entry(&self, entry: AuthorisedEntry) -> anyhow::Result<()> { + let (reply, reply_rx) = oneshot::channel(); + self.send(ToActor::IngestEntry { entry, reply }).await?; + reply_rx.await??; + Ok(()) + } + // + // pub fn ingest_stream(&self, stream: impl Stream) -> Result<()> { + // } + // pub fn ingest_iter(&self, iter: impl ) -> Result<()> { + // } } impl Drop for StoreHandle { @@ -148,10 +169,11 @@ pub enum ToActor { #[debug(skip)] channels: Channels, init: SessionInit, + reply: oneshot::Sender>, }, - DropSession { - peer: NodeId, - }, + // 
DropSession { + // peer: NodeId, + // }, Resume { peer: NodeId, notify: Readyness, @@ -161,6 +183,10 @@ pub enum ToActor { #[debug(skip)] reply: flume::Sender, }, + IngestEntry { + entry: AuthorisedEntry, + reply: oneshot::Sender>, + }, Shutdown { #[debug(skip)] reply: Option>, @@ -168,26 +194,27 @@ pub enum ToActor { } #[derive(Debug)] -struct StorageSession { +struct Session { state: SharedSessionState, channels: Channels, pending: PendingCoroutines, + on_done: oneshot::Sender>, } #[derive(derive_more::Debug, Default)] struct PendingCoroutines { #[debug(skip)] - inner: HashMap>, + inner: HashMap>, } impl PendingCoroutines { - fn get_mut(&mut self, pending_on: Readyness) -> &mut VecDeque { + fn get_mut(&mut self, pending_on: Readyness) -> &mut VecDeque { self.inner.entry(pending_on).or_default() } - fn push_back(&mut self, pending_on: Readyness, generator: ReconcileGen) { + fn push_back(&mut self, pending_on: Readyness, generator: CoroutineState) { self.get_mut(pending_on).push_back(generator); } - fn pop_front(&mut self, pending_on: Readyness) -> Option { + fn pop_front(&mut self, pending_on: Readyness) -> Option { self.get_mut(pending_on).pop_front() } // fn push_front(&mut self, pending_on: Readyness, generator: ReconcileGen) { @@ -205,13 +232,13 @@ impl PendingCoroutines { #[derive(Debug)] pub struct StorageThread { store: Rc>, - sessions: HashMap, + sessions: HashMap, actor_rx: flume::Receiver, - actor_tx: flume::Sender, + waker: CoroutineWaker, // actor_tx: flume::Sender, } type ReconcileFut = LocalBoxFuture<'static, Result<(), Error>>; -type ReconcileGen = (Span, Gen); +type ReconcileGen = Gen; impl StorageThread { pub fn run(&mut self) -> anyhow::Result<()> { @@ -234,34 +261,47 @@ impl StorageThread { } fn handle_message(&mut self, message: ToActor) -> Result<(), Error> { - debug!(%message, "tick: handle_message"); + trace!(%message, "tick: handle_message"); match message { ToActor::Shutdown { .. 
} => unreachable!("handled in run"), ToActor::InitSession { peer, state, channels, - init, // start, + init, + reply, } => { - let session = StorageSession { + let session = Session { state: Rc::new(RefCell::new(state)), channels, pending: Default::default(), + on_done: reply, }; self.sessions.insert(peer, session); + debug!("start coroutine control"); - self.start_coroutine( + + if let Err(error) = self.start_coroutine( peer, |routine| routine.run_control(init).boxed_local(), error_span!("control", peer=%peer.fmt_short()), - )?; - } - ToActor::DropSession { peer } => { - self.sessions.remove(&peer); + true, + ) { + warn!(?error, peer=%peer.fmt_short(), "abort session: starting failed"); + self.remove_session(&peer, Err(error)); + } } ToActor::Resume { peer, notify } => { - self.resume_next(peer, notify)?; + if self.sessions.contains_key(&peer) { + if let Err(error) = self.resume_next(peer, notify) { + warn!(?error, peer=%peer.fmt_short(), "abort session: coroutine failed"); + self.remove_session(&peer, Err(error)); + } + } } + // ToActor::DropSession { peer } => { + // self.remove_session(&peer, Ok(())); + // } ToActor::GetEntries { namespace, reply } => { let store = self.store.borrow(); let entries = store @@ -271,11 +311,22 @@ impl StorageThread { reply.send(entry).ok(); } } + ToActor::IngestEntry { entry, reply } => { + let res = self.store.borrow_mut().ingest_entry(&entry); + reply.send(res).ok(); + } } Ok(()) } - fn session_mut(&mut self, peer: &NodeId) -> Result<&mut StorageSession, Error> { - self.sessions.get_mut(peer).ok_or(Error::SessionNotFound) + + fn remove_session(&mut self, peer: &NodeId, result: Result<(), Error>) { + let session = self.sessions.remove(peer); + if let Some(session) = session { + session.channels.close_all(); + session.on_done.send(result).ok(); + } else { + warn!("remove_session called for unknown session"); + } } fn start_coroutine( @@ -283,6 +334,7 @@ impl StorageThread { peer: NodeId, producer: impl FnOnce(Coroutine) -> 
ReconcileFut, span: Span, + finalizes_session: bool, ) -> Result<(), Error> { let session = self.sessions.get_mut(&peer).ok_or(Error::SessionNotFound)?; let store_snapshot = Rc::new(self.store.borrow_mut().snapshot()?); @@ -290,23 +342,26 @@ impl StorageThread { let channels = session.channels.clone(); let state = session.state.clone(); let store_writer = Rc::clone(&self.store); - let notifier = CoroutineNotifier { - tx: self.actor_tx.clone(), - }; + // let waker = self.waker.clone(); - let generator = Gen::new(move |co| { + let gen = Gen::new(move |co| { let routine = Coroutine { peer, store_snapshot, store_writer, - notifier, + // waker, channels, state, co, }; (producer)(routine) }); - self.resume_coroutine(peer, (span, generator)) + let state = CoroutineState { + gen, + span, + finalizes_session, + }; + self.resume_coroutine(peer, state) } #[instrument(skip_all, fields(session=%peer.fmt_short()))] @@ -322,19 +377,40 @@ impl StorageThread { } } - fn resume_coroutine(&mut self, peer: NodeId, generator: ReconcileGen) -> Result<(), Error> { - let (span, mut generator) = generator; - let _guard = span.enter(); - debug!("resume"); + fn resume_coroutine(&mut self, peer: NodeId, mut state: CoroutineState) -> Result<(), Error> { + let _guard = state.span.enter(); + trace!(peer=%peer.fmt_short(), "resume"); loop { - match generator.resume() { + match state.gen.resume() { GeneratorState::Yielded(yielded) => { - debug!(?yielded, "yield"); + trace!(?yielded, "yield"); match yielded { - Yield::Pending(notify) => { - let session = self.session_mut(&peer)?; + Yield::Pending(resume_on) => { + let session = + self.sessions.get_mut(&peer).ok_or(Error::SessionNotFound)?; drop(_guard); - session.pending.push_back(notify, (span, generator)); + match resume_on { + Readyness::Channel(ch, interest) => { + let waker = self + .waker + .with_notify(peer, Readyness::Channel(ch, interest)); + match interest { + Interest::Send => { + session.channels.sender(ch).register_waker(waker) + } + 
Interest::Recv => { + session.channels.receiver(ch).register_waker(waker) + } + }; + } + Readyness::Resource(handle) => { + let waker = + self.waker.with_notify(peer, Readyness::Resource(handle)); + let mut state = session.state.borrow_mut(); + state.their_resources.register_waker(handle, waker); + } + } + session.pending.push_back(resume_on, state); break Ok(()); } Yield::StartReconciliation(start) => { @@ -343,15 +419,25 @@ impl StorageThread { peer, |routine| routine.run_reconciliation(start).boxed_local(), error_span!("reconcile"), + false, )?; } } } GeneratorState::Complete(res) => { debug!(?res, "complete"); - break res; + if res.is_err() || state.finalizes_session { + self.remove_session(&peer, res) + } + break Ok(()); } } } } } + +struct CoroutineState { + gen: ReconcileGen, + span: Span, + finalizes_session: bool, +} diff --git a/iroh-willow/src/util/channel.rs b/iroh-willow/src/util/channel.rs index 7990c16b77..6a25a4eb6d 100644 --- a/iroh-willow/src/util/channel.rs +++ b/iroh-willow/src/util/channel.rs @@ -4,21 +4,51 @@ use std::{ sync::{Arc, Mutex}, }; +use anyhow::anyhow; use bytes::{Buf, Bytes, BytesMut}; use tokio::sync::Notify; -use tracing::{debug, trace}; +use tracing::trace; + +use crate::store::actor::AssignedWaker; use super::{DecodeOutcome, Decoder, Encoder}; +pub fn channel(cap: usize) -> (Sender, Receiver) { + let shared = Shared::new(cap); + let shared = Arc::new(Mutex::new(shared)); + let sender = Sender { + shared: shared.clone(), + _ty: PhantomData, + }; + let receiver = Receiver { + shared, + _ty: PhantomData, + }; + (sender, receiver) +} + +#[derive(Debug)] +pub enum ReadOutcome { + ReadBufferEmpty, + Closed, + Item(T), +} + +#[derive(Debug)] +pub enum WriteOutcome { + BufferFull, + Closed, + Ok, +} + #[derive(Debug)] struct Shared { buf: BytesMut, max_buffer_size: usize, notify_readable: Arc, notify_writable: Arc, - write_blocked: bool, - need_read_notify: bool, - need_write_notify: bool, + wakers_on_writable: Vec, + 
wakers_on_readable: Vec, closed: bool, } @@ -29,48 +59,46 @@ impl Shared { max_buffer_size: cap, notify_readable: Default::default(), notify_writable: Default::default(), - write_blocked: false, - need_read_notify: false, - need_write_notify: false, + wakers_on_writable: Default::default(), + wakers_on_readable: Default::default(), closed: false, } } fn close(&mut self) { self.closed = true; - self.notify_writable.notify_waiters(); - self.notify_readable.notify_waiters(); + self.notify_writable(); + self.notify_readable(); } + fn closed(&self) -> bool { self.closed } - fn read_slice(&self) -> &[u8] { + + fn peek_read(&self) -> &[u8] { &self.buf[..] } - fn read_buf_empty(&self) -> bool { + fn read_is_empty(&self) -> bool { self.buf.is_empty() } fn read_advance(&mut self, cnt: usize) { self.buf.advance(cnt); if cnt > 0 { - // self.write_blocked = false; - self.notify_writable.notify_waiters(); + self.notify_writable(); } } fn read_bytes(&mut self) -> Bytes { let len = self.buf.len(); if len > 0 { - // self.write_blocked = false; - self.notify_writable.notify_waiters(); + self.notify_writable(); } self.buf.split_to(len).freeze() } fn write_slice(&mut self, len: usize) -> Option<&mut [u8]> { if self.remaining_write_capacity() < len { - self.write_blocked = true; None } else { let old_len = self.buf.len(); @@ -83,15 +111,13 @@ impl Shared { fn write_message(&mut self, item: &T) -> anyhow::Result { let len = item.encoded_len(); - // debug!(?item, len = len, "write_message"); + if self.closed() { + return Ok(WriteOutcome::Closed); + } if let Some(slice) = self.write_slice(len) { - // debug!(len = slice.len(), "write_message got slice"); let mut cursor = io::Cursor::new(slice); item.encode_into(&mut cursor)?; - // debug!("RES {res:?}"); - // res?; - self.notify_readable.notify_one(); - // debug!("wrote and notified"); + self.notify_readable(); Ok(WriteOutcome::Ok) } else { Ok(WriteOutcome::BufferFull) @@ -99,7 +125,7 @@ impl Shared { } fn read_message(&mut self) -> 
anyhow::Result> { - let data = self.read_slice(); + let data = self.peek_read(); trace!("read, remaining {}", data.len()); let res = match T::decode_from(data)? { DecodeOutcome::NeedMoreData => { @@ -117,29 +143,22 @@ impl Shared { Ok(res) } - // fn receiver_want_notify(&mut self::) { - // self.need_read_notify = true; - // } - // fn need_write_notify(&mut self) { - // self.need_write_notify = true; - // } - fn remaining_write_capacity(&self) -> usize { self.max_buffer_size - self.buf.len() } -} - -#[derive(Debug)] -pub enum ReadOutcome { - ReadBufferEmpty, - Closed, - Item(T), -} -#[derive(Debug)] -pub enum WriteOutcome { - BufferFull, - Ok, + fn notify_readable(&mut self) { + self.notify_readable.notify_waiters(); + for waker in self.wakers_on_readable.drain(..) { + waker.wake().ok(); + } + } + fn notify_writable(&mut self) { + self.notify_writable.notify_waiters(); + for waker in self.wakers_on_writable.drain(..) { + waker.wake().ok(); + } + } } #[derive(Debug)] @@ -158,6 +177,10 @@ impl Clone for Receiver { } impl Receiver { + pub fn close(&self) { + self.shared.lock().unwrap().close() + } + pub fn read_bytes(&self) -> Bytes { self.shared.lock().unwrap().read_bytes() } @@ -166,7 +189,7 @@ impl Receiver { loop { let notify = { let mut shared = self.shared.lock().unwrap(); - if !shared.read_buf_empty() { + if !shared.read_is_empty() { return Some(shared.read_bytes()); } if shared.closed() { @@ -178,29 +201,14 @@ impl Receiver { } } - pub fn read_message_or_set_notify(&self) -> anyhow::Result> { + pub fn read_message(&self) -> anyhow::Result> { let mut shared = self.shared.lock().unwrap(); let outcome = shared.read_message()?; - if matches!(outcome, ReadOutcome::ReadBufferEmpty) { - shared.need_read_notify = true; - } Ok(outcome) } - pub fn set_notify_on_receivable(&self) { - self.shared.lock().unwrap().need_read_notify = true; - } - pub fn is_sendable_notify_set(&self) -> bool { - self.shared.lock().unwrap().need_write_notify - } - pub async fn 
notify_readable(&self) { - let shared = self.shared.lock().unwrap(); - if !shared.read_slice().is_empty() { - return; - } - let notify = shared.notify_readable.clone(); - drop(shared); - notify.notified().await + pub fn register_waker(&self, waker: AssignedWaker) { + self.shared.lock().unwrap().wakers_on_readable.push(waker); } pub async fn recv_async(&self) -> Option> { @@ -213,15 +221,12 @@ impl Receiver { ReadOutcome::ReadBufferEmpty => shared.notify_readable.clone(), ReadOutcome::Closed => return None, ReadOutcome::Item(item) => { - // debug!("read_message_async read"); return Some(Ok(item)); } }, } }; - // debug!("read_message_async NeedMoreData wait"); notify.notified().await; - // debug!("read_message_async NeedMoreData notified"); } } } @@ -242,100 +247,57 @@ impl Clone for Sender { } impl Sender { - // fn write_slice_into(&self, len: usize) -> Option<&mut [u8]> { - // let mut shared = self.shared.lock().unwrap(); - // shared.write_slice(len) - // } - pub fn set_notify_on_sendable(&self) { - self.shared.lock().unwrap().need_write_notify = true; + pub fn close(&self) { + self.shared.lock().unwrap().close() } - pub fn is_receivable_notify_set(&self) -> bool { - self.shared.lock().unwrap().need_read_notify + pub fn register_waker(&self, waker: AssignedWaker) { + self.shared.lock().unwrap().wakers_on_writable.push(waker); } - pub fn close(&self) { - self.shared.lock().unwrap().close() + pub async fn notify_closed(&self) { + tracing::info!("notify_close IN"); + loop { + let notify = { + let shared = self.shared.lock().unwrap(); + if shared.closed() { + tracing::info!("notify_close closed!"); + return; + } else { + tracing::info!("notify_close not closed - wait"); + + } + shared.notify_writable.clone() + }; + notify.notified().await; + } } - // fn write_slice(&self, data: &[u8]) -> bool { - // let mut shared = self.shared.lock().unwrap(); - // match shared.write_slice(data.len()) { - // None => false, - // Some(out) => { - // out.copy_from_slice(data); - // true 
- // } - // } - // } - - pub async fn write_slice_async(&self, data: &[u8]) { + pub async fn write_slice_async(&self, data: &[u8]) -> anyhow::Result<()> { loop { let notify = { let mut shared = self.shared.lock().unwrap(); + if shared.closed() { + break Err(anyhow!("channel closed")); + } if shared.remaining_write_capacity() < data.len() { let notify = shared.notify_writable.clone(); notify.clone() } else { let out = shared.write_slice(data.len()).expect("just checked"); out.copy_from_slice(data); - shared.notify_readable.notify_waiters(); - break; - // return true; + shared.notify_readable(); + break Ok(()); } }; notify.notified().await; } } - pub async fn notify_writable(&self) { - let shared = self.shared.lock().unwrap(); - if shared.remaining_write_capacity() > 0 { - return; - } - let notify = shared.notify_readable.clone(); - drop(shared); - notify.notified().await; - } - - // fn remaining_write_capacity(&self) -> usize { - // self.shared.lock().unwrap().remaining_write_capacity() - // } - - pub fn send_or_set_notify(&self, message: &T) -> anyhow::Result { - let mut shared = self.shared.lock().unwrap(); - let outcome = shared.write_message(message)?; - if matches!(outcome, WriteOutcome::BufferFull) { - shared.need_write_notify = true; - } - debug!("send buf remaining: {}", shared.remaining_write_capacity()); - Ok(outcome) - } - pub fn send(&self, message: &T) -> anyhow::Result { self.shared.lock().unwrap().write_message(message) } - // pub async fn sNamespacePublicKeyend_co( - // &self, - // message: &T, - // yield_fn: F, - // // co: &genawaiter::sync::Co, - // // yield_value: Y, - // ) -> anyhow::Result<()> - // where - // F: Fn() -> Fut, - // Fut: std::future::Future, - // { - // loop { - // let res = self.shared.lock().unwrap().write_message(message)?; - // match res { - // WriteOutcome::BufferFull => (yield_fn)().await, - // WriteOutcome::Ok => break Ok(()), - // } - // } - // } - pub async fn send_async(&self, message: &T) -> anyhow::Result<()> { loop { 
let notify = { @@ -343,6 +305,7 @@ impl Sender { match shared.write_message(message)? { WriteOutcome::Ok => return Ok(()), WriteOutcome::BufferFull => shared.notify_writable.clone(), + WriteOutcome::Closed => return Err(anyhow!("channel is closed")), } }; notify.notified().await; @@ -350,105 +313,13 @@ impl Sender { } } -pub fn channel(cap: usize) -> (Sender, Receiver) { - let shared = Shared::new(cap); - let shared = Arc::new(Mutex::new(shared)); - let sender = Sender { - shared: shared.clone(), - _ty: PhantomData, - }; - let receiver = Receiver { - shared, - _ty: PhantomData, - }; - (sender, receiver) -} - -// #[derive(Debug)] -// pub struct ChannelSender { -// id: u64, -// buf: rtrb::Producer, -// // waker: Option, -// } -// -// impl ChannelSender { -// pub fn remaining_capacity(&self) -> usize { -// self.buf.slots() +// pub async fn notify_readable(&self) { +// let shared = self.shared.lock().unwrap(); +// if !shared.peek_read().is_empty() { +// return; // } -// pub fn can_write_message(&mut self, message: &Message) -> bool { -// message.encoded_len() <= self.remaining_capacity() -// } -// -// pub fn write_message(&mut self, message: &Message) -> bool { -// let encoded_len = message.encoded_len(); -// if encoded_len > self.remaining_capacity() { -// return false; -// } -// message.encode_into(&mut self.buf).expect("length checked"); -// if let Some(waker) = self.waker.take() { -// waker.wake(); -// } -// true -// } -// -// pub fn set_waker(&mut self, waker: Waker) { -// self.waker = Some(waker); -// } -// } -// -// #[derive(Debug)] -// pub enum ToStoreActor { -// // NotifyWake(u64, Arc), -// Resume(u64), +// let notify = shared.notify_readable.clone(); +// drop(shared); +// notify.notified().await // } // -// #[derive(Debug)] -// pub struct ChannelReceiver { -// id: u64, -// // buf: rtrb::Consumer, -// buf: BytesMut, -// to_actor: flume::Sender, -// notify_readable: Arc, -// } -// -// impl ChannelReceiver { -// pub async fn read_chunk(&mut self) -> Result, 
ChunkError> { -// if self.is_empty() { -// self.acquire().await; -// } -// self.buf.read_chunk(self.readable_len()) -// } -// -// pub fn is_empty(&self) -> bool { -// self.buf.is_empty() -// } -// -// pub fn readable_len(&self) -> usize { -// self.buf.slots() -// } -// -// pub async fn resume(&mut self) { -// self.to_actor -// .send_async(ToStoreActor::Resume(self.id)) -// .await -// .unwrap(); -// } -// -// pub async fn acquire(&mut self) { -// if !self.is_empty() { -// return; -// } -// self.notify_readable.notified().await; -// } -// } -// -// pub struct ChannelSender { -// id: u64, -// buf: rtrb::Producer, -// to_actor: flume::Sender, -// notify_readable: Arc, -// } -// -// impl ChannelSender { -// pub -// } From e9cf94b991917fef87246d24f0ac1e9a3a6bca40 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 9 May 2024 22:03:27 +0200 Subject: [PATCH 024/198] better structure for coroutines --- iroh-willow/src/net.rs | 12 +- iroh-willow/src/session/coroutine.rs | 53 +++--- iroh-willow/src/session/resource.rs | 26 ++- iroh-willow/src/store/actor.rs | 268 ++++++++++++++------------- iroh-willow/src/util/channel.rs | 92 +++------ 5 files changed, 208 insertions(+), 243 deletions(-) diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 8751e477cf..273ab7e38f 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -84,19 +84,19 @@ pub async fn run( }; let state = SessionState::new(our_role, our_nonce, received_commitment, max_payload_size); - let (reply, reply_rx) = oneshot::channel(); + let (on_done, on_done_rx) = oneshot::channel(); store .send(ToActor::InitSession { peer, state, channels, init, - reply, + on_done, }) .await?; join_set.spawn(async move { - reply_rx.await??; + on_done_rx.await??; Ok(()) }); @@ -151,7 +151,7 @@ async fn recv_loop( channel_tx: Sender, ) -> anyhow::Result<()> { while let Some(buf) = recv_stream.read_chunk(CHANNEL_CAP, true).await? 
{ - channel_tx.write_slice_async(&buf.bytes[..]).await?; + channel_tx.write_all_async(&buf.bytes[..]).await?; trace!(len = buf.bytes.len(), "recv"); } recv_stream.stop(ERROR_CODE_CLOSE_GRACEFUL.into()).ok(); @@ -212,7 +212,7 @@ mod tests { use crate::{ net::run, proto::{ - grouping::AreaOfInterest, + grouping::{AreaOfInterest, ThreeDRange}, keys::{NamespaceId, NamespaceKind, NamespaceSecretKey, UserPublicKey, UserSecretKey}, meadowcap::{AccessMode, McCapability, OwnedCapability}, wgps::ReadCapability, @@ -360,6 +360,7 @@ mod tests { .send(ToActor::GetEntries { namespace, reply: tx, + range: ThreeDRange::full() }) .await?; let entries: HashSet<_> = rx.into_stream().collect::>().await; @@ -390,7 +391,6 @@ mod tests { }; track_entries.extend([entry.clone()]); let entry = entry.attach_authorisation(write_cap.clone(), &user_secret)?; - info!("INGEST {entry:?}"); store.ingest_entry(entry).await?; } let init = SessionInit::with_interest(user_secret, read_cap, AreaOfInterest::full()); diff --git a/iroh-willow/src/session/coroutine.rs b/iroh-willow/src/session/coroutine.rs index 6afbd53b74..3b2b0a1ceb 100644 --- a/iroh-willow/src/session/coroutine.rs +++ b/iroh-willow/src/session/coroutine.rs @@ -5,7 +5,6 @@ use std::{ use anyhow::anyhow; use genawaiter::sync::Co; -use iroh_net::NodeId; use tracing::{debug, trace}; @@ -39,12 +38,10 @@ pub enum Readyness { #[derive(derive_more::Debug)] pub struct Coroutine { - pub peer: NodeId, pub store_snapshot: Rc, pub store_writer: Rc>, pub channels: Channels, pub state: SharedSessionState, - // pub waker: CoroutineWaker, #[debug(skip)] pub co: Co, } @@ -87,16 +84,29 @@ impl Channels { impl Coroutine { pub async fn run_reconciliation( mut self, - start: Option<(AreaOfInterestHandle, AreaOfInterestHandle)>, + start_with_aoi: Option<(AreaOfInterestHandle, AreaOfInterestHandle)>, ) -> Result<(), Error> { - debug!(init = start.is_some(), "start reconciliation"); - if let Some((our_handle, their_handle)) = start { - 
self.init_reconciliation(our_handle, their_handle).await?; + debug!(start = start_with_aoi.is_some(), "start reconciliation"); + + // optionally initiate reconciliation with a first fingerprint. only alfie may do this. + if let Some((our_handle, their_handle)) = start_with_aoi { + self.start_reconciliation(our_handle, their_handle).await?; } while let Some(message) = self.recv(LogicalChannel::Reconciliation).await { let message = message?; - self.on_reconciliation_message(message).await?; + trace!(%message, "recv"); + match message { + Message::ReconciliationSendFingerprint(message) => { + self.on_send_fingerprint(message).await? + } + Message::ReconciliationAnnounceEntries(message) => { + self.on_announce_entries(message).await? + } + Message::ReconciliationSendEntry(message) => self.on_send_entry(message).await?, + _ => return Err(Error::UnsupportedMessage), + }; + if self.state_mut().reconciliation_is_complete() { self.channels.close_send(); } @@ -141,21 +151,6 @@ impl Coroutine { Ok(()) } - async fn on_reconciliation_message(&mut self, message: Message) -> Result<(), Error> { - trace!(%message, "recv"); - match message { - Message::ReconciliationSendFingerprint(message) => { - self.on_send_fingerprint(message).await? - } - Message::ReconciliationAnnounceEntries(message) => { - self.on_announce_entries(message).await? 
- } - Message::ReconciliationSendEntry(message) => self.on_send_entry(message).await?, - _ => return Err(Error::UnsupportedMessage), - }; - Ok(()) - } - async fn setup(&mut self, init: SessionInit) -> Result<(), Error> { debug!(?init, "init"); for (capability, aois) in init.interests.into_iter() { @@ -194,7 +189,7 @@ impl Coroutine { Ok(()) } - async fn init_reconciliation( + async fn start_reconciliation( &mut self, our_handle: AreaOfInterestHandle, their_handle: AreaOfInterestHandle, @@ -339,9 +334,11 @@ impl Coroutine { static_token, message.dynamic_token, )?; + self.store_writer .borrow_mut() .ingest_entry(&authorised_entry)?; + Ok(()) } @@ -490,14 +487,14 @@ impl Coroutine { async fn recv(&self, channel: LogicalChannel) -> Option> { let receiver = self.channels.receiver(channel); loop { - match receiver.read_message() { + match receiver.recv_message() { Err(err) => return Some(Err(err)), Ok(outcome) => match outcome { ReadOutcome::Closed => { debug!("recv: closed"); return None; } - ReadOutcome::ReadBufferEmpty => { + ReadOutcome::BufferEmpty => { self.co .yield_(Yield::Pending(Readyness::Channel(channel, Interest::Recv))) .await; @@ -526,13 +523,13 @@ impl Coroutine { let sender = self.channels.sender(channel); loop { - match sender.send(&message)? { + match sender.send_message(&message)? 
{ WriteOutcome::Closed => { debug!("send: closed"); return Err(anyhow!("channel closed")); } WriteOutcome::Ok => { - debug!(msg=%message, ch=%channel.fmt_short(), "sent"); + debug!(ch=%channel.fmt_short(), msg=%message, "sent"); break Ok(()); } WriteOutcome::BufferFull => { diff --git a/iroh-willow/src/session/resource.rs b/iroh-willow/src/session/resource.rs index 06db355f1e..7f424d8789 100644 --- a/iroh-willow/src/session/resource.rs +++ b/iroh-willow/src/session/resource.rs @@ -1,11 +1,11 @@ -use std::collections::{HashMap, VecDeque}; - -use crate::{ - proto::wgps::{ - AreaOfInterestHandle, CapabilityHandle, IsHandle, ReadCapability, ResourceHandle, - SetupBindAreaOfInterest, StaticToken, StaticTokenHandle, - }, - store::actor::AssignedWaker, +use std::{ + collections::{HashMap, VecDeque}, + task::Waker, +}; + +use crate::proto::wgps::{ + AreaOfInterestHandle, CapabilityHandle, IsHandle, ReadCapability, ResourceHandle, + SetupBindAreaOfInterest, StaticToken, StaticTokenHandle, }; use super::Error; @@ -17,7 +17,7 @@ pub struct ScopedResources { pub static_tokens: ResourceMap, } impl ScopedResources { - pub fn register_waker(&mut self, handle: ResourceHandle, waker: AssignedWaker) { + pub fn register_waker(&mut self, handle: ResourceHandle, waker: Waker) { tracing::trace!(?handle, "register_notify"); match handle { ResourceHandle::AreaOfInterest(h) => self.areas_of_interest.register_waker(h, waker), @@ -41,7 +41,7 @@ impl ScopedResources { pub struct ResourceMap { next_handle: u64, map: HashMap>, - wakers: HashMap>, + wakers: HashMap>, } impl Default for ResourceMap { @@ -72,15 +72,13 @@ where if let Some(mut wakers) = self.wakers.remove(&handle) { tracing::trace!(?handle, "notify {}", wakers.len()); for waker in wakers.drain(..) 
{ - if let Err(err) = waker.wake() { - tracing::warn!(?err, "notify failed for {handle:?}"); - } + waker.wake(); } } handle } - pub fn register_waker(&mut self, handle: H, notifier: AssignedWaker) { + pub fn register_waker(&mut self, handle: H, notifier: Waker) { self.wakers.entry(handle).or_default().push_back(notifier) } diff --git a/iroh-willow/src/store/actor.rs b/iroh-willow/src/store/actor.rs index 70797c40d6..b420cb0716 100644 --- a/iroh-willow/src/store/actor.rs +++ b/iroh-willow/src/store/actor.rs @@ -1,16 +1,16 @@ use std::{ cell::RefCell, - collections::{HashMap, VecDeque}, + collections::{HashMap, HashSet}, rc::Rc, sync::Arc, + task::Wake, thread::JoinHandle, }; use futures::{future::LocalBoxFuture, FutureExt}; use genawaiter::{sync::Gen, GeneratorState}; use tokio::sync::oneshot; -use tracing::{debug, error, error_span, instrument, trace, warn, Span}; -// use iroh_net::NodeId; +use tracing::{debug, error, error_span, trace, warn, Span}; use super::Store; use crate::{ @@ -28,6 +28,8 @@ use iroh_base::key::NodeId; pub const CHANNEL_CAP: usize = 1024; +pub type SessionId = NodeId; + #[derive(Debug, Clone)] pub struct StoreHandle { tx: flume::Sender, @@ -64,44 +66,51 @@ pub enum Interest { #[derive(Debug, Clone)] pub struct AssignedWaker { - waker: CoroutineWaker, - peer: NodeId, - notify: Readyness, + waker: Notifier, + coro_id: CoroId, } impl AssignedWaker { - pub fn wake(&self) -> anyhow::Result<()> { - self.waker.wake(self.peer, self.notify) + pub fn wake(&self) { + self.waker.wake(self.coro_id) + } +} + +impl Wake for AssignedWaker { + fn wake(self: Arc) { + self.waker.wake(self.coro_id) } } #[derive(Debug, Clone)] -pub struct CoroutineWaker { - tx: flume::Sender, +pub struct Notifier { + tx: flume::Sender, } -impl CoroutineWaker { - pub fn wake(&self, peer: NodeId, notify: Readyness) -> anyhow::Result<()> { - let msg = ToActor::Resume { peer, notify }; - // TODO: deadlock - self.tx.send(msg)?; - Ok(()) +impl Notifier { + pub fn wake(&self, coro_id: 
CoroId) { + self.tx.send(coro_id).ok(); } - pub fn with_notify(&self, peer: NodeId, notify: Readyness) -> AssignedWaker { - AssignedWaker { + pub fn create_waker(&self, coro_id: CoroId) -> std::task::Waker { + Arc::new(AssignedWaker { waker: self.clone(), - peer, - notify, - } + coro_id, + }) + .into() } } impl StoreHandle { pub fn spawn(store: S, me: NodeId) -> StoreHandle { let (tx, rx) = flume::bounded(CHANNEL_CAP); + // This channel only tracks wake to resume messages to coroutines, which are a sinlge u64 + // per wakeup. We want to issue wake calls synchronosuly without blocking, so we use an + // unbounded channel here. The actual capacity is bounded by the number of sessions times + // the number of coroutines per session (which is fixed, currently at 2). + let (notify_tx, notify_rx) = flume::unbounded(); // let actor_tx = tx.clone(); - let waker = CoroutineWaker { tx: tx.clone() }; + let waker = Notifier { tx: notify_tx }; let join_handle = std::thread::Builder::new() .name("sync-actor".to_string()) .spawn(move || { @@ -111,8 +120,12 @@ impl StoreHandle { let mut actor = StorageThread { store: Rc::new(RefCell::new(store)), sessions: Default::default(), - actor_rx: rx, - waker, + coroutines: Default::default(), + + next_coro_id: Default::default(), + inbox_rx: rx, + notify_rx, + notifier: waker, }; if let Err(error) = actor.run() { error!(?error, "storage thread failed"); @@ -130,11 +143,6 @@ impl StoreHandle { self.tx.send(action)?; Ok(()) } - pub fn waker(&self) -> CoroutineWaker { - CoroutineWaker { - tx: self.tx.clone(), - } - } pub async fn ingest_entry(&self, entry: AuthorisedEntry) -> anyhow::Result<()> { let (reply, reply_rx) = oneshot::channel(); self.send(ToActor::IngestEntry { entry, reply }).await?; @@ -169,17 +177,18 @@ pub enum ToActor { #[debug(skip)] channels: Channels, init: SessionInit, - reply: oneshot::Sender>, + on_done: oneshot::Sender>, }, // DropSession { // peer: NodeId, // }, - Resume { - peer: NodeId, - notify: Readyness, - }, + // 
Resume { + // session_id: SessionId, + // coro_id: CoroId, + // }, GetEntries { namespace: NamespaceId, + range: ThreeDRange, #[debug(skip)] reply: flume::Sender, }, @@ -197,69 +206,78 @@ pub enum ToActor { struct Session { state: SharedSessionState, channels: Channels, - pending: PendingCoroutines, + coroutines: HashSet, on_done: oneshot::Sender>, } -#[derive(derive_more::Debug, Default)] -struct PendingCoroutines { - #[debug(skip)] - inner: HashMap>, -} - -impl PendingCoroutines { - fn get_mut(&mut self, pending_on: Readyness) -> &mut VecDeque { - self.inner.entry(pending_on).or_default() - } - fn push_back(&mut self, pending_on: Readyness, generator: CoroutineState) { - self.get_mut(pending_on).push_back(generator); - } - fn pop_front(&mut self, pending_on: Readyness) -> Option { - self.get_mut(pending_on).pop_front() - } - // fn push_front(&mut self, pending_on: Readyness, generator: ReconcileGen) { - // self.get_mut(pending_on).push_front(generator); - // } - // fn len(&self, pending_on: &Readyness) -> usize { - // self.inner.get(pending_on).map(|v| v.len()).unwrap_or(0) - // } - // - // fn is_empty(&self) -> bool { - // self.inner.values().any(|v| !v.is_empty()) - // } -} +type CoroId = u64; #[derive(Debug)] pub struct StorageThread { + inbox_rx: flume::Receiver, + notify_rx: flume::Receiver, store: Rc>, - sessions: HashMap, - actor_rx: flume::Receiver, - waker: CoroutineWaker, // actor_tx: flume::Sender, + sessions: HashMap, + coroutines: HashMap, + next_coro_id: u64, + notifier: Notifier, // actor_tx: flume::Sender, } type ReconcileFut = LocalBoxFuture<'static, Result<(), Error>>; type ReconcileGen = Gen; +#[derive(derive_more::Debug)] +struct CoroutineState { + id: CoroId, + session_id: SessionId, + #[debug("Generator")] + gen: ReconcileGen, + span: Span, + finalizes_session: bool, +} + impl StorageThread { pub fn run(&mut self) -> anyhow::Result<()> { + enum Op { + Inbox(ToActor), + Notify(CoroId), + } loop { - let message = match self.actor_rx.recv() { - 
Err(_) => break, - Ok(message) => message, + let op = flume::Selector::new() + .recv(&self.inbox_rx, |r| r.map(Op::Inbox)) + .recv(&self.notify_rx, |r| r.map(Op::Notify)) + .wait(); + + let Ok(op) = op else { + break; }; - match message { - ToActor::Shutdown { reply } => { + + match op { + Op::Inbox(ToActor::Shutdown { reply }) => { if let Some(reply) = reply { reply.send(()).ok(); } break; } - message => self.handle_message(message)?, + Op::Inbox(message) => self.handle_message(message)?, + Op::Notify(coro_id) => self.handle_resume(coro_id), } } Ok(()) } + fn handle_resume(&mut self, coro_id: CoroId) { + if let Some(coro) = self.coroutines.remove(&coro_id) { + let session_id = coro.session_id; + if let Err(error) = self.resume_coroutine(coro) { + warn!(?error, session=%session_id.fmt_short(), "abort session: coroutine failed"); + self.remove_session(&session_id, Err(error)); + } + } else { + debug!(%coro_id, "received wakeup for dropped coroutine"); + } + } + fn handle_message(&mut self, message: ToActor) -> Result<(), Error> { trace!(%message, "tick: handle_message"); match message { @@ -268,14 +286,14 @@ impl StorageThread { peer, state, channels, - init, - reply, + init: setup, + on_done, } => { let session = Session { state: Rc::new(RefCell::new(state)), channels, - pending: Default::default(), - on_done: reply, + coroutines: Default::default(), + on_done, }; self.sessions.insert(peer, session); @@ -283,29 +301,18 @@ impl StorageThread { if let Err(error) = self.start_coroutine( peer, - |routine| routine.run_control(init).boxed_local(), - error_span!("control", peer=%peer.fmt_short()), + |routine| routine.run_control(setup).boxed_local(), + error_span!("session", peer=%peer.fmt_short()), true, ) { warn!(?error, peer=%peer.fmt_short(), "abort session: starting failed"); self.remove_session(&peer, Err(error)); } } - ToActor::Resume { peer, notify } => { - if self.sessions.contains_key(&peer) { - if let Err(error) = self.resume_next(peer, notify) { - 
warn!(?error, peer=%peer.fmt_short(), "abort session: coroutine failed"); - self.remove_session(&peer, Err(error)); - } - } - } - // ToActor::DropSession { peer } => { - // self.remove_session(&peer, Ok(())); - // } - ToActor::GetEntries { namespace, reply } => { + ToActor::GetEntries { namespace, range, reply } => { let store = self.store.borrow(); let entries = store - .get_entries(namespace, &ThreeDRange::full()) + .get_entries(namespace, &range) .filter_map(|r| r.ok()); for entry in entries { reply.send(entry).ok(); @@ -324,6 +331,9 @@ impl StorageThread { if let Some(session) = session { session.channels.close_all(); session.on_done.send(result).ok(); + for coro_id in session.coroutines { + self.coroutines.remove(&coro_id); + } } else { warn!("remove_session called for unknown session"); } @@ -331,69 +341,64 @@ impl StorageThread { fn start_coroutine( &mut self, - peer: NodeId, + session_id: SessionId, producer: impl FnOnce(Coroutine) -> ReconcileFut, span: Span, finalizes_session: bool, ) -> Result<(), Error> { - let session = self.sessions.get_mut(&peer).ok_or(Error::SessionNotFound)?; + let session = self + .sessions + .get_mut(&session_id) + .ok_or(Error::SessionNotFound)?; let store_snapshot = Rc::new(self.store.borrow_mut().snapshot()?); let channels = session.channels.clone(); let state = session.state.clone(); let store_writer = Rc::clone(&self.store); - // let waker = self.waker.clone(); let gen = Gen::new(move |co| { let routine = Coroutine { - peer, store_snapshot, store_writer, - // waker, channels, state, co, }; (producer)(routine) }); + let id = { + let next_id = self.next_coro_id; + self.next_coro_id += 1; + next_id + }; + session.coroutines.insert(id); let state = CoroutineState { + id, + session_id, gen, span, finalizes_session, }; - self.resume_coroutine(peer, state) - } - - #[instrument(skip_all, fields(session=%peer.fmt_short()))] - fn resume_next(&mut self, peer: NodeId, notify: Readyness) -> Result<(), Error> { - let session = 
self.sessions.get_mut(&peer).ok_or(Error::SessionNotFound)?; - let generator = session.pending.pop_front(notify); - match generator { - Some(generator) => self.resume_coroutine(peer, generator), - None => { - debug!("nothing to resume"); - Ok(()) - } - } + self.resume_coroutine(state) } - fn resume_coroutine(&mut self, peer: NodeId, mut state: CoroutineState) -> Result<(), Error> { - let _guard = state.span.enter(); - trace!(peer=%peer.fmt_short(), "resume"); + fn resume_coroutine(&mut self, mut coro: CoroutineState) -> Result<(), Error> { + let _guard = coro.span.enter(); + trace!("resume"); loop { - match state.gen.resume() { + match coro.gen.resume() { GeneratorState::Yielded(yielded) => { trace!(?yielded, "yield"); match yielded { - Yield::Pending(resume_on) => { - let session = - self.sessions.get_mut(&peer).ok_or(Error::SessionNotFound)?; + Yield::Pending(waiting_for) => { + let session = self + .sessions + .get_mut(&coro.session_id) + .ok_or(Error::SessionNotFound)?; drop(_guard); - match resume_on { + match waiting_for { Readyness::Channel(ch, interest) => { - let waker = self - .waker - .with_notify(peer, Readyness::Channel(ch, interest)); + let waker = self.notifier.create_waker(coro.id); match interest { Interest::Send => { session.channels.sender(ch).register_waker(waker) @@ -404,20 +409,19 @@ impl StorageThread { }; } Readyness::Resource(handle) => { - let waker = - self.waker.with_notify(peer, Readyness::Resource(handle)); + let waker = self.notifier.create_waker(coro.id); let mut state = session.state.borrow_mut(); state.their_resources.register_waker(handle, waker); } } - session.pending.push_back(resume_on, state); + self.coroutines.insert(coro.id, coro); break Ok(()); } Yield::StartReconciliation(start) => { debug!("start coroutine reconciliation"); self.start_coroutine( - peer, - |routine| routine.run_reconciliation(start).boxed_local(), + coro.session_id, + |state| state.run_reconciliation(start).boxed_local(), error_span!("reconcile"), false, 
)?; @@ -426,18 +430,18 @@ impl StorageThread { } GeneratorState::Complete(res) => { debug!(?res, "complete"); - if res.is_err() || state.finalizes_session { - self.remove_session(&peer, res) + if res.is_err() || coro.finalizes_session { + self.remove_session(&coro.session_id, res) } break Ok(()); } } } } -} -struct CoroutineState { - gen: ReconcileGen, - span: Span, - finalizes_session: bool, + // fn next_coro_id(&mut self) -> u64 { + // let next_id = self.next_coro_id; + // self.next_coro_id += 1; + // next_id + // } } diff --git a/iroh-willow/src/util/channel.rs b/iroh-willow/src/util/channel.rs index 6a25a4eb6d..2d9f4d146a 100644 --- a/iroh-willow/src/util/channel.rs +++ b/iroh-willow/src/util/channel.rs @@ -2,6 +2,7 @@ use std::{ io, marker::PhantomData, sync::{Arc, Mutex}, + task::Waker, }; use anyhow::anyhow; @@ -9,8 +10,6 @@ use bytes::{Buf, Bytes, BytesMut}; use tokio::sync::Notify; use tracing::trace; -use crate::store::actor::AssignedWaker; - use super::{DecodeOutcome, Decoder, Encoder}; pub fn channel(cap: usize) -> (Sender, Receiver) { @@ -29,7 +28,7 @@ pub fn channel(cap: usize) -> (Sender, Receiver) { #[derive(Debug)] pub enum ReadOutcome { - ReadBufferEmpty, + BufferEmpty, Closed, Item(T), } @@ -47,8 +46,8 @@ struct Shared { max_buffer_size: usize, notify_readable: Arc, notify_writable: Arc, - wakers_on_writable: Vec, - wakers_on_readable: Vec, + wakers_on_writable: Vec, + wakers_on_readable: Vec, closed: bool, } @@ -78,7 +77,7 @@ impl Shared { &self.buf[..] 
} - fn read_is_empty(&self) -> bool { + fn recv_buf_is_empty(&self) -> bool { self.buf.is_empty() } @@ -89,7 +88,7 @@ impl Shared { } } - fn read_bytes(&mut self) -> Bytes { + fn recv_bytes(&mut self) -> Bytes { let len = self.buf.len(); if len > 0 { self.notify_writable(); @@ -97,7 +96,7 @@ impl Shared { self.buf.split_to(len).freeze() } - fn write_slice(&mut self, len: usize) -> Option<&mut [u8]> { + fn writable_mut(&mut self, len: usize) -> Option<&mut [u8]> { if self.remaining_write_capacity() < len { None } else { @@ -109,12 +108,12 @@ impl Shared { } } - fn write_message(&mut self, item: &T) -> anyhow::Result { + fn send_message(&mut self, item: &T) -> anyhow::Result { let len = item.encoded_len(); if self.closed() { return Ok(WriteOutcome::Closed); } - if let Some(slice) = self.write_slice(len) { + if let Some(slice) = self.writable_mut(len) { let mut cursor = io::Cursor::new(slice); item.encode_into(&mut cursor)?; self.notify_readable(); @@ -124,7 +123,7 @@ impl Shared { } } - fn read_message(&mut self) -> anyhow::Result> { + fn recv_message(&mut self) -> anyhow::Result> { let data = self.peek_read(); trace!("read, remaining {}", data.len()); let res = match T::decode_from(data)? { @@ -132,7 +131,7 @@ impl Shared { if self.closed() { ReadOutcome::Closed } else { - ReadOutcome::ReadBufferEmpty + ReadOutcome::BufferEmpty } } DecodeOutcome::Decoded { item, consumed } => { @@ -150,13 +149,13 @@ impl Shared { fn notify_readable(&mut self) { self.notify_readable.notify_waiters(); for waker in self.wakers_on_readable.drain(..) { - waker.wake().ok(); + waker.wake(); } } fn notify_writable(&mut self) { self.notify_writable.notify_waiters(); for waker in self.wakers_on_writable.drain(..) 
{ - waker.wake().ok(); + waker.wake(); } } } @@ -181,16 +180,16 @@ impl Receiver { self.shared.lock().unwrap().close() } - pub fn read_bytes(&self) -> Bytes { - self.shared.lock().unwrap().read_bytes() + pub fn register_waker(&self, waker: Waker) { + self.shared.lock().unwrap().wakers_on_readable.push(waker); } pub async fn read_bytes_async(&self) -> Option { loop { let notify = { let mut shared = self.shared.lock().unwrap(); - if !shared.read_is_empty() { - return Some(shared.read_bytes()); + if !shared.recv_buf_is_empty() { + return Some(shared.recv_bytes()); } if shared.closed() { return None; @@ -201,24 +200,20 @@ impl Receiver { } } - pub fn read_message(&self) -> anyhow::Result> { + pub fn recv_message(&self) -> anyhow::Result> { let mut shared = self.shared.lock().unwrap(); - let outcome = shared.read_message()?; + let outcome = shared.recv_message()?; Ok(outcome) } - pub fn register_waker(&self, waker: AssignedWaker) { - self.shared.lock().unwrap().wakers_on_readable.push(waker); - } - - pub async fn recv_async(&self) -> Option> { + pub async fn recv_message_async(&self) -> Option> { loop { let notify = { let mut shared = self.shared.lock().unwrap(); - match shared.read_message() { + match shared.recv_message() { Err(err) => return Some(Err(err)), Ok(outcome) => match outcome { - ReadOutcome::ReadBufferEmpty => shared.notify_readable.clone(), + ReadOutcome::BufferEmpty => shared.notify_readable.clone(), ReadOutcome::Closed => return None, ReadOutcome::Item(item) => { return Some(Ok(item)); @@ -251,29 +246,11 @@ impl Sender { self.shared.lock().unwrap().close() } - pub fn register_waker(&self, waker: AssignedWaker) { + pub fn register_waker(&self, waker: Waker) { self.shared.lock().unwrap().wakers_on_writable.push(waker); } - pub async fn notify_closed(&self) { - tracing::info!("notify_close IN"); - loop { - let notify = { - let shared = self.shared.lock().unwrap(); - if shared.closed() { - tracing::info!("notify_close closed!"); - return; - } else { - 
tracing::info!("notify_close not closed - wait"); - - } - shared.notify_writable.clone() - }; - notify.notified().await; - } - } - - pub async fn write_slice_async(&self, data: &[u8]) -> anyhow::Result<()> { + pub async fn write_all_async(&self, data: &[u8]) -> anyhow::Result<()> { loop { let notify = { let mut shared = self.shared.lock().unwrap(); @@ -284,7 +261,7 @@ impl Sender { let notify = shared.notify_writable.clone(); notify.clone() } else { - let out = shared.write_slice(data.len()).expect("just checked"); + let out = shared.writable_mut(data.len()).expect("just checked"); out.copy_from_slice(data); shared.notify_readable(); break Ok(()); @@ -294,15 +271,15 @@ impl Sender { } } - pub fn send(&self, message: &T) -> anyhow::Result { - self.shared.lock().unwrap().write_message(message) + pub fn send_message(&self, message: &T) -> anyhow::Result { + self.shared.lock().unwrap().send_message(message) } - pub async fn send_async(&self, message: &T) -> anyhow::Result<()> { + pub async fn send_message_async(&self, message: &T) -> anyhow::Result<()> { loop { let notify = { let mut shared = self.shared.lock().unwrap(); - match shared.write_message(message)? { + match shared.send_message(message)? 
{ WriteOutcome::Ok => return Ok(()), WriteOutcome::BufferFull => shared.notify_writable.clone(), WriteOutcome::Closed => return Err(anyhow!("channel is closed")), @@ -312,14 +289,3 @@ impl Sender { } } } - -// pub async fn notify_readable(&self) { -// let shared = self.shared.lock().unwrap(); -// if !shared.peek_read().is_empty() { -// return; -// } -// let notify = shared.notify_readable.clone(); -// drop(shared); -// notify.notified().await -// } -// From dfc50762befa5e2c8434744c9f0c0d3bad86af38 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 9 May 2024 23:25:20 +0200 Subject: [PATCH 025/198] reduce some boilerplate --- iroh-willow/src/net.rs | 2 +- iroh-willow/src/session/coroutine.rs | 132 ++++++++++++++------------- iroh-willow/src/session/error.rs | 2 +- iroh-willow/src/session/resource.rs | 27 +++++- iroh-willow/src/store/actor.rs | 70 +++----------- 5 files changed, 113 insertions(+), 120 deletions(-) diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 273ab7e38f..29a7bcb2ca 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -360,7 +360,7 @@ mod tests { .send(ToActor::GetEntries { namespace, reply: tx, - range: ThreeDRange::full() + range: ThreeDRange::full(), }) .await?; let entries: HashSet<_> = rx.into_stream().collect::>().await; diff --git a/iroh-willow/src/session/coroutine.rs b/iroh-willow/src/session/coroutine.rs index 3b2b0a1ceb..da78db342e 100644 --- a/iroh-willow/src/session/coroutine.rs +++ b/iroh-willow/src/session/coroutine.rs @@ -1,9 +1,11 @@ use std::{ cell::{RefCell, RefMut}, + future::Future, + pin::Pin, rc::Rc, + task::{Context, Poll, Waker}, }; -use anyhow::anyhow; use genawaiter::sync::Co; use tracing::{debug, trace}; @@ -15,26 +17,20 @@ use crate::{ wgps::{ AreaOfInterestHandle, Fingerprint, LengthyEntry, LogicalChannel, Message, ReconciliationAnnounceEntries, ReconciliationSendEntry, ReconciliationSendFingerprint, - ResourceHandle, SetupBindAreaOfInterest, StaticToken, 
StaticTokenHandle, + SetupBindAreaOfInterest, StaticToken, StaticTokenHandle, }, willow::AuthorisedEntry, }, - store::{actor::Interest, ReadonlyStore, SplitAction, Store, SyncConfig}, - util::channel::{ReadOutcome, Receiver, Sender, WriteOutcome}, + session::{Error, SessionInit, SessionState, SharedSessionState}, + store::{ReadonlyStore, SplitAction, Store, SyncConfig}, + util::channel::{Receiver, Sender}, }; -use super::{Error, SessionInit, SessionState, SharedSessionState}; - #[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] pub enum Yield { - Pending(Readyness), + Pending, StartReconciliation(Option<(AreaOfInterestHandle, AreaOfInterestHandle)>), } -#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] -pub enum Readyness { - Channel(LogicalChannel, Interest), - Resource(ResourceHandle), -} #[derive(derive_more::Debug)] pub struct Coroutine { @@ -42,6 +38,7 @@ pub struct Coroutine { pub store_writer: Rc>, pub channels: Channels, pub state: SharedSessionState, + pub waker: Waker, #[debug(skip)] pub co: Co, } @@ -342,21 +339,6 @@ impl Coroutine { Ok(()) } - async fn get_static_token_eventually(&mut self, handle: StaticTokenHandle) -> StaticToken { - loop { - let state = self.state.borrow_mut(); - match state.their_resources.static_tokens.get(&handle) { - Some(token) => break token.clone(), - None => { - drop(state); - self.co - .yield_(Yield::Pending(Readyness::Resource(handle.into()))) - .await - } - } - } - } - async fn send_fingerprint( &mut self, range: ThreeDRange, @@ -480,32 +462,33 @@ impl Coroutine { Ok(()) } + async fn get_static_token_eventually(&mut self, handle: StaticTokenHandle) -> StaticToken { + // TODO: We can't use yield_pending here because we have to drop state before yielding + loop { + let mut state = self.state.borrow_mut(); + let fut = state + .their_resources + .static_tokens + .get_eventually_cloned(handle); + match self.poll_once(fut) { + Poll::Ready(output) => break output, + Poll::Pending => { + // We need to drop state here, otherwise 
the RefMut on state would hold + // across the yield. + drop(state); + self.co.yield_(Yield::Pending).await; + } + } + } + } + fn state_mut(&mut self) -> RefMut { self.state.borrow_mut() } async fn recv(&self, channel: LogicalChannel) -> Option> { let receiver = self.channels.receiver(channel); - loop { - match receiver.recv_message() { - Err(err) => return Some(Err(err)), - Ok(outcome) => match outcome { - ReadOutcome::Closed => { - debug!("recv: closed"); - return None; - } - ReadOutcome::BufferEmpty => { - self.co - .yield_(Yield::Pending(Readyness::Channel(channel, Interest::Recv))) - .await; - } - ReadOutcome::Item(message) => { - debug!(ch=%channel.fmt_short(), %message, "recv"); - return Some(Ok(message)); - } - }, - } - } + self.yield_wake(receiver.recv_message_async()).await } async fn send_reconciliation(&self, msg: impl Into) -> anyhow::Result<()> { @@ -519,26 +502,51 @@ impl Coroutine { async fn send(&self, message: impl Into) -> anyhow::Result<()> { let message: Message = message.into(); let channel = message.logical_channel(); - // debug!(%message, ?channel, "send"); let sender = self.channels.sender(channel); + self.yield_wake(sender.send_message_async(&message)).await?; + Ok(()) + } + async fn yield_wake(&self, fut: impl Future) -> T { + tokio::pin!(fut); + let mut ctx = Context::from_waker(&self.waker); loop { - match sender.send_message(&message)? 
{ - WriteOutcome::Closed => { - debug!("send: closed"); - return Err(anyhow!("channel closed")); - } - WriteOutcome::Ok => { - debug!(ch=%channel.fmt_short(), msg=%message, "sent"); - break Ok(()); - } - WriteOutcome::BufferFull => { - debug!(msg=%message, ch=%channel.fmt_short(), "sent buf full, yield"); - self.co - .yield_(Yield::Pending(Readyness::Channel(channel, Interest::Send))) - .await; + match Pin::new(&mut fut).poll(&mut ctx) { + Poll::Ready(output) => return output, + Poll::Pending => { + self.co.yield_(Yield::Pending).await; } } } } + + fn poll_once(&self, fut: impl Future) -> Poll { + tokio::pin!(fut); + let mut ctx = Context::from_waker(&self.waker); + Pin::new(&mut fut).poll(&mut ctx) + } + + // TODO: Failed to get this right. See e.g. + // https://users.rust-lang.org/t/lifetime-bounds-to-use-for-future-that-isnt-supposed-to-outlive-calling-scope/89277 + // async fn get_eventually(&mut self, f: F) -> T + // where + // F: for<'a> Fn(RefMut<'a, ScopedResources>) -> Fut, + // Fut: Future, + // T: 'static, + // { + // loop { + // let state = self.state.borrow_mut(); + // let resources = RefMut::map(state, |s| &mut s.their_resources); + // let fut = f(resources); + // match self.poll_once(fut) { + // Poll::Ready(output) => break output, + // Poll::Pending => { + // // We need to drop here, otherwise the RefMut on state would hold + // // across the yield. 
+ // // drop(fut); + // self.co.yield_(Yield::Pending).await; + // } + // } + // } + // } } diff --git a/iroh-willow/src/session/error.rs b/iroh-willow/src/session/error.rs index 7ceb6c7abe..edd8f8410a 100644 --- a/iroh-willow/src/session/error.rs +++ b/iroh-willow/src/session/error.rs @@ -31,7 +31,7 @@ pub enum Error { #[error("received an actor message for unknown session")] SessionNotFound, #[error("invalid parameters: {0}")] - InvalidParameters(&'static str) + InvalidParameters(&'static str), } impl From for Error { diff --git a/iroh-willow/src/session/resource.rs b/iroh-willow/src/session/resource.rs index 7f424d8789..71d7fc765e 100644 --- a/iroh-willow/src/session/resource.rs +++ b/iroh-willow/src/session/resource.rs @@ -1,6 +1,6 @@ use std::{ collections::{HashMap, VecDeque}, - task::Waker, + task::{Poll, Waker}, }; use crate::proto::wgps::{ @@ -108,6 +108,21 @@ where self.map.get(handle).as_ref().map(|r| &r.value) } + pub async fn get_eventually(&mut self, handle: H) -> &R { + std::future::poll_fn(|ctx| { + // cannot use self.get() and self.register_waker() here due to borrow checker. 
+ if let Some(resource) = self.map.get(&handle).as_ref().map(|r| &r.value) { + Poll::Ready(resource) + } else { + self.wakers + .entry(handle) + .or_default() + .push_back(ctx.waker().to_owned()); + Poll::Pending + } + }) + .await + } // pub async fn get_eventually(&self, handle: &H) -> Result<&R, Error> { // if let Some(resource) = self.map.get(handle).as_ref().map(|r| &r.value) { // Some(resource) @@ -125,6 +140,16 @@ where // } // } } +impl ResourceMap +where + H: IsHandle, + R: Eq + PartialEq + Clone + 'static, +{ + pub async fn get_eventually_cloned(&mut self, handle: H) -> R { + let out = self.get_eventually(handle).await; + (*out).clone() + } +} // #[derive(Debug)] // enum ResourceState { diff --git a/iroh-willow/src/store/actor.rs b/iroh-willow/src/store/actor.rs index b420cb0716..fceba504f3 100644 --- a/iroh-willow/src/store/actor.rs +++ b/iroh-willow/src/store/actor.rs @@ -20,7 +20,7 @@ use crate::{ willow::{AuthorisedEntry, Entry}, }, session::{ - coroutine::{Channels, Coroutine, Readyness, Yield}, + coroutine::{Channels, Coroutine, Yield}, Error, SessionInit, SessionState, SharedSessionState, }, }; @@ -42,28 +42,6 @@ pub enum Interest { Recv, } -// #[derive(Debug)] -// pub struct Notifier { -// tx: flume::Sender, -// } -// impl Notifier { -// pub async fn notify(&self, peer: NodeId, notify: Readyness) -> anyhow::Result<()> { -// let msg = ToActor::Resume { peer, notify }; -// self.tx.send_async(msg).await?; -// Ok(()) -// } -// pub fn notify_sync(&self, peer: NodeId, notify: Readyness) -> anyhow::Result<()> { -// let msg = ToActor::Resume { peer, notify }; -// self.tx.send(msg)?; -// Ok(()) -// } -// pub fn notifier(&self, peer: NodeId) -> Notifier { -// Notifier { -// tx: self.tx.clone(), -// } -// } -// } - #[derive(Debug, Clone)] pub struct AssignedWaker { waker: Notifier, @@ -309,11 +287,13 @@ impl StorageThread { self.remove_session(&peer, Err(error)); } } - ToActor::GetEntries { namespace, range, reply } => { + ToActor::GetEntries { + namespace, + 
range, + reply, + } => { let store = self.store.borrow(); - let entries = store - .get_entries(namespace, &range) - .filter_map(|r| r.ok()); + let entries = store.get_entries(namespace, &range).filter_map(|r| r.ok()); for entry in entries { reply.send(entry).ok(); } @@ -352,25 +332,27 @@ impl StorageThread { .ok_or(Error::SessionNotFound)?; let store_snapshot = Rc::new(self.store.borrow_mut().snapshot()?); + let id = { + let next_id = self.next_coro_id; + self.next_coro_id += 1; + next_id + }; let channels = session.channels.clone(); let state = session.state.clone(); let store_writer = Rc::clone(&self.store); + let waker = self.notifier.create_waker(id); let gen = Gen::new(move |co| { let routine = Coroutine { store_snapshot, store_writer, channels, + waker, state, co, }; (producer)(routine) }); - let id = { - let next_id = self.next_coro_id; - self.next_coro_id += 1; - next_id - }; session.coroutines.insert(id); let state = CoroutineState { id, @@ -390,30 +372,8 @@ impl StorageThread { GeneratorState::Yielded(yielded) => { trace!(?yielded, "yield"); match yielded { - Yield::Pending(waiting_for) => { - let session = self - .sessions - .get_mut(&coro.session_id) - .ok_or(Error::SessionNotFound)?; + Yield::Pending => { drop(_guard); - match waiting_for { - Readyness::Channel(ch, interest) => { - let waker = self.notifier.create_waker(coro.id); - match interest { - Interest::Send => { - session.channels.sender(ch).register_waker(waker) - } - Interest::Recv => { - session.channels.receiver(ch).register_waker(waker) - } - }; - } - Readyness::Resource(handle) => { - let waker = self.notifier.create_waker(coro.id); - let mut state = session.state.borrow_mut(); - state.their_resources.register_waker(handle, waker); - } - } self.coroutines.insert(coro.id, coro); break Ok(()); } From e65ff915c319bce4dfebe6d5b2e73e60e7852fa8 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 9 May 2024 23:47:09 +0200 Subject: [PATCH 026/198] cleanup --- 
iroh-willow/src/session/coroutine.rs | 66 +++++++++++++--------------- iroh-willow/src/store/actor.rs | 7 --- 2 files changed, 30 insertions(+), 43 deletions(-) diff --git a/iroh-willow/src/session/coroutine.rs b/iroh-willow/src/session/coroutine.rs index da78db342e..55c06be577 100644 --- a/iroh-willow/src/session/coroutine.rs +++ b/iroh-willow/src/session/coroutine.rs @@ -104,7 +104,7 @@ impl Coroutine { _ => return Err(Error::UnsupportedMessage), }; - if self.state_mut().reconciliation_is_complete() { + if self.state().reconciliation_is_complete() { self.channels.close_send(); } } @@ -113,29 +113,28 @@ impl Coroutine { } pub async fn run_control(mut self, init: SessionInit) -> Result<(), Error> { - let reveal_message = self.state_mut().commitment_reveal()?; - self.send_control(reveal_message).await?; + let reveal_message = self.state().commitment_reveal()?; + self.send(reveal_message).await?; let mut init = Some(init); while let Some(message) = self.recv(LogicalChannel::Control).await { let message = message?; match message { Message::CommitmentReveal(msg) => { - self.state_mut().on_commitment_reveal(msg)?; + self.state().on_commitment_reveal(msg)?; let init = init .take() .ok_or_else(|| Error::InvalidMessageInCurrentState)?; self.setup(init).await?; } Message::SetupBindReadCapability(msg) => { - self.state_mut().on_setup_bind_read_capability(msg)?; + self.state().on_setup_bind_read_capability(msg)?; } Message::SetupBindStaticToken(msg) => { - self.state_mut().on_setup_bind_static_token(msg); + self.state().on_setup_bind_static_token(msg); } Message::SetupBindAreaOfInterest(msg) => { - let start = self.state_mut().on_setup_bind_area_of_interest(msg)?; - // if let Some(start) = st + let start = self.state().on_setup_bind_area_of_interest(msg)?; self.co.yield_(Yield::StartReconciliation(start)).await; } Message::ControlFreeHandle(_msg) => { @@ -149,7 +148,7 @@ impl Coroutine { } async fn setup(&mut self, init: SessionInit) -> Result<(), Error> { - debug!(?init, 
"init"); + debug!(?init, "setup"); for (capability, aois) in init.interests.into_iter() { if *capability.receiver() != init.user_secret_key.public_key() { return Err(Error::WrongSecretKeyForCapability); @@ -157,29 +156,27 @@ impl Coroutine { // TODO: implement private area intersection let intersection_handle = 0.into(); - let (our_capability_handle, message) = self.state_mut().bind_and_sign_capability( + let (our_capability_handle, message) = self.state().bind_and_sign_capability( &init.user_secret_key, intersection_handle, capability, )?; if let Some(message) = message { - self.send_control(message).await?; + self.send(message).await?; } for area_of_interest in aois { - // for area in areas_of_interest { let msg = SetupBindAreaOfInterest { area_of_interest, authorisation: our_capability_handle, }; let (_our_handle, is_new) = self - .state_mut() + .state() .our_resources .areas_of_interest - // TODO: avoid clone .bind_if_new(msg.clone()); if is_new { - self.send_control(msg).await?; + self.send(msg).await?; } } } @@ -192,7 +189,7 @@ impl Coroutine { their_handle: AreaOfInterestHandle, ) -> Result<(), Error> { debug!("init reconciliation"); - let mut state = self.state_mut(); + let mut state = self.state(); let our_aoi = state.our_resources.areas_of_interest.try_get(&our_handle)?; let their_aoi = state .their_resources @@ -233,7 +230,7 @@ impl Coroutine { } = message; let namespace = { - let mut state = self.state_mut(); + let mut state = self.state(); state.reconciliation_started = true; state.clear_pending_range_if_some(our_handle, is_final_reply_for_range)?; state.range_is_authorised(&range, &our_handle, &their_handle)? 
@@ -252,7 +249,7 @@ impl Coroutine { receiver_handle: their_handle, is_final_reply_for_range: Some(range), }; - self.send_reconciliation(msg).await?; + self.send(msg).await?; } // case 2: fingerprint is empty else if their_fingerprint.is_empty() { @@ -292,7 +289,7 @@ impl Coroutine { } = message; let namespace = { - let mut state = self.state_mut(); + let mut state = self.state(); state.clear_pending_range_if_some(our_handle, is_final_reply_for_range)?; if state.pending_entries.is_some() { return Err(Error::InvalidMessageInCurrentState); @@ -324,7 +321,7 @@ impl Coroutine { .get_static_token_eventually(message.static_token_handle) .await; - self.state_mut().on_send_entry()?; + self.state().on_send_entry()?; let authorised_entry = AuthorisedEntry::try_from_parts( message.entry.entry, @@ -348,7 +345,7 @@ impl Coroutine { is_final_reply_for_range: Option, ) -> anyhow::Result<()> { { - let mut state = self.state_mut(); + let mut state = self.state(); state.pending_ranges.insert((our_handle, range.clone())); } let msg = ReconciliationSendFingerprint { @@ -358,7 +355,7 @@ impl Coroutine { receiver_handle: their_handle, is_final_reply_for_range, }; - self.send_reconciliation(msg).await?; + self.send(msg).await?; Ok(()) } @@ -373,7 +370,7 @@ impl Coroutine { our_count: Option, ) -> Result<(), Error> { if want_response { - let mut state = self.state_mut(); + let mut state = self.state(); state.pending_ranges.insert((our_handle, range.clone())); } let our_count = match our_count { @@ -389,7 +386,7 @@ impl Coroutine { receiver_handle: their_handle, is_final_reply_for_range, }; - self.send_reconciliation(msg).await?; + self.send(msg).await?; for authorised_entry in self .store_snapshot .get_entries_with_authorisation(namespace, &range) @@ -404,14 +401,14 @@ impl Coroutine { .borrow_mut() .bind_our_static_token(static_token)?; if let Some(msg) = static_token_bind_msg { - self.send_control(msg).await?; + self.send(msg).await?; } let msg = ReconciliationSendEntry { entry: 
LengthyEntry::new(entry, available), static_token_handle, dynamic_token, }; - self.send_reconciliation(msg).await?; + self.send(msg).await?; } Ok(()) } @@ -482,21 +479,17 @@ impl Coroutine { } } - fn state_mut(&mut self) -> RefMut { + fn state(&mut self) -> RefMut { self.state.borrow_mut() } async fn recv(&self, channel: LogicalChannel) -> Option> { let receiver = self.channels.receiver(channel); - self.yield_wake(receiver.recv_message_async()).await - } - - async fn send_reconciliation(&self, msg: impl Into) -> anyhow::Result<()> { - self.send(msg).await - } - - async fn send_control(&self, msg: impl Into) -> anyhow::Result<()> { - self.send(msg).await + let message = self.yield_wake(receiver.recv_message_async()).await; + if let Some(Ok(message)) = &message { + debug!(ch=%channel.fmt_short(), %message, "recv"); + } + message } async fn send(&self, message: impl Into) -> anyhow::Result<()> { @@ -504,6 +497,7 @@ impl Coroutine { let channel = message.logical_channel(); let sender = self.channels.sender(channel); self.yield_wake(sender.send_message_async(&message)).await?; + debug!(ch=%channel.fmt_short(), %message, "send"); Ok(()) } diff --git a/iroh-willow/src/store/actor.rs b/iroh-willow/src/store/actor.rs index fceba504f3..4f047e9b55 100644 --- a/iroh-willow/src/store/actor.rs +++ b/iroh-willow/src/store/actor.rs @@ -157,13 +157,6 @@ pub enum ToActor { init: SessionInit, on_done: oneshot::Sender>, }, - // DropSession { - // peer: NodeId, - // }, - // Resume { - // session_id: SessionId, - // coro_id: CoroId, - // }, GetEntries { namespace: NamespaceId, range: ThreeDRange, From ce6a2d4b01e5235aeeb0754cb6ceec8b343a074e Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Fri, 10 May 2024 01:48:48 +0200 Subject: [PATCH 027/198] refactor & cleanup channel impl --- iroh-willow/src/net.rs | 29 +-- iroh-willow/src/session/coroutine.rs | 69 +----- iroh-willow/src/session/error.rs | 9 +- iroh-willow/src/store/actor.rs | 72 ++++-- 
iroh-willow/src/util/channel.rs | 348 ++++++++++++++------------- 5 files changed, 267 insertions(+), 260 deletions(-) diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 29a7bcb2ca..e79634b1a5 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -15,10 +15,7 @@ use crate::{ }, session::{coroutine::Channels, Role, SessionInit, SessionState}, store::actor::{StoreHandle, ToActor}, - util::{ - channel::{channel, Receiver, Sender}, - Decoder, Encoder, - }, + util::channel::{inbound_channel, outbound_channel, Reader, Receiver, Sender, Writer}, }; const CHANNEL_CAP: usize = 1024 * 64; @@ -128,42 +125,42 @@ fn spawn_channel( send_stream: quinn::SendStream, recv_stream: quinn::RecvStream, ) -> (Sender, Receiver) { - let (send_tx, send_rx) = channel(cap); - let (recv_tx, recv_rx) = channel(cap); + let (sender, outbound_reader) = outbound_channel(cap); + let (inbound_writer, recveiver) = inbound_channel(cap); - let recv_fut = recv_loop(recv_stream, recv_tx) + let recv_fut = recv_loop(recv_stream, inbound_writer) .map_err(move |e| e.context(format!("receive loop for {ch:?} failed"))) .instrument(error_span!("recv", peer=%peer.fmt_short(), ch=%ch.fmt_short())); join_set.spawn(recv_fut); - let send_fut = send_loop(send_stream, send_rx) + let send_fut = send_loop(send_stream, outbound_reader) .map_err(move |e| e.context(format!("send loop for {ch:?} failed"))) .instrument(error_span!("send", peer=%peer.fmt_short(), ch=%ch.fmt_short())); join_set.spawn(send_fut); - (send_tx, recv_rx) + (sender, recveiver) } -async fn recv_loop( +async fn recv_loop( mut recv_stream: quinn::RecvStream, - channel_tx: Sender, + mut channel_writer: Writer, ) -> anyhow::Result<()> { while let Some(buf) = recv_stream.read_chunk(CHANNEL_CAP, true).await? 
{ - channel_tx.write_all_async(&buf.bytes[..]).await?; + channel_writer.write_all(&buf.bytes[..]).await?; trace!(len = buf.bytes.len(), "recv"); } recv_stream.stop(ERROR_CODE_CLOSE_GRACEFUL.into()).ok(); - channel_tx.close(); + channel_writer.close(); Ok(()) } -async fn send_loop( +async fn send_loop( mut send_stream: quinn::SendStream, - channel_rx: Receiver, + channel_reader: Reader, ) -> anyhow::Result<()> { - while let Some(data) = channel_rx.read_bytes_async().await { + while let Some(data) = channel_reader.read_bytes().await { let len = data.len(); send_stream.write_chunk(data).await?; trace!(len, "sent"); diff --git a/iroh-willow/src/session/coroutine.rs b/iroh-willow/src/session/coroutine.rs index 55c06be577..cd0af2a836 100644 --- a/iroh-willow/src/session/coroutine.rs +++ b/iroh-willow/src/session/coroutine.rs @@ -1,13 +1,9 @@ use std::{ cell::{RefCell, RefMut}, - future::Future, - pin::Pin, rc::Rc, - task::{Context, Poll, Waker}, + task::Poll, }; -use genawaiter::sync::Co; - use tracing::{debug, trace}; use crate::{ @@ -22,8 +18,8 @@ use crate::{ willow::AuthorisedEntry, }, session::{Error, SessionInit, SessionState, SharedSessionState}, - store::{ReadonlyStore, SplitAction, Store, SyncConfig}, - util::channel::{Receiver, Sender}, + store::{actor::WakeableCo, ReadonlyStore, SplitAction, Store, SyncConfig}, + util::channel::{ReadError, Receiver, Sender, WriteError}, }; #[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] @@ -38,9 +34,7 @@ pub struct Coroutine { pub store_writer: Rc>, pub channels: Channels, pub state: SharedSessionState, - pub waker: Waker, - #[debug(skip)] - pub co: Co, + pub co: WakeableCo, } #[derive(Debug, Clone)] @@ -460,14 +454,14 @@ impl Coroutine { } async fn get_static_token_eventually(&mut self, handle: StaticTokenHandle) -> StaticToken { - // TODO: We can't use yield_pending here because we have to drop state before yielding + // TODO: We can't use co.yield_wake here because we have to drop state before yielding loop { let mut 
state = self.state.borrow_mut(); let fut = state .their_resources .static_tokens .get_eventually_cloned(handle); - match self.poll_once(fut) { + match self.co.poll_once(fut) { Poll::Ready(output) => break output, Poll::Pending => { // We need to drop state here, otherwise the RefMut on state would hold @@ -483,64 +477,21 @@ impl Coroutine { self.state.borrow_mut() } - async fn recv(&self, channel: LogicalChannel) -> Option> { + async fn recv(&self, channel: LogicalChannel) -> Option> { let receiver = self.channels.receiver(channel); - let message = self.yield_wake(receiver.recv_message_async()).await; + let message = self.co.yield_wake(receiver.recv_message()).await; if let Some(Ok(message)) = &message { debug!(ch=%channel.fmt_short(), %message, "recv"); } message } - async fn send(&self, message: impl Into) -> anyhow::Result<()> { + async fn send(&self, message: impl Into) -> Result<(), WriteError> { let message: Message = message.into(); let channel = message.logical_channel(); let sender = self.channels.sender(channel); - self.yield_wake(sender.send_message_async(&message)).await?; + self.co.yield_wake(sender.send_message(&message)).await?; debug!(ch=%channel.fmt_short(), %message, "send"); Ok(()) } - - async fn yield_wake(&self, fut: impl Future) -> T { - tokio::pin!(fut); - let mut ctx = Context::from_waker(&self.waker); - loop { - match Pin::new(&mut fut).poll(&mut ctx) { - Poll::Ready(output) => return output, - Poll::Pending => { - self.co.yield_(Yield::Pending).await; - } - } - } - } - - fn poll_once(&self, fut: impl Future) -> Poll { - tokio::pin!(fut); - let mut ctx = Context::from_waker(&self.waker); - Pin::new(&mut fut).poll(&mut ctx) - } - - // TODO: Failed to get this right. See e.g. 
- // https://users.rust-lang.org/t/lifetime-bounds-to-use-for-future-that-isnt-supposed-to-outlive-calling-scope/89277 - // async fn get_eventually(&mut self, f: F) -> T - // where - // F: for<'a> Fn(RefMut<'a, ScopedResources>) -> Fut, - // Fut: Future, - // T: 'static, - // { - // loop { - // let state = self.state.borrow_mut(); - // let resources = RefMut::map(state, |s| &mut s.their_resources); - // let fut = f(resources); - // match self.poll_once(fut) { - // Poll::Ready(output) => break output, - // Poll::Pending => { - // // We need to drop here, otherwise the RefMut on state would hold - // // across the yield. - // // drop(fut); - // self.co.yield_(Yield::Pending).await; - // } - // } - // } - // } } diff --git a/iroh-willow/src/session/error.rs b/iroh-willow/src/session/error.rs index edd8f8410a..8a311a13f9 100644 --- a/iroh-willow/src/session/error.rs +++ b/iroh-willow/src/session/error.rs @@ -1,11 +1,18 @@ use ed25519_dalek::SignatureError; -use crate::proto::{meadowcap::InvalidCapability, wgps::ResourceHandle, willow::Unauthorised}; +use crate::{ + proto::{meadowcap::InvalidCapability, wgps::ResourceHandle, willow::Unauthorised}, + util::channel::{ReadError, WriteError}, +}; #[derive(Debug, thiserror::Error)] pub enum Error { #[error("local store failed")] Store(#[from] anyhow::Error), + #[error("failed to receive data: {0}")] + Receive(#[from] ReadError), + #[error("failed to send data: {0}")] + Write(#[from] WriteError), #[error("wrong secret key for capability")] WrongSecretKeyForCapability, #[error("missing resource {0:?}")] diff --git a/iroh-willow/src/store/actor.rs b/iroh-willow/src/store/actor.rs index 4f047e9b55..238a14927f 100644 --- a/iroh-willow/src/store/actor.rs +++ b/iroh-willow/src/store/actor.rs @@ -1,18 +1,23 @@ use std::{ cell::RefCell, collections::{HashMap, HashSet}, + future::Future, + pin::Pin, rc::Rc, sync::Arc, - task::Wake, + task::{Context, Poll, Wake, Waker}, thread::JoinHandle, }; use futures::{future::LocalBoxFuture, 
FutureExt}; -use genawaiter::{sync::Gen, GeneratorState}; +use genawaiter::{ + sync::{Co, Gen}, + GeneratorState, +}; +use iroh_base::key::NodeId; use tokio::sync::oneshot; use tracing::{debug, error, error_span, trace, warn, Span}; -use super::Store; use crate::{ proto::{ grouping::ThreeDRange, @@ -23,8 +28,8 @@ use crate::{ coroutine::{Channels, Coroutine, Yield}, Error, SessionInit, SessionState, SharedSessionState, }, + store::Store, }; -use iroh_base::key::NodeId; pub const CHANNEL_CAP: usize = 1024; @@ -93,13 +98,12 @@ impl StoreHandle { .name("sync-actor".to_string()) .spawn(move || { let span = error_span!("store", me=%me.fmt_short()); - let _enter = span.enter(); + let _guard = span.enter(); let mut actor = StorageThread { store: Rc::new(RefCell::new(store)), sessions: Default::default(), coroutines: Default::default(), - next_coro_id: Default::default(), inbox_rx: rx, notify_rx, @@ -127,11 +131,6 @@ impl StoreHandle { reply_rx.await??; Ok(()) } - // - // pub fn ingest_stream(&self, stream: impl Stream) -> Result<()> { - // } - // pub fn ingest_iter(&self, iter: impl ) -> Result<()> { - // } } impl Drop for StoreHandle { @@ -190,19 +189,18 @@ pub struct StorageThread { store: Rc>, sessions: HashMap, coroutines: HashMap, + notifier: Notifier, next_coro_id: u64, - notifier: Notifier, // actor_tx: flume::Sender, } -type ReconcileFut = LocalBoxFuture<'static, Result<(), Error>>; -type ReconcileGen = Gen; +type CoroFut = LocalBoxFuture<'static, Result<(), Error>>; #[derive(derive_more::Debug)] struct CoroutineState { id: CoroId, session_id: SessionId, #[debug("Generator")] - gen: ReconcileGen, + gen: Gen, span: Span, finalizes_session: bool, } @@ -315,7 +313,7 @@ impl StorageThread { fn start_coroutine( &mut self, session_id: SessionId, - producer: impl FnOnce(Coroutine) -> ReconcileFut, + producer: impl FnOnce(Coroutine) -> CoroFut, span: Span, finalizes_session: bool, ) -> Result<(), Error> { @@ -340,9 +338,8 @@ impl StorageThread { store_snapshot, 
store_writer, channels, - waker, state, - co, + co: WakeableCo::new(co, waker), }; (producer)(routine) }); @@ -391,10 +388,39 @@ impl StorageThread { } } } +} + +#[derive(derive_more::Debug)] +pub struct WakeableCo { + pub waker: Waker, + #[debug(skip)] + pub co: Co, +} + +impl WakeableCo { + pub fn new(co: Co, waker: Waker) -> Self { + Self { co, waker } + } + pub async fn yield_(&self, value: Yield) { + self.co.yield_(value).await + } + + pub async fn yield_wake(&self, fut: impl Future) -> T { + tokio::pin!(fut); + let mut ctx = Context::from_waker(&self.waker); + loop { + match Pin::new(&mut fut).poll(&mut ctx) { + Poll::Ready(output) => return output, + Poll::Pending => { + self.co.yield_(Yield::Pending).await; + } + } + } + } - // fn next_coro_id(&mut self) -> u64 { - // let next_id = self.next_coro_id; - // self.next_coro_id += 1; - // next_id - // } + pub fn poll_once(&self, fut: impl Future) -> Poll { + tokio::pin!(fut); + let mut ctx = Context::from_waker(&self.waker); + Pin::new(&mut fut).poll(&mut ctx) + } } diff --git a/iroh-willow/src/util/channel.rs b/iroh-willow/src/util/channel.rs index 2d9f4d146a..af219f47bf 100644 --- a/iroh-willow/src/util/channel.rs +++ b/iroh-willow/src/util/channel.rs @@ -1,102 +1,116 @@ use std::{ + future::poll_fn, io, marker::PhantomData, + pin::Pin, sync::{Arc, Mutex}, - task::Waker, + task::{self, Poll, Waker}, }; -use anyhow::anyhow; use bytes::{Buf, Bytes, BytesMut}; -use tokio::sync::Notify; +use tokio::io::AsyncWrite; use tracing::trace; use super::{DecodeOutcome, Decoder, Encoder}; -pub fn channel(cap: usize) -> (Sender, Receiver) { +pub fn pipe(cap: usize) -> (Writer, Reader) { + let shared = Shared::new(cap); + let writer = Writer { + shared: shared.clone(), + }; + let reader = Reader { shared }; + (writer, reader) +} + +pub fn outbound_channel(cap: usize) -> (Sender, Reader) { let shared = Shared::new(cap); - let shared = Arc::new(Mutex::new(shared)); let sender = Sender { shared: shared.clone(), _ty: PhantomData, 
}; + let reader = Reader { shared }; + (sender, reader) +} + +pub fn inbound_channel(cap: usize) -> (Writer, Receiver) { + let shared = Shared::new(cap); + let writer = Writer { + shared: shared.clone(), + }; let receiver = Receiver { shared, _ty: PhantomData, }; - (sender, receiver) + (writer, receiver) } -#[derive(Debug)] -pub enum ReadOutcome { - BufferEmpty, +#[derive(Debug, thiserror::Error)] +pub enum WriteError { + #[error("writing to closed channel")] Closed, - Item(T), + #[error("encoding failed: {0}")] + Encode(anyhow::Error), } -#[derive(Debug)] -pub enum WriteOutcome { - BufferFull, - Closed, - Ok, +#[derive(Debug, thiserror::Error)] +pub enum ReadError { + #[error("channel closed with incomplete message")] + ClosedIncomplete, + #[error("decoding failed: {0}")] + Decode(anyhow::Error), } +// Shared state for a in-memory pipe. +// +// Roughly modeled after https://docs.rs/tokio/latest/src/tokio/io/util/mem.rs.html#58 #[derive(Debug)] struct Shared { buf: BytesMut, max_buffer_size: usize, - notify_readable: Arc, - notify_writable: Arc, - wakers_on_writable: Vec, - wakers_on_readable: Vec, - closed: bool, + write_wakers: Vec, + read_wakers: Vec, + is_closed: bool, } impl Shared { - fn new(cap: usize) -> Self { - Self { + fn new(cap: usize) -> Arc> { + let shared = Self { buf: BytesMut::new(), max_buffer_size: cap, - notify_readable: Default::default(), - notify_writable: Default::default(), - wakers_on_writable: Default::default(), - wakers_on_readable: Default::default(), - closed: false, - } + write_wakers: Default::default(), + read_wakers: Default::default(), + is_closed: false, + }; + Arc::new(Mutex::new(shared)) } + fn close(&mut self) { - self.closed = true; - self.notify_writable(); - self.notify_readable(); + self.is_closed = true; + self.wake_writable(); + self.wake_readable(); } - fn closed(&self) -> bool { - self.closed + fn is_closed(&self) -> bool { + self.is_closed } - fn peek_read(&self) -> &[u8] { + fn peek(&self) -> &[u8] { &self.buf[..] 
} - fn recv_buf_is_empty(&self) -> bool { + fn is_empty(&self) -> bool { self.buf.is_empty() } - fn read_advance(&mut self, cnt: usize) { - self.buf.advance(cnt); - if cnt > 0 { - self.notify_writable(); - } - } - - fn recv_bytes(&mut self) -> Bytes { + fn read_bytes(&mut self) -> Bytes { let len = self.buf.len(); if len > 0 { - self.notify_writable(); + self.wake_writable(); } self.buf.split_to(len).freeze() } - fn writable_mut(&mut self, len: usize) -> Option<&mut [u8]> { + fn writable_slice_exact(&mut self, len: usize) -> Option<&mut [u8]> { if self.remaining_write_capacity() < len { None } else { @@ -108,121 +122,147 @@ impl Shared { } } - fn send_message(&mut self, item: &T) -> anyhow::Result { - let len = item.encoded_len(); - if self.closed() { - return Ok(WriteOutcome::Closed); + fn poll_write( + &mut self, + cx: &mut task::Context<'_>, + buf: &[u8], + ) -> Poll> { + if self.is_closed { + return Poll::Ready(Err(std::io::ErrorKind::BrokenPipe.into())); + } + let avail = self.max_buffer_size - self.buf.len(); + if avail == 0 { + self.write_wakers.push(cx.waker().to_owned()); + return Poll::Pending; + } + + let len = buf.len().min(avail); + self.buf.extend_from_slice(&buf[..len]); + self.wake_readable(); + Poll::Ready(Ok(len)) + } + + fn poll_read_bytes(&mut self, cx: &mut task::Context<'_>) -> Poll> { + if !self.is_empty() { + Poll::Ready(Some(self.read_bytes())) + } else if self.is_closed() { + Poll::Ready(None) + } else { + self.read_wakers.push(cx.waker().to_owned()); + Poll::Pending + } + } + + fn poll_send_message( + &mut self, + item: &T, + cx: &mut task::Context<'_>, + ) -> Poll> { + if self.is_closed() { + return Poll::Ready(Err(WriteError::Closed)); } - if let Some(slice) = self.writable_mut(len) { + let len = item.encoded_len(); + if let Some(slice) = self.writable_slice_exact(len) { let mut cursor = io::Cursor::new(slice); - item.encode_into(&mut cursor)?; - self.notify_readable(); - Ok(WriteOutcome::Ok) + item.encode_into(&mut 
cursor).map_err(WriteError::Encode)?; + self.wake_readable(); + Poll::Ready(Ok(())) } else { - Ok(WriteOutcome::BufferFull) + self.write_wakers.push(cx.waker().to_owned()); + Poll::Pending } } - fn recv_message(&mut self) -> anyhow::Result> { - let data = self.peek_read(); - trace!("read, remaining {}", data.len()); - let res = match T::decode_from(data)? { + fn poll_recv_message( + &mut self, + cx: &mut task::Context<'_>, + ) -> Poll>> { + let buf = self.peek(); + trace!("read, remaining {}", buf.len()); + if self.is_closed() && self.is_empty() { + return Poll::Ready(None); + } + match T::decode_from(buf).map_err(ReadError::Decode)? { DecodeOutcome::NeedMoreData => { - if self.closed() { - ReadOutcome::Closed + if self.is_closed() { + Poll::Ready(Some(Err(ReadError::ClosedIncomplete))) } else { - ReadOutcome::BufferEmpty + self.read_wakers.push(cx.waker().to_owned()); + Poll::Pending } } DecodeOutcome::Decoded { item, consumed } => { - self.read_advance(consumed); - ReadOutcome::Item(item) + self.buf.advance(consumed); + self.wake_writable(); + Poll::Ready(Some(Ok(item))) } - }; - Ok(res) + } } fn remaining_write_capacity(&self) -> usize { self.max_buffer_size - self.buf.len() } - fn notify_readable(&mut self) { - self.notify_readable.notify_waiters(); - for waker in self.wakers_on_readable.drain(..) { + fn wake_readable(&mut self) { + for waker in self.read_wakers.drain(..) { waker.wake(); } } - fn notify_writable(&mut self) { - self.notify_writable.notify_waiters(); - for waker in self.wakers_on_writable.drain(..) { + fn wake_writable(&mut self) { + for waker in self.write_wakers.drain(..) 
{ waker.wake(); } } } #[derive(Debug)] -pub struct Receiver { +pub struct Reader { shared: Arc>, - _ty: PhantomData, } -impl Clone for Receiver { - fn clone(&self) -> Self { - Self { - shared: Arc::clone(&self.shared), - _ty: PhantomData, - } +impl Reader { + pub fn close(&self) { + self.shared.lock().unwrap().close() + } + + pub async fn read_bytes(&self) -> Option { + poll_fn(|cx| self.shared.lock().unwrap().poll_read_bytes(cx)).await } } -impl Receiver { +#[derive(Debug)] +pub struct Writer { + shared: Arc>, +} + +impl Writer { pub fn close(&self) { self.shared.lock().unwrap().close() } +} - pub fn register_waker(&self, waker: Waker) { - self.shared.lock().unwrap().wakers_on_readable.push(waker); - } - - pub async fn read_bytes_async(&self) -> Option { - loop { - let notify = { - let mut shared = self.shared.lock().unwrap(); - if !shared.recv_buf_is_empty() { - return Some(shared.recv_bytes()); - } - if shared.closed() { - return None; - } - shared.notify_readable.clone() - }; - notify.notified().await - } +impl AsyncWrite for Writer { + fn poll_write( + self: Pin<&mut Self>, + cx: &mut task::Context<'_>, + buf: &[u8], + ) -> Poll> { + self.shared.lock().unwrap().poll_write(cx, buf) } - pub fn recv_message(&self) -> anyhow::Result> { - let mut shared = self.shared.lock().unwrap(); - let outcome = shared.recv_message()?; - Ok(outcome) + fn poll_flush( + self: Pin<&mut Self>, + _cx: &mut task::Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) } - pub async fn recv_message_async(&self) -> Option> { - loop { - let notify = { - let mut shared = self.shared.lock().unwrap(); - match shared.recv_message() { - Err(err) => return Some(Err(err)), - Ok(outcome) => match outcome { - ReadOutcome::BufferEmpty => shared.notify_readable.clone(), - ReadOutcome::Closed => return None, - ReadOutcome::Item(item) => { - return Some(Ok(item)); - } - }, - } - }; - notify.notified().await; - } + fn poll_shutdown( + self: Pin<&mut Self>, + _cx: &mut task::Context<'_>, + ) -> Poll> { + 
self.close(); + Poll::Ready(Ok(())) } } @@ -232,60 +272,46 @@ pub struct Sender { _ty: PhantomData, } -impl Clone for Sender { - fn clone(&self) -> Self { - Self { - shared: Arc::clone(&self.shared), - _ty: PhantomData, - } +impl Sender { + pub fn close(&self) { + self.shared.lock().unwrap().close() + } + + pub async fn send_message(&self, message: &T) -> Result<(), WriteError> { + poll_fn(|cx| self.shared.lock().unwrap().poll_send_message(message, cx)).await } } -impl Sender { +#[derive(Debug)] +pub struct Receiver { + shared: Arc>, + _ty: PhantomData, +} + +impl Receiver { pub fn close(&self) { self.shared.lock().unwrap().close() } - pub fn register_waker(&self, waker: Waker) { - self.shared.lock().unwrap().wakers_on_writable.push(waker); + pub async fn recv_message(&self) -> Option> { + poll_fn(|cx| self.shared.lock().unwrap().poll_recv_message(cx)).await } +} - pub async fn write_all_async(&self, data: &[u8]) -> anyhow::Result<()> { - loop { - let notify = { - let mut shared = self.shared.lock().unwrap(); - if shared.closed() { - break Err(anyhow!("channel closed")); - } - if shared.remaining_write_capacity() < data.len() { - let notify = shared.notify_writable.clone(); - notify.clone() - } else { - let out = shared.writable_mut(data.len()).expect("just checked"); - out.copy_from_slice(data); - shared.notify_readable(); - break Ok(()); - } - }; - notify.notified().await; +impl Clone for Receiver { + fn clone(&self) -> Self { + Self { + shared: Arc::clone(&self.shared), + _ty: PhantomData, } } +} - pub fn send_message(&self, message: &T) -> anyhow::Result { - self.shared.lock().unwrap().send_message(message) - } - - pub async fn send_message_async(&self, message: &T) -> anyhow::Result<()> { - loop { - let notify = { - let mut shared = self.shared.lock().unwrap(); - match shared.send_message(message)? 
{ - WriteOutcome::Ok => return Ok(()), - WriteOutcome::BufferFull => shared.notify_writable.clone(), - WriteOutcome::Closed => return Err(anyhow!("channel is closed")), - } - }; - notify.notified().await; +impl Clone for Sender { + fn clone(&self) -> Self { + Self { + shared: Arc::clone(&self.shared), + _ty: PhantomData, } } } From ae71855dc911ef2c97169f97eb135fc2a147ce74 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Fri, 10 May 2024 12:39:27 +0200 Subject: [PATCH 028/198] some further cleanup refactoring --- iroh-willow/src/{store => }/actor.rs | 104 +++++++++------ iroh-willow/src/lib.rs | 1 + iroh-willow/src/net.rs | 60 ++++----- iroh-willow/src/proto/willow.rs | 36 ++++++ iroh-willow/src/session.rs | 2 + iroh-willow/src/session/channels.rs | 65 ++++++++++ iroh-willow/src/session/coroutine.rs | 187 +++++++++++++-------------- iroh-willow/src/session/error.rs | 2 + iroh-willow/src/session/state.rs | 19 +-- iroh-willow/src/store.rs | 13 +- iroh-willow/src/util.rs | 9 +- iroh-willow/src/util/channel.rs | 2 - 12 files changed, 302 insertions(+), 198 deletions(-) rename iroh-willow/src/{store => }/actor.rs (83%) create mode 100644 iroh-willow/src/session/channels.rs diff --git a/iroh-willow/src/store/actor.rs b/iroh-willow/src/actor.rs similarity index 83% rename from iroh-willow/src/store/actor.rs rename to iroh-willow/src/actor.rs index 238a14927f..79ec42e9f8 100644 --- a/iroh-willow/src/store/actor.rs +++ b/iroh-willow/src/actor.rs @@ -22,11 +22,12 @@ use crate::{ proto::{ grouping::ThreeDRange, keys::NamespaceId, + wgps::AreaOfInterestHandle, willow::{AuthorisedEntry, Entry}, }, session::{ - coroutine::{Channels, Coroutine, Yield}, - Error, SessionInit, SessionState, SharedSessionState, + coroutine::{ControlRoutine, ReconcileRoutine}, + Channels, Error, SessionInit, SessionState, SharedSessionState, }, store::Store, }; @@ -97,7 +98,7 @@ impl StoreHandle { let join_handle = std::thread::Builder::new() .name("sync-actor".to_string()) .spawn(move 
|| { - let span = error_span!("store", me=%me.fmt_short()); + let span = error_span!("willow_thread", me=%me.fmt_short()); let _guard = span.enter(); let mut actor = StorageThread { @@ -164,7 +165,7 @@ pub enum ToActor { }, IngestEntry { entry: AuthorisedEntry, - reply: oneshot::Sender>, + reply: oneshot::Sender>, }, Shutdown { #[debug(skip)] @@ -177,6 +178,7 @@ struct Session { state: SharedSessionState, channels: Channels, coroutines: HashSet, + span: Span, on_done: oneshot::Sender>, } @@ -255,25 +257,20 @@ impl StorageThread { peer, state, channels, - init: setup, + init, on_done, } => { + let span = error_span!("session", peer=%peer.fmt_short()); let session = Session { state: Rc::new(RefCell::new(state)), channels, coroutines: Default::default(), + span, on_done, }; self.sessions.insert(peer, session); - debug!("start coroutine control"); - - if let Err(error) = self.start_coroutine( - peer, - |routine| routine.run_control(setup).boxed_local(), - error_span!("session", peer=%peer.fmt_short()), - true, - ) { + if let Err(error) = self.start_control_routine(peer, init) { warn!(?error, peer=%peer.fmt_short(), "abort session: starting failed"); self.remove_session(&peer, Err(error)); } @@ -310,40 +307,69 @@ impl StorageThread { } } + fn start_control_routine( + &mut self, + session_id: SessionId, + init: SessionInit, + ) -> Result<(), Error> { + let create_fn = |co, session: &mut Session| { + let channels = session.channels.clone(); + let state = session.state.clone(); + ControlRoutine::new(co, channels, state) + .run(init) + .boxed_local() + }; + let span_fn = || error_span!("control"); + self.start_coroutine(session_id, create_fn, span_fn, true) + } + + fn start_reconcile_routine( + &mut self, + session_id: SessionId, + start: Option, + ) -> Result<(), Error> { + let store_snapshot = Rc::new(self.store.borrow_mut().snapshot()?); + let store_writer = Rc::clone(&self.store); + let create_fn = |co, session: &mut Session| { + let channels = session.channels.clone(); 
+ let state = session.state.clone(); + ReconcileRoutine::new(co, channels, state, store_snapshot, store_writer) + .run(start) + .boxed_local() + }; + let span_fn = || error_span!("reconcile"); + self.start_coroutine(session_id, create_fn, span_fn, false) + } + fn start_coroutine( &mut self, session_id: SessionId, - producer: impl FnOnce(Coroutine) -> CoroFut, - span: Span, + create_fn: impl FnOnce(WakeableCo, &mut Session) -> CoroFut, + span_fn: impl FnOnce() -> Span, finalizes_session: bool, ) -> Result<(), Error> { let session = self .sessions .get_mut(&session_id) .ok_or(Error::SessionNotFound)?; - let store_snapshot = Rc::new(self.store.borrow_mut().snapshot()?); let id = { - let next_id = self.next_coro_id; + let id = self.next_coro_id; self.next_coro_id += 1; - next_id + id }; - let channels = session.channels.clone(); - let state = session.state.clone(); - let store_writer = Rc::clone(&self.store); + + session.coroutines.insert(id); let waker = self.notifier.create_waker(id); + let _guard = session.span.enter(); + let span = span_fn(); + drop(_guard); + let gen = Gen::new(move |co| { - let routine = Coroutine { - store_snapshot, - store_writer, - channels, - state, - co: WakeableCo::new(co, waker), - }; - (producer)(routine) + let co = WakeableCo::new(co, waker); + create_fn(co, session) }); - session.coroutines.insert(id); let state = CoroutineState { id, session_id, @@ -368,18 +394,12 @@ impl StorageThread { break Ok(()); } Yield::StartReconciliation(start) => { - debug!("start coroutine reconciliation"); - self.start_coroutine( - coro.session_id, - |state| state.run_reconciliation(start).boxed_local(), - error_span!("reconcile"), - false, - )?; + self.start_reconcile_routine(coro.session_id, start)?; } } } GeneratorState::Complete(res) => { - debug!(?res, "complete"); + debug!(?res, "routine completed"); if res.is_err() || coro.finalizes_session { self.remove_session(&coro.session_id, res) } @@ -390,6 +410,14 @@ impl StorageThread { } } +pub type 
InitWithArea = (AreaOfInterestHandle, AreaOfInterestHandle); + +#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] +pub enum Yield { + Pending, + StartReconciliation(Option), +} + #[derive(derive_more::Debug)] pub struct WakeableCo { pub waker: Waker, diff --git a/iroh-willow/src/lib.rs b/iroh-willow/src/lib.rs index 1375ed16a4..ad2fc4f743 100644 --- a/iroh-willow/src/lib.rs +++ b/iroh-willow/src/lib.rs @@ -2,6 +2,7 @@ #![allow(missing_docs)] +pub mod actor; pub mod net; pub mod proto; pub mod session; diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index e79634b1a5..9a48762438 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -9,20 +9,18 @@ use tokio::{ use tracing::{debug, error_span, instrument, trace, warn, Instrument}; use crate::{ + actor::{StoreHandle, ToActor}, proto::wgps::{ AccessChallenge, ChallengeHash, LogicalChannel, Message, CHALLENGE_HASH_LENGTH, MAX_PAYLOAD_SIZE_POWER, }, - session::{coroutine::Channels, Role, SessionInit, SessionState}, - store::actor::{StoreHandle, ToActor}, + session::{channels::Channels, Role, SessionInit, SessionState}, util::channel::{inbound_channel, outbound_channel, Reader, Receiver, Sender, Writer}, }; const CHANNEL_CAP: usize = 1024 * 64; -const ERROR_CODE_CLOSE_GRACEFUL: u16 = 1; - -#[instrument(skip_all, fields(me=%me.fmt_short(), role=?our_role))] +#[instrument(skip_all, name = "willow_net", fields(me=%me.fmt_short(), peer=%peer.fmt_short()))] pub async fn run( me: NodeId, store: StoreHandle, @@ -31,6 +29,7 @@ pub async fn run( our_role: Role, init: SessionInit, ) -> anyhow::Result<()> { + debug!(?our_role, "connected"); let mut join_set = JoinSet::new(); let (mut control_send_stream, mut control_recv_stream) = match our_role { Role::Alfie => conn.open_bi().await?, @@ -39,18 +38,16 @@ pub async fn run( control_send_stream.set_priority(i32::MAX)?; let our_nonce: AccessChallenge = rand::random(); - debug!("start"); let (received_commitment, max_payload_size) = exchange_commitments( &mut 
control_send_stream, &mut control_recv_stream, &our_nonce, ) .await?; - debug!("exchanged comittments"); + debug!("commitments exchanged"); let (control_send, control_recv) = spawn_channel( &mut join_set, - peer, LogicalChannel::Control, CHANNEL_CAP, control_send_stream, @@ -65,13 +62,12 @@ pub async fn run( reconciliation_recv_stream.read_u8().await?; let (reconciliation_send, reconciliation_recv) = spawn_channel( &mut join_set, - peer, LogicalChannel::Reconciliation, CHANNEL_CAP, reconciliation_send_stream, reconciliation_recv_stream, ); - debug!("reconcile channel open"); + debug!("channels opened"); let channels = Channels { control_send, @@ -97,7 +93,9 @@ pub async fn run( Ok(()) }); - join_all(join_set).await + join_all(join_set).await?; + debug!("all tasks finished"); + Ok(()) } async fn join_all(mut join_set: JoinSet>) -> anyhow::Result<()> { @@ -119,7 +117,6 @@ async fn join_all(mut join_set: JoinSet>) -> anyhow::Result<( fn spawn_channel( join_set: &mut JoinSet>, - peer: NodeId, ch: LogicalChannel, cap: usize, send_stream: quinn::SendStream, @@ -130,13 +127,13 @@ fn spawn_channel( let recv_fut = recv_loop(recv_stream, inbound_writer) .map_err(move |e| e.context(format!("receive loop for {ch:?} failed"))) - .instrument(error_span!("recv", peer=%peer.fmt_short(), ch=%ch.fmt_short())); + .instrument(error_span!("recv", ch=%ch.fmt_short())); join_set.spawn(recv_fut); let send_fut = send_loop(send_stream, outbound_reader) .map_err(move |e| e.context(format!("send loop for {ch:?} failed"))) - .instrument(error_span!("send", peer=%peer.fmt_short(), ch=%ch.fmt_short())); + .instrument(error_span!("send", ch=%ch.fmt_short())); join_set.spawn(send_fut); @@ -151,7 +148,6 @@ async fn recv_loop( channel_writer.write_all(&buf.bytes[..]).await?; trace!(len = buf.bytes.len(), "recv"); } - recv_stream.stop(ERROR_CODE_CLOSE_GRACEFUL.into()).ok(); channel_writer.close(); Ok(()) } @@ -165,13 +161,7 @@ async fn send_loop( send_stream.write_chunk(data).await?; trace!(len, 
"sent"); } - match send_stream.finish().await { - Ok(()) => {} - // If the other side closed gracefully, we are good. - Err(quinn::WriteError::Stopped(code)) - if code.into_inner() == ERROR_CODE_CLOSE_GRACEFUL as u64 => {} - Err(err) => return Err(err.into()), - } + send_stream.finish().await?; Ok(()) } @@ -207,6 +197,7 @@ mod tests { use tracing::{debug, info}; use crate::{ + actor::{StoreHandle, ToActor}, net::run, proto::{ grouping::{AreaOfInterest, ThreeDRange}, @@ -216,10 +207,7 @@ mod tests { willow::{Entry, InvalidPath, Path, WriteCapability}, }, session::{Role, SessionInit}, - store::{ - actor::{StoreHandle, ToActor}, - MemoryStore, - }, + store::MemoryStore, }; const ALPN: &[u8] = b"iroh-willow/0"; @@ -374,18 +362,15 @@ mod tests { ) -> anyhow::Result { let user_secret = UserSecretKey::generate(rng); let (read_cap, write_cap) = create_capabilities(namespace_secret, user_secret.public_key()); - let subspace_id = user_secret.id(); - let namespace_id = namespace_secret.id(); for i in 0..count { - let path = path_fn(i); - let entry = Entry { - namespace_id, - subspace_id, - path: path.expect("invalid path"), - timestamp: 10, - payload_length: 2, - payload_digest: Hash::new("cool things"), - }; + let path = path_fn(i).expect("invalid path"); + let entry = Entry::new_current( + namespace_secret.id(), + user_secret.id(), + path, + Hash::new("hello"), + 5, + ); track_entries.extend([entry.clone()]); let entry = entry.attach_authorisation(write_cap.clone(), &user_secret)?; store.ingest_entry(entry).await?; @@ -409,7 +394,6 @@ mod tests { AccessMode::Write, )); (read_capability, write_capability) - // let init = SessionInit::with_interest(secret_key, read_capability, AreaOfInterest::full()) } // async fn get_entries_debug( diff --git a/iroh-willow/src/proto/willow.rs b/iroh-willow/src/proto/willow.rs index b1dd9dc19f..5161e92d33 100644 --- a/iroh-willow/src/proto/willow.rs +++ b/iroh-willow/src/proto/willow.rs @@ -4,6 +4,8 @@ use bytes::Bytes; use 
iroh_base::hash::Hash; use serde::{Deserialize, Serialize}; +use crate::util::system_time_now; + use super::{ keys::{self, UserSecretKey}, meadowcap::{self, attach_authorisation, is_authorised_write, InvalidParams, McCapability}, @@ -174,6 +176,40 @@ pub struct Entry { } impl Entry { + pub fn new( + namespace_id: NamespaceId, + subspace_id: SubspaceId, + path: Path, + timestamp: u64, + payload_digest: PayloadDigest, + payload_length: u64, + ) -> Self { + Self { + namespace_id, + subspace_id, + path, + timestamp, + payload_length, + payload_digest, + } + } + pub fn new_current( + namespace_id: NamespaceId, + subspace_id: SubspaceId, + path: Path, + payload_digest: PayloadDigest, + payload_length: u64, + ) -> Self { + let timestamp = system_time_now(); + Self::new( + namespace_id, + subspace_id, + path, + timestamp, + payload_digest, + payload_length, + ) + } pub fn is_newer_than(&self, other: &Entry) -> bool { other.timestamp < self.timestamp || (other.timestamp == self.timestamp && other.payload_digest < self.payload_digest) diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 8bee846810..6d6c57a86b 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -2,12 +2,14 @@ use std::collections::{HashMap, HashSet}; use crate::proto::{grouping::AreaOfInterest, keys::UserSecretKey, wgps::ReadCapability}; +pub mod channels; pub mod coroutine; mod error; pub mod resource; mod state; mod util; +pub use self::channels::Channels; pub use self::error::Error; pub use self::state::{SessionState, SharedSessionState}; diff --git a/iroh-willow/src/session/channels.rs b/iroh-willow/src/session/channels.rs new file mode 100644 index 0000000000..b8ce397c4f --- /dev/null +++ b/iroh-willow/src/session/channels.rs @@ -0,0 +1,65 @@ +use tracing::debug; + +use crate::{ + proto::wgps::{LogicalChannel, Message}, + actor::WakeableCo, + util::channel::{ReadError, Receiver, Sender, WriteError}, +}; + +#[derive(Debug, Clone)] +pub struct Channels { + pub 
control_send: Sender, + pub control_recv: Receiver, + pub reconciliation_send: Sender, + pub reconciliation_recv: Receiver, +} + +impl Channels { + pub fn close_all(&self) { + self.control_send.close(); + self.control_recv.close(); + self.reconciliation_send.close(); + self.reconciliation_recv.close(); + } + pub fn close_send(&self) { + self.control_send.close(); + self.reconciliation_send.close(); + } + pub fn sender(&self, channel: LogicalChannel) -> &Sender { + match channel { + LogicalChannel::Control => &self.control_send, + LogicalChannel::Reconciliation => &self.reconciliation_send, + } + } + pub fn receiver(&self, channel: LogicalChannel) -> &Receiver { + match channel { + LogicalChannel::Control => &self.control_recv, + LogicalChannel::Reconciliation => &self.reconciliation_recv, + } + } + + pub async fn send_co( + &self, + co: &WakeableCo, + message: impl Into, + ) -> Result<(), WriteError> { + let message = message.into(); + let channel = message.logical_channel(); + co.yield_wake(self.sender(channel).send_message(&message)) + .await?; + debug!(%message, ch=%channel.fmt_short(), "send"); + Ok(()) + } + + pub async fn recv_co( + &self, + co: &WakeableCo, + channel: LogicalChannel, + ) -> Option> { + let message = co.yield_wake(self.receiver(channel).recv_message()).await; + if let Some(Ok(message)) = &message { + debug!(%message, ch=%channel.fmt_short(),"recv"); + } + message + } +} diff --git a/iroh-willow/src/session/coroutine.rs b/iroh-willow/src/session/coroutine.rs index cd0af2a836..6c05cac62a 100644 --- a/iroh-willow/src/session/coroutine.rs +++ b/iroh-willow/src/session/coroutine.rs @@ -7,6 +7,7 @@ use std::{ use tracing::{debug, trace}; use crate::{ + actor::{InitWithArea, WakeableCo, Yield}, proto::{ grouping::ThreeDRange, keys::NamespaceId, @@ -17,96 +18,27 @@ use crate::{ }, willow::AuthorisedEntry, }, - session::{Error, SessionInit, SessionState, SharedSessionState}, - store::{actor::WakeableCo, ReadonlyStore, SplitAction, Store, SyncConfig}, 
- util::channel::{ReadError, Receiver, Sender, WriteError}, + session::{Channels, Error, SessionInit, SessionState, SharedSessionState}, + store::{ReadonlyStore, SplitAction, Store, SyncConfig}, + util::channel::{ReadError, WriteError}, }; -#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] -pub enum Yield { - Pending, - StartReconciliation(Option<(AreaOfInterestHandle, AreaOfInterestHandle)>), -} - #[derive(derive_more::Debug)] -pub struct Coroutine { - pub store_snapshot: Rc, - pub store_writer: Rc>, - pub channels: Channels, - pub state: SharedSessionState, - pub co: WakeableCo, -} - -#[derive(Debug, Clone)] -pub struct Channels { - pub control_send: Sender, - pub control_recv: Receiver, - pub reconciliation_send: Sender, - pub reconciliation_recv: Receiver, -} - -impl Channels { - pub fn close_all(&self) { - self.control_send.close(); - self.control_recv.close(); - self.reconciliation_send.close(); - self.reconciliation_recv.close(); - } - pub fn close_send(&self) { - self.control_send.close(); - self.reconciliation_send.close(); - } - pub fn sender(&self, channel: LogicalChannel) -> &Sender { - match channel { - LogicalChannel::Control => &self.control_send, - LogicalChannel::Reconciliation => &self.reconciliation_send, - } - } - pub fn receiver(&self, channel: LogicalChannel) -> &Receiver { - match channel { - LogicalChannel::Control => &self.control_recv, - LogicalChannel::Reconciliation => &self.reconciliation_recv, - } - } +pub struct ControlRoutine { + channels: Channels, + state: SharedSessionState, + co: WakeableCo, } - -// Note that all async methods yield to the owner of the coroutine. They are not running in a tokio -// context. You may not perform regular async operations in them. 
-impl Coroutine { - pub async fn run_reconciliation( - mut self, - start_with_aoi: Option<(AreaOfInterestHandle, AreaOfInterestHandle)>, - ) -> Result<(), Error> { - debug!(start = start_with_aoi.is_some(), "start reconciliation"); - - // optionally initiate reconciliation with a first fingerprint. only alfie may do this. - if let Some((our_handle, their_handle)) = start_with_aoi { - self.start_reconciliation(our_handle, their_handle).await?; - } - - while let Some(message) = self.recv(LogicalChannel::Reconciliation).await { - let message = message?; - trace!(%message, "recv"); - match message { - Message::ReconciliationSendFingerprint(message) => { - self.on_send_fingerprint(message).await? - } - Message::ReconciliationAnnounceEntries(message) => { - self.on_announce_entries(message).await? - } - Message::ReconciliationSendEntry(message) => self.on_send_entry(message).await?, - _ => return Err(Error::UnsupportedMessage), - }; - - if self.state().reconciliation_is_complete() { - self.channels.close_send(); - } +impl ControlRoutine { + pub fn new(co: WakeableCo, channels: Channels, state: SharedSessionState) -> Self { + Self { + channels, + state, + co, } - - Ok(()) } - - pub async fn run_control(mut self, init: SessionInit) -> Result<(), Error> { + pub async fn run(mut self, init: SessionInit) -> Result<(), Error> { + debug!(role = ?self.state().our_role, "start session"); let reveal_message = self.state().commitment_reveal()?; self.send(reveal_message).await?; @@ -142,7 +74,7 @@ impl Coroutine { } async fn setup(&mut self, init: SessionInit) -> Result<(), Error> { - debug!(?init, "setup"); + debug!(interests = init.interests.len(), "setup"); for (capability, aois) in init.interests.into_iter() { if *capability.receiver() != init.user_secret_key.public_key() { return Err(Error::WrongSecretKeyForCapability); @@ -177,12 +109,81 @@ impl Coroutine { Ok(()) } + fn state(&mut self) -> RefMut { + self.state.borrow_mut() + } + + async fn recv(&self, channel: 
LogicalChannel) -> Option> { + self.channels.recv_co(&self.co, channel).await + } + + async fn send(&self, message: impl Into) -> Result<(), WriteError> { + self.channels.send_co(&self.co, message).await + } +} + +#[derive(derive_more::Debug)] +pub struct ReconcileRoutine { + store_snapshot: Rc, + store_writer: Rc>, + channels: Channels, + state: SharedSessionState, + co: WakeableCo, +} + +// Note that all async methods yield to the owner of the coroutine. They are not running in a tokio +// context. You may not perform regular async operations in them. +impl ReconcileRoutine { + pub fn new( + co: WakeableCo, + channels: Channels, + state: SharedSessionState, + store_snapshot: Rc, + store_writer: Rc>, + ) -> Self { + Self { + channels, + state, + co, + store_snapshot, + store_writer, + } + } + pub async fn run(mut self, start: Option) -> Result<(), Error> { + debug!(init = start.is_some(), "start reconciliation"); + + // optionally initiate reconciliation with a first fingerprint. only alfie may do this. + if let Some((our_handle, their_handle)) = start { + self.start_reconciliation(our_handle, their_handle).await?; + } + + while let Some(message) = self.recv(LogicalChannel::Reconciliation).await { + let message = message?; + trace!(%message, "recv"); + match message { + Message::ReconciliationSendFingerprint(message) => { + self.on_send_fingerprint(message).await? + } + Message::ReconciliationAnnounceEntries(message) => { + self.on_announce_entries(message).await? 
+ } + Message::ReconciliationSendEntry(message) => self.on_send_entry(message).await?, + _ => return Err(Error::UnsupportedMessage), + }; + + if self.state().reconciliation_is_complete() { + self.channels.close_send(); + } + } + + Ok(()) + } + async fn start_reconciliation( &mut self, our_handle: AreaOfInterestHandle, their_handle: AreaOfInterestHandle, ) -> Result<(), Error> { - debug!("init reconciliation"); let mut state = self.state(); let our_aoi = state.our_resources.areas_of_interest.try_get(&our_handle)?; let their_aoi = state @@ -478,20 +479,10 @@ impl Coroutine { } async fn recv(&self, channel: LogicalChannel) -> Option> { - let receiver = self.channels.receiver(channel); - let message = self.co.yield_wake(receiver.recv_message()).await; - if let Some(Ok(message)) = &message { - debug!(ch=%channel.fmt_short(), %message, "recv"); - } - message + self.channels.recv_co(&self.co, channel).await } async fn send(&self, message: impl Into) -> Result<(), WriteError> { - let message: Message = message.into(); - let channel = message.logical_channel(); - let sender = self.channels.sender(channel); - self.co.yield_wake(sender.send_message(&message)).await?; - debug!(ch=%channel.fmt_short(), %message, "send"); - Ok(()) + self.channels.send_co(&self.co, message).await } } diff --git a/iroh-willow/src/session/error.rs b/iroh-willow/src/session/error.rs index 8a311a13f9..694ba965bd 100644 --- a/iroh-willow/src/session/error.rs +++ b/iroh-willow/src/session/error.rs @@ -39,6 +39,8 @@ pub enum Error { SessionNotFound, #[error("invalid parameters: {0}")] InvalidParameters(&'static str), + #[error("reached an invalid state")] + InvalidState(&'static str), } impl From for Error { diff --git a/iroh-willow/src/session/state.rs b/iroh-willow/src/session/state.rs index e860f7ab0f..5f18efd6c3 100644 --- a/iroh-willow/src/session/state.rs +++ b/iroh-willow/src/session/state.rs @@ -1,6 +1,6 @@ use std::{cell::RefCell, collections::HashSet, rc::Rc}; -use tracing::{trace, warn}; +use 
tracing::warn; use crate::proto::{ challenge::ChallengeState, @@ -55,16 +55,9 @@ impl SessionState { } } pub fn reconciliation_is_complete(&self) -> bool { - let is_complete = self.reconciliation_started + self.reconciliation_started && self.pending_ranges.is_empty() - && self.pending_entries.is_none(); - trace!( - started = self.reconciliation_started, - pending_ranges = self.pending_ranges.len(), - pending_entries = ?self.pending_entries, - "is_complete {is_complete}" - ); - is_complete + && self.pending_entries.is_none() } pub fn bind_and_sign_capability( @@ -87,8 +80,6 @@ impl SessionState { Ok((our_handle, maybe_message)) } - // pub fn bind_aoi() - pub fn commitment_reveal(&mut self) -> Result { match self.challenge { ChallengeState::Committed { our_nonce, .. } => { @@ -96,12 +87,10 @@ impl SessionState { } _ => Err(Error::InvalidMessageInCurrentState), } - // let msg = CommitmentReveal { nonce: our_nonce }; } pub fn on_commitment_reveal(&mut self, msg: CommitmentReveal) -> Result<(), Error> { - self.challenge.reveal(self.our_role, msg.nonce)?; - Ok(()) + self.challenge.reveal(self.our_role, msg.nonce) } pub fn on_setup_bind_read_capability( diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index 25a5b6209c..390193e3e5 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -8,8 +8,6 @@ use crate::proto::{ willow::{AuthorisedEntry, Entry, NamespaceId}, }; -pub mod actor; - #[derive(Debug, Clone, Copy)] pub struct SyncConfig { /// Up to how many values to send immediately, before sending only a fingerprint. 
@@ -59,7 +57,7 @@ pub trait Store: ReadonlyStore + 'static { type Snapshot: ReadonlyStore + Send; fn snapshot(&mut self) -> Result; - fn ingest_entry(&mut self, entry: &AuthorisedEntry) -> Result<()>; + fn ingest_entry(&mut self, entry: &AuthorisedEntry) -> Result; } /// A very inefficient in-memory store, for testing purposes only @@ -203,7 +201,7 @@ impl Store for MemoryStore { entries: self.entries.clone(), })) } - fn ingest_entry(&mut self, entry: &AuthorisedEntry) -> Result<()> { + fn ingest_entry(&mut self, entry: &AuthorisedEntry) -> Result { let entries = self.entries.entry(entry.namespace_id()).or_default(); let new = entry.entry(); let mut to_remove = vec![]; @@ -214,20 +212,23 @@ impl Store for MemoryStore { && existing.is_newer_than(new) { // we cannot insert the entry, a newer entry exists - return Ok(()); + tracing::warn!("SKIP INGEST!"); + return Ok(false); } if new.subspace_id == existing.subspace_id && new.path.is_prefix_of(&existing.path) && new.is_newer_than(existing) { + tracing::warn!("REMOVE!"); to_remove.push(i); } } for i in to_remove { entries.remove(i); } + tracing::warn!("INGEST!"); entries.push(entry.clone()); - Ok(()) + Ok(true) } } diff --git a/iroh-willow/src/util.rs b/iroh-willow/src/util.rs index 7f3d2c55bc..286d320124 100644 --- a/iroh-willow/src/util.rs +++ b/iroh-willow/src/util.rs @@ -1,7 +1,14 @@ -use std::io; +use std::{io, time::SystemTime}; pub mod channel; +pub fn system_time_now() -> u64 { + SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .expect("time drift") + .as_micros() as u64 +} + pub trait Encoder: std::fmt::Debug { fn encoded_len(&self) -> usize; diff --git a/iroh-willow/src/util/channel.rs b/iroh-willow/src/util/channel.rs index af219f47bf..5df010173b 100644 --- a/iroh-willow/src/util/channel.rs +++ b/iroh-willow/src/util/channel.rs @@ -9,7 +9,6 @@ use std::{ use bytes::{Buf, Bytes, BytesMut}; use tokio::io::AsyncWrite; -use tracing::trace; use super::{DecodeOutcome, Decoder, Encoder}; @@ -178,7 
+177,6 @@ impl Shared { cx: &mut task::Context<'_>, ) -> Poll>> { let buf = self.peek(); - trace!("read, remaining {}", buf.len()); if self.is_closed() && self.is_empty() { return Poll::Ready(None); } From a2325bd4d6f2cc4b68f3fff571512d4dea2f0dda Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Fri, 10 May 2024 12:50:04 +0200 Subject: [PATCH 029/198] fix --- iroh-willow/src/actor.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/actor.rs index 79ec42e9f8..9f31f35172 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -320,7 +320,7 @@ impl StorageThread { .boxed_local() }; let span_fn = || error_span!("control"); - self.start_coroutine(session_id, create_fn, span_fn, true) + self.start_coroutine(session_id, create_fn, span_fn, false) } fn start_reconcile_routine( @@ -338,7 +338,7 @@ impl StorageThread { .boxed_local() }; let span_fn = || error_span!("reconcile"); - self.start_coroutine(session_id, create_fn, span_fn, false) + self.start_coroutine(session_id, create_fn, span_fn, true) } fn start_coroutine( From 538267f24a9194f7c0befc2f9b9d6bdab27e91ee Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Fri, 10 May 2024 13:03:10 +0200 Subject: [PATCH 030/198] fix shutdown --- iroh-willow/src/actor.rs | 19 ++++++++++++------- iroh-willow/src/session/channels.rs | 2 +- iroh-willow/src/session/coroutine.rs | 5 ++++- iroh-willow/src/store.rs | 3 --- 4 files changed, 17 insertions(+), 12 deletions(-) diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/actor.rs index 9f31f35172..36d98b1025 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -204,7 +204,6 @@ struct CoroutineState { #[debug("Generator")] gen: Gen, span: Span, - finalizes_session: bool, } impl StorageThread { @@ -320,7 +319,7 @@ impl StorageThread { .boxed_local() }; let span_fn = || error_span!("control"); - self.start_coroutine(session_id, create_fn, span_fn, false) + 
self.start_coroutine(session_id, create_fn, span_fn) } fn start_reconcile_routine( @@ -338,7 +337,7 @@ impl StorageThread { .boxed_local() }; let span_fn = || error_span!("reconcile"); - self.start_coroutine(session_id, create_fn, span_fn, true) + self.start_coroutine(session_id, create_fn, span_fn) } fn start_coroutine( @@ -346,7 +345,6 @@ impl StorageThread { session_id: SessionId, create_fn: impl FnOnce(WakeableCo, &mut Session) -> CoroFut, span_fn: impl FnOnce() -> Span, - finalizes_session: bool, ) -> Result<(), Error> { let session = self .sessions @@ -375,7 +373,6 @@ impl StorageThread { session_id, gen, span, - finalizes_session, }; self.resume_coroutine(state) } @@ -399,8 +396,14 @@ impl StorageThread { } } GeneratorState::Complete(res) => { - debug!(?res, "routine completed"); - if res.is_err() || coro.finalizes_session { + let session = self + .sessions + .get_mut(&coro.session_id) + .ok_or(Error::SessionNotFound)?; + session.coroutines.remove(&coro.id); + let is_last = session.coroutines.is_empty(); + debug!(?res, ?is_last, "routine completed"); + if res.is_err() || is_last { self.remove_session(&coro.session_id, res) } break Ok(()); @@ -408,6 +411,8 @@ impl StorageThread { } } } + + // fn on_coroutine_complete(&mut self, id: CoroId) } pub type InitWithArea = (AreaOfInterestHandle, AreaOfInterestHandle); diff --git a/iroh-willow/src/session/channels.rs b/iroh-willow/src/session/channels.rs index b8ce397c4f..f6eee958bb 100644 --- a/iroh-willow/src/session/channels.rs +++ b/iroh-willow/src/session/channels.rs @@ -1,8 +1,8 @@ use tracing::debug; use crate::{ - proto::wgps::{LogicalChannel, Message}, actor::WakeableCo, + proto::wgps::{LogicalChannel, Message}, util::channel::{ReadError, Receiver, Sender, WriteError}, }; diff --git a/iroh-willow/src/session/coroutine.rs b/iroh-willow/src/session/coroutine.rs index 6c05cac62a..9187c07b45 100644 --- a/iroh-willow/src/session/coroutine.rs +++ b/iroh-willow/src/session/coroutine.rs @@ -172,7 +172,10 @@ impl 
ReconcileRoutine { }; if self.state().reconciliation_is_complete() { - self.channels.close_send(); + self.channels.reconciliation_send.close(); + // this will make the control routine stop! + self.channels.control_recv.close(); + break; } } diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index 390193e3e5..97d355a7c0 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -212,21 +212,18 @@ impl Store for MemoryStore { && existing.is_newer_than(new) { // we cannot insert the entry, a newer entry exists - tracing::warn!("SKIP INGEST!"); return Ok(false); } if new.subspace_id == existing.subspace_id && new.path.is_prefix_of(&existing.path) && new.is_newer_than(existing) { - tracing::warn!("REMOVE!"); to_remove.push(i); } } for i in to_remove { entries.remove(i); } - tracing::warn!("INGEST!"); entries.push(entry.clone()); Ok(true) } From 484b103425b5e93f1926b29c9f6bc3802fedd9f0 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Sun, 12 May 2024 01:29:55 +0200 Subject: [PATCH 031/198] implement resource control and refactor channel opening --- Cargo.lock | 12 ++- iroh-willow/Cargo.toml | 12 ++- iroh-willow/src/actor.rs | 4 +- iroh-willow/src/net.rs | 133 +++++++++++++++++++++----- iroh-willow/src/proto/wgps.rs | 134 ++++++++++++++++++++++++--- iroh-willow/src/session/channels.rs | 40 ++++++-- iroh-willow/src/session/coroutine.rs | 27 +++++- iroh-willow/src/util/channel.rs | 81 ++++++++++++++-- 8 files changed, 377 insertions(+), 66 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a2fcc04d4f..332f775788 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -37,7 +37,7 @@ dependencies = [ "cfg-if", "once_cell", "version_check", - "zerocopy 0.7.32", + "zerocopy 0.7.34", ] [[package]] @@ -2919,7 +2919,7 @@ dependencies = [ [[package]] name = "iroh-willow" -version = "0.14.0" +version = "0.17.0" dependencies = [ "anyhow", "bytes", @@ -2927,6 +2927,8 @@ dependencies = [ "ed25519-dalek", "flume", "futures", + "futures-concurrency", + 
"futures-lite 2.3.0", "genawaiter", "hex", "iroh-base", @@ -2944,7 +2946,7 @@ dependencies = [ "rand_chacha", "rand_core", "rayon", - "redb 2.0.0", + "redb 2.1.0", "rtrb", "self_cell", "serde", @@ -6762,7 +6764,7 @@ version = "0.7.34" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" dependencies = [ - "zerocopy-derive 0.7.32", + "zerocopy-derive 0.7.34", ] [[package]] @@ -6793,7 +6795,7 @@ checksum = "e862f7936bea2c96af2769d9d60ff534da9af29dd59943519403256f30bf5ac3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.66", ] [[package]] diff --git a/iroh-willow/Cargo.toml b/iroh-willow/Cargo.toml index cb711c5dda..c2dff70a00 100644 --- a/iroh-willow/Cargo.toml +++ b/iroh-willow/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "iroh-willow" -version = "0.14.0" +version = "0.17.0" edition = "2021" readme = "README.md" description = "willow protocol implementation for iroh" @@ -16,11 +16,11 @@ workspace = true [dependencies] anyhow = "1" -derive_more = { version = "1.0.0-beta.1", features = ["debug", "deref", "display", "from", "try_into", "into", "as_ref"] } +derive_more = { version = "1.0.0-beta.1", features = ["debug", "deref", "display", "from", "try_into", "into", "as_ref", "try_from"] } ed25519-dalek = { version = "2.0.0", features = ["serde", "rand_core"] } flume = "0.11" -iroh-base = { version = "0.14.0", path = "../iroh-base" } -iroh-metrics = { version = "0.14.0", path = "../iroh-metrics", optional = true } +iroh-base = { version = "0.17.0", path = "../iroh-base" } +iroh-metrics = { version = "0.17.0", path = "../iroh-metrics", optional = true } num_enum = "0.7" postcard = { version = "1", default-features = false, features = ["alloc", "use-std", "experimental-derive"] } rand = "0.8.5" @@ -38,7 +38,7 @@ redb = { version = "2.0.0" } tempfile = { version = "3.4" } # net -iroh-net = { version = "0.14.0", optional = true, path = "../iroh-net" } +iroh-net = { 
version = "0.17.0", optional = true, path = "../iroh-net" } tokio-util = { version = "0.7", optional = true, features = ["codec", "io-util", "io"] } tokio-stream = { version = "0.1", optional = true, features = ["sync"]} quinn = { version = "0.10", optional = true } @@ -52,6 +52,8 @@ once_cell = "1.19.0" rayon = "1.10.0" smallvec = "1.13.2" itertools = "0.12.1" +futures-lite = "2.3.0" +futures-concurrency = "7.6.0" [dev-dependencies] iroh-test = { path = "../iroh-test" } diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/actor.rs index 36d98b1025..45b14f0b9d 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -32,7 +32,7 @@ use crate::{ store::Store, }; -pub const CHANNEL_CAP: usize = 1024; +pub const INBOX_CAP: usize = 1024; pub type SessionId = NodeId; @@ -87,7 +87,7 @@ impl Notifier { impl StoreHandle { pub fn spawn(store: S, me: NodeId) -> StoreHandle { - let (tx, rx) = flume::bounded(CHANNEL_CAP); + let (tx, rx) = flume::bounded(INBOX_CAP); // This channel only tracks wake to resume messages to coroutines, which are a sinlge u64 // per wakeup. We want to issue wake calls synchronosuly without blocking, so we use an // unbounded channel here. 
The actual capacity is bounded by the number of sessions times diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 9a48762438..9de0d5d46d 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -1,5 +1,6 @@ use anyhow::ensure; use futures::TryFutureExt; +use futures_concurrency::future::TryJoin; use iroh_base::{hash::Hash, key::NodeId}; use tokio::{ io::{AsyncReadExt, AsyncWriteExt}, @@ -14,11 +15,16 @@ use crate::{ AccessChallenge, ChallengeHash, LogicalChannel, Message, CHALLENGE_HASH_LENGTH, MAX_PAYLOAD_SIZE_POWER, }, - session::{channels::Channels, Role, SessionInit, SessionState}, - util::channel::{inbound_channel, outbound_channel, Reader, Receiver, Sender, Writer}, + session::{ + channels::{Channels, LogicalChannelReceivers, LogicalChannelSenders}, + Role, SessionInit, SessionState, + }, + util::channel::{ + inbound_channel, outbound_channel, Guarantees, Reader, Receiver, Sender, Writer, + }, }; -const CHANNEL_CAP: usize = 1024 * 64; +pub const CHANNEL_CAP: usize = 1024 * 64; #[instrument(skip_all, name = "willow_net", fields(me=%me.fmt_short(), peer=%peer.fmt_short()))] pub async fn run( @@ -50,30 +56,19 @@ pub async fn run( &mut join_set, LogicalChannel::Control, CHANNEL_CAP, + CHANNEL_CAP, + Guarantees::Unlimited, control_send_stream, control_recv_stream, ); - let (mut reconciliation_send_stream, mut reconciliation_recv_stream) = match our_role { - Role::Alfie => conn.open_bi().await?, - Role::Betty => conn.accept_bi().await?, - }; - reconciliation_send_stream.write_u8(0u8).await?; - reconciliation_recv_stream.read_u8().await?; - let (reconciliation_send, reconciliation_recv) = spawn_channel( - &mut join_set, - LogicalChannel::Reconciliation, - CHANNEL_CAP, - reconciliation_send_stream, - reconciliation_recv_stream, - ); + let (logical_send, logical_recv) = open_logical_channels(&mut join_set, conn, our_role).await?; debug!("channels opened"); - let channels = Channels { control_send, control_recv, - reconciliation_send, - 
reconciliation_recv, + logical_send, + logical_recv, }; let state = SessionState::new(our_role, our_nonce, received_commitment, max_payload_size); @@ -115,15 +110,109 @@ async fn join_all(mut join_set: JoinSet>) -> anyhow::Result<( final_result } +#[derive(Debug, thiserror::Error)] +#[error("missing channel: {0:?}")] +struct MissingChannel(LogicalChannel); + +async fn open_logical_channels( + join_set: &mut JoinSet>, + conn: quinn::Connection, + our_role: Role, +) -> anyhow::Result<(LogicalChannelSenders, LogicalChannelReceivers)> { + let cap = CHANNEL_CAP; + let channels = [LogicalChannel::Reconciliation, LogicalChannel::StaticToken]; + let mut channels = match our_role { + Role::Alfie => { + channels + .map(|ch| { + let conn = conn.clone(); + async move { + let ch_id = ch as u8; + let (mut send, recv) = conn.open_bi().await?; + send.write_u8(ch_id).await?; + Result::<_, anyhow::Error>::Ok((ch, Some((send, recv)))) + } + }) + .try_join() + .await + } + Role::Betty => { + channels + .map(|_| async { + let (send, mut recv) = conn.accept_bi().await?; + let channel_id = recv.read_u8().await?; + let channel = LogicalChannel::try_from(channel_id)?; + Result::<_, anyhow::Error>::Ok((channel, Some((send, recv)))) + }) + .try_join() + .await + } + }?; + + let mut take_channel = |ch| { + channels + .iter_mut() + .find(|(c, _)| *c == ch) + .map(|(_, streams)| streams.take()) + .flatten() + .ok_or(MissingChannel(ch)) + .map(|(send_stream, recv_stream)| { + spawn_channel( + join_set, + ch, + cap, + cap, + Guarantees::Limited(0), + send_stream, + recv_stream, + ) + }) + }; + + let rec = take_channel(LogicalChannel::Reconciliation)?; + let stt = take_channel(LogicalChannel::StaticToken)?; + Ok(( + LogicalChannelSenders { + reconciliation: rec.0, + static_tokens: stt.0, + }, + LogicalChannelReceivers { + reconciliation: rec.1, + static_tokens: stt.1, + }, + )) +} + +// async fn open_logical_channel( +// join_set: &mut JoinSet>, +// conn: &quinn::Connection, +// ch: 
LogicalChannel, +// ) -> anyhow::Result<(Sender, Receiver)> { +// let (mut send_stream, recv_stream) = conn.open_bi().await?; +// send_stream.write_u8(ch as u8).await?; +// let cap = CHANNEL_CAP; +// Ok(spawn_channel( +// join_set, +// ch, +// cap, +// cap, +// Guarantees::Limited(0), +// send_stream, +// recv_stream, +// )) +// } + fn spawn_channel( join_set: &mut JoinSet>, ch: LogicalChannel, - cap: usize, + send_cap: usize, + recv_cap: usize, + guarantees: Guarantees, send_stream: quinn::SendStream, recv_stream: quinn::RecvStream, ) -> (Sender, Receiver) { - let (sender, outbound_reader) = outbound_channel(cap); - let (inbound_writer, recveiver) = inbound_channel(cap); + let (sender, outbound_reader) = outbound_channel(send_cap, guarantees); + let (inbound_writer, recveiver) = inbound_channel(recv_cap); let recv_fut = recv_loop(recv_stream, inbound_writer) .map_err(move |e| e.context(format!("receive loop for {ch:?} failed"))) diff --git a/iroh-willow/src/proto/wgps.rs b/iroh-willow/src/proto/wgps.rs index 054d213349..812c4da5aa 100644 --- a/iroh-willow/src/proto/wgps.rs +++ b/iroh-willow/src/proto/wgps.rs @@ -69,12 +69,12 @@ pub enum HandleType { } /// The different logical channels employed by the WGPS. -#[derive(Debug, Serialize, Deserialize, Copy, Clone, Eq, PartialEq, Hash)] +#[derive(Debug, Serialize, Deserialize, Copy, Clone, Eq, PartialEq, Hash, derive_more::TryFrom)] pub enum LogicalChannel { /// Control channel - Control, + Control = 0, /// Logical channel for performing 3d range-based set reconciliation. - Reconciliation, + Reconciliation = 1, // TODO: use all the channels // right now everything but reconciliation goes into the control channel // @@ -93,8 +93,25 @@ pub enum LogicalChannel { // /// Logical channel for controlling the binding of new PayloadRequestHandles. // PayloadRequest, // - // /// Logical channel for controlling the binding of new StaticTokenHandles. 
- // StaticToken, + /// Logical channel for controlling the binding of new StaticTokenHandles. + StaticToken = 8, +} + +#[derive(Debug, thiserror::Error)] +#[error("invalid channel id")] +pub struct InvalidChannelId; + +impl TryFrom for LogicalChannel { + type Error = InvalidChannelId; + + fn try_from(value: u8) -> Result { + match value { + 0 => Ok(Self::Control), + 1 => Ok(Self::Reconciliation), + 8 => Ok(Self::StaticToken), + _ => Err(InvalidChannelId), + } + } } impl LogicalChannel { @@ -102,6 +119,7 @@ impl LogicalChannel { match self { LogicalChannel::Control => "Ctl", LogicalChannel::Reconciliation => "Rec", + LogicalChannel::StaticToken => "StT", } } } @@ -223,18 +241,11 @@ impl Encoder for Message { fn encoded_len(&self) -> usize { let data_len = postcard::experimental::serialized_size(&self).unwrap(); let header_len = 4; - // tracing::debug!( - // data_len, - // header_len, - // full_len = data_len + header_len, - // "Message encoded_len" - // ); data_len + header_len } fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { let len = postcard::experimental::serialized_size(&self).unwrap() as u32; - // tracing::debug!(msg_len = len, full_len = len + 4, "Message encode"); out.write_all(&len.to_be_bytes())?; postcard::to_io(self, out)?; Ok(()) @@ -277,6 +288,105 @@ impl Message { } } +// #[derive(Debug, derive_more::From, derive_more::TryInto)] +// pub enum ChanMessage { +// Control(ControlMessage), +// Reconciliation(ReconciliationMessage), +// } +// impl From for ChanMessage { +// fn from(value: Message) -> Self { +// match value { +// Message::ReconciliationSendFingerprint(msg) => Self::Reconciliation(msg.into()), +// Message::ReconciliationAnnounceEntries(msg) => Self::Reconciliation(msg.into()), +// Message::ReconciliationSendEntry(msg) => Self::Reconciliation(msg.into()), +// +// Message::CommitmentReveal(msg) => Self::Control(msg.into()), +// Message::SetupBindStaticToken(msg) => Self::Control(msg.into()), +// Message::SetupBindReadCapability(msg) 
=> Self::Control(msg.into()), +// Message::SetupBindAreaOfInterest(msg) => Self::Control(msg.into()), +// +// Message::ControlIssueGuarantee(msg) => Self::Control(msg.into()), +// Message::ControlAbsolve(msg) => Self::Control(msg.into()), +// Message::ControlPlead(msg) => Self::Control(msg.into()), +// Message::ControlAnnounceDropping(msg) => Self::Control(msg.into()), +// Message::ControlApologise(msg) => Self::Control(msg.into()), +// Message::ControlFreeHandle(msg) => Self::Control(msg.into()), +// } +// } +// } +// impl From for Message { +// fn from(message: ChanMessage) -> Self { +// match message { +// ChanMessage::Control(message) => message.into(), +// ChanMessage::Reconciliation(message) => message.into(), +// } +// } +// } +// +// #[derive(Debug, derive_more::From)] +// pub enum ReconciliationMessage { +// SendFingerprint(ReconciliationSendFingerprint), +// AnnounceEntries(ReconciliationAnnounceEntries), +// SendEntry(ReconciliationSendEntry), +// } +// +// impl From for Message { +// fn from(message: ReconciliationMessage) -> Self { +// match message { +// ReconciliationMessage::SendFingerprint(message) => message.into(), +// ReconciliationMessage::AnnounceEntries(message) => message.into(), +// ReconciliationMessage::SendEntry(message) => message.into(), +// } +// } +// } +// +// impl Encoder for ReconciliationMessage { +// fn encoded_len(&self) -> usize { +// Message::from(se) +// todo!() +// } +// +// fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { +// todo!() +// } +// } +// +// #[derive(Debug, derive_more::From)] +// pub enum ControlMessage { +// CommitmentReveal(CommitmentReveal), +// // TODO: move to CapabilityChannel +// SetupBindReadCapability(SetupBindReadCapability), +// // TODO: move to StaticTokenChannel +// SetupBindStaticToken(SetupBindStaticToken), +// // TODO: move to AreaOfInterestChannel +// SetupBindAreaOfInterest(SetupBindAreaOfInterest), +// +// IssueGuarantee(ControlIssueGuarantee), +// Absolve(ControlAbsolve), +// 
Plead(ControlPlead), +// AnnounceDropping(ControlAnnounceDropping), +// Apologise(ControlApologise), +// +// FreeHandle(ControlFreeHandle), +// } +// +// impl From for Message { +// fn from(message: ControlMessage) -> Self { +// match message { +// ControlMessage::CommitmentReveal(message) => message.into(), +// ControlMessage::SetupBindReadCapability(message) => message.into(), +// ControlMessage::SetupBindStaticToken(message) => message.into(), +// ControlMessage::SetupBindAreaOfInterest(message) => message.into(), +// ControlMessage::IssueGuarantee(message) => message.into(), +// ControlMessage::Absolve(message) => message.into(), +// ControlMessage::Plead(message) => message.into(), +// ControlMessage::AnnounceDropping(message) => message.into(), +// ControlMessage::Apologise(message) => message.into(), +// ControlMessage::FreeHandle(message) => message.into(), +// } +// } +// } + /// Bind a ReadCapability to a CapabilityHandle. /// /// The SetupBindReadCapability messages let peers bind a ReadCapability for later reference. 
diff --git a/iroh-willow/src/session/channels.rs b/iroh-willow/src/session/channels.rs index f6eee958bb..846b2c45da 100644 --- a/iroh-willow/src/session/channels.rs +++ b/iroh-willow/src/session/channels.rs @@ -6,35 +6,61 @@ use crate::{ util::channel::{ReadError, Receiver, Sender, WriteError}, }; +#[derive(Debug, Clone)] +pub struct LogicalChannelReceivers { + pub reconciliation: Receiver, + pub static_tokens: Receiver, +} +impl LogicalChannelReceivers { + pub fn close(&self) { + self.reconciliation.close(); + self.static_tokens.close(); + } +} + +#[derive(Debug, Clone)] +pub struct LogicalChannelSenders { + pub reconciliation: Sender, + pub static_tokens: Sender, +} +impl LogicalChannelSenders { + pub fn close(&self) { + self.reconciliation.close(); + self.static_tokens.close(); + } +} + #[derive(Debug, Clone)] pub struct Channels { pub control_send: Sender, pub control_recv: Receiver, - pub reconciliation_send: Sender, - pub reconciliation_recv: Receiver, + pub logical_send: LogicalChannelSenders, + pub logical_recv: LogicalChannelReceivers, } impl Channels { pub fn close_all(&self) { self.control_send.close(); self.control_recv.close(); - self.reconciliation_send.close(); - self.reconciliation_recv.close(); + self.logical_send.close(); + self.logical_recv.close(); } pub fn close_send(&self) { self.control_send.close(); - self.reconciliation_send.close(); + self.logical_send.close(); } pub fn sender(&self, channel: LogicalChannel) -> &Sender { match channel { LogicalChannel::Control => &self.control_send, - LogicalChannel::Reconciliation => &self.reconciliation_send, + LogicalChannel::Reconciliation => &self.logical_send.reconciliation, + LogicalChannel::StaticToken => &self.logical_send.static_tokens, } } pub fn receiver(&self, channel: LogicalChannel) -> &Receiver { match channel { LogicalChannel::Control => &self.control_recv, - LogicalChannel::Reconciliation => &self.reconciliation_recv, + LogicalChannel::Reconciliation => &self.logical_recv.reconciliation, 
+ LogicalChannel::StaticToken => &self.logical_recv.static_tokens, } } diff --git a/iroh-willow/src/session/coroutine.rs b/iroh-willow/src/session/coroutine.rs index 9187c07b45..28a81176b0 100644 --- a/iroh-willow/src/session/coroutine.rs +++ b/iroh-willow/src/session/coroutine.rs @@ -8,13 +8,14 @@ use tracing::{debug, trace}; use crate::{ actor::{InitWithArea, WakeableCo, Yield}, + net::CHANNEL_CAP, proto::{ grouping::ThreeDRange, keys::NamespaceId, wgps::{ - AreaOfInterestHandle, Fingerprint, LengthyEntry, LogicalChannel, Message, - ReconciliationAnnounceEntries, ReconciliationSendEntry, ReconciliationSendFingerprint, - SetupBindAreaOfInterest, StaticToken, StaticTokenHandle, + AreaOfInterestHandle, ControlIssueGuarantee, Fingerprint, LengthyEntry, LogicalChannel, + Message, ReconciliationAnnounceEntries, ReconciliationSendEntry, + ReconciliationSendFingerprint, SetupBindAreaOfInterest, StaticToken, StaticTokenHandle, }, willow::AuthorisedEntry, }, @@ -41,6 +42,11 @@ impl ControlRoutine { debug!(role = ?self.state().our_role, "start session"); let reveal_message = self.state().commitment_reveal()?; self.send(reveal_message).await?; + let msg = ControlIssueGuarantee { + amount: CHANNEL_CAP as u64, + channel: LogicalChannel::Reconciliation, + }; + self.send(msg).await?; let mut init = Some(init); while let Some(message) = self.recv(LogicalChannel::Control).await { @@ -66,6 +72,15 @@ impl ControlRoutine { Message::ControlFreeHandle(_msg) => { // TODO: Free handles } + Message::ControlIssueGuarantee(msg) => { + let ControlIssueGuarantee { amount, channel } = msg; + // let receiver = self.channels.receiver(channel); + // let did_set = receiver.set_cap(amount as usize); + // tracing::error!("recv {channel:?} {amount} {did_set}"); + let sender = self.channels.sender(channel); + let did_set = sender.add_guarantees(amount); + debug!(?channel, amount, ?did_set, "set send capacity"); + } _ => return Err(Error::UnsupportedMessage), } } @@ -172,8 +187,10 @@ impl 
ReconcileRoutine { }; if self.state().reconciliation_is_complete() { - self.channels.reconciliation_send.close(); - // this will make the control routine stop! + // we won't send anything further, so close our send channel, which will end the + // remote's recv channel. + self.channels.logical_send.reconciliation.close(); + // for now unconditionally end the session by closing our control receiver self.channels.control_recv.close(); break; } diff --git a/iroh-willow/src/util/channel.rs b/iroh-willow/src/util/channel.rs index 5df010173b..6fe887649e 100644 --- a/iroh-willow/src/util/channel.rs +++ b/iroh-willow/src/util/channel.rs @@ -13,7 +13,7 @@ use tokio::io::AsyncWrite; use super::{DecodeOutcome, Decoder, Encoder}; pub fn pipe(cap: usize) -> (Writer, Reader) { - let shared = Shared::new(cap); + let shared = Shared::new(cap, Guarantees::Unlimited); let writer = Writer { shared: shared.clone(), }; @@ -21,8 +21,8 @@ pub fn pipe(cap: usize) -> (Writer, Reader) { (writer, reader) } -pub fn outbound_channel(cap: usize) -> (Sender, Reader) { - let shared = Shared::new(cap); +pub fn outbound_channel(cap: usize, guarantees: Guarantees) -> (Sender, Reader) { + let shared = Shared::new(cap, guarantees); let sender = Sender { shared: shared.clone(), _ty: PhantomData, @@ -32,7 +32,7 @@ pub fn outbound_channel(cap: usize) -> (Sender, Reader) { } pub fn inbound_channel(cap: usize) -> (Writer, Receiver) { - let shared = Shared::new(cap); + let shared = Shared::new(cap, Guarantees::Unlimited); let writer = Writer { shared: shared.clone(), }; @@ -59,6 +59,35 @@ pub enum ReadError { Decode(anyhow::Error), } +#[derive(Debug)] +pub enum Guarantees { + Unlimited, + Limited(u64), +} + +impl Guarantees { + pub fn add(&mut self, amount: u64) { + *self = match self { + Self::Unlimited => Self::Unlimited, + Self::Limited(ref mut current) => Self::Limited(current.wrapping_add(amount)), + } + } + + pub fn get(&self) -> u64 { + match self { + Self::Unlimited => u64::MAX, + 
Self::Limited(current) => *current, + } + } + + pub fn r#use(&mut self, amount: u64) { + *self = match self { + Self::Unlimited => Self::Unlimited, + Self::Limited(current) => Self::Limited(current.wrapping_sub(amount)), + } + } +} + // Shared state for a in-memory pipe. // // Roughly modeled after https://docs.rs/tokio/latest/src/tokio/io/util/mem.rs.html#58 @@ -69,20 +98,39 @@ struct Shared { write_wakers: Vec, read_wakers: Vec, is_closed: bool, + guarantees: Guarantees, } impl Shared { - fn new(cap: usize) -> Arc> { + fn new(cap: usize, guarantees: Guarantees) -> Arc> { let shared = Self { buf: BytesMut::new(), max_buffer_size: cap, write_wakers: Default::default(), read_wakers: Default::default(), is_closed: false, + guarantees, }; Arc::new(Mutex::new(shared)) } + fn set_cap(&mut self, cap: usize) -> bool { + if cap >= self.buf.len() { + // if cap > self.max_buffer_size { + // self.wake_writable(); + // } + self.max_buffer_size = cap; + self.wake_writable(); + true + } else { + false + } + } + + fn add_guarantees(&mut self, amount: u64) { + self.guarantees.add(amount); + } + fn close(&mut self) { self.is_closed = true; self.wake_writable(); @@ -129,14 +177,15 @@ impl Shared { if self.is_closed { return Poll::Ready(Err(std::io::ErrorKind::BrokenPipe.into())); } - let avail = self.max_buffer_size - self.buf.len(); + let avail = self.remaining_write_capacity(); if avail == 0 { self.write_wakers.push(cx.waker().to_owned()); return Poll::Pending; } - let len = buf.len().min(avail); + let len = std::cmp::min(buf.len(), avail); self.buf.extend_from_slice(&buf[..len]); + self.guarantees.r#use(len as u64); self.wake_readable(); Poll::Ready(Ok(len)) } @@ -164,6 +213,7 @@ impl Shared { if let Some(slice) = self.writable_slice_exact(len) { let mut cursor = io::Cursor::new(slice); item.encode_into(&mut cursor).map_err(WriteError::Encode)?; + self.guarantees.r#use(len as u64); self.wake_readable(); Poll::Ready(Ok(())) } else { @@ -198,7 +248,10 @@ impl Shared { } fn 
remaining_write_capacity(&self) -> usize { - self.max_buffer_size - self.buf.len() + std::cmp::min( + self.max_buffer_size - self.buf.len(), + self.guarantees.get() as usize, + ) } fn wake_readable(&mut self) { @@ -275,9 +328,17 @@ impl Sender { self.shared.lock().unwrap().close() } + pub fn set_cap(&self, cap: usize) -> bool { + self.shared.lock().unwrap().set_cap(cap) + } + pub async fn send_message(&self, message: &T) -> Result<(), WriteError> { poll_fn(|cx| self.shared.lock().unwrap().poll_send_message(message, cx)).await } + + pub fn add_guarantees(&self, amount: u64) { + self.shared.lock().unwrap().add_guarantees(amount) + } } #[derive(Debug)] @@ -291,6 +352,10 @@ impl Receiver { self.shared.lock().unwrap().close() } + pub fn set_cap(&self, cap: usize) -> bool { + self.shared.lock().unwrap().set_cap(cap) + } + pub async fn recv_message(&self) -> Option> { poll_fn(|cx| self.shared.lock().unwrap().poll_recv_message(cx)).await } From 10a347b04a21d7d2b95129544814cb6f9f3adfb3 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Mon, 13 May 2024 16:10:04 +0200 Subject: [PATCH 032/198] further cleanup around APIs --- Cargo.lock | 1 + iroh-willow/Cargo.toml | 1 + iroh-willow/src/actor.rs | 87 +++++++++++-- iroh-willow/src/net.rs | 188 ++++++++++++++------------- iroh-willow/src/session/coroutine.rs | 5 +- iroh-willow/src/session/error.rs | 2 + iroh-willow/src/session/state.rs | 31 +++-- iroh-willow/src/util/channel.rs | 10 ++ 8 files changed, 208 insertions(+), 117 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 332f775788..6b52b447f2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2929,6 +2929,7 @@ dependencies = [ "futures", "futures-concurrency", "futures-lite 2.3.0", + "futures-util", "genawaiter", "hex", "iroh-base", diff --git a/iroh-willow/Cargo.toml b/iroh-willow/Cargo.toml index c2dff70a00..081f1b0754 100644 --- a/iroh-willow/Cargo.toml +++ b/iroh-willow/Cargo.toml @@ -54,6 +54,7 @@ smallvec = "1.13.2" itertools = "0.12.1" futures-lite = 
"2.3.0" futures-concurrency = "7.6.0" +futures-util = "0.3.30" [dev-dependencies] iroh-test = { path = "../iroh-test" } diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/actor.rs index 45b14f0b9d..b20dacffe7 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -9,7 +9,11 @@ use std::{ thread::JoinHandle, }; -use futures::{future::LocalBoxFuture, FutureExt}; +use futures_lite::{ + future::{Boxed as BoxFuture, BoxedLocal as LocalBoxFuture}, + stream::Stream, +}; +use futures_util::future::{FutureExt, Shared}; use genawaiter::{ sync::{Co, Gen}, GeneratorState, @@ -19,6 +23,7 @@ use tokio::sync::oneshot; use tracing::{debug, error, error_span, trace, warn, Span}; use crate::{ + net::InitialTransmission, proto::{ grouping::ThreeDRange, keys::NamespaceId, @@ -27,7 +32,7 @@ use crate::{ }, session::{ coroutine::{ControlRoutine, ReconcileRoutine}, - Channels, Error, SessionInit, SessionState, SharedSessionState, + Channels, Error, Role, SessionInit, SessionState, SharedSessionState, }, store::Store, }; @@ -37,7 +42,7 @@ pub const INBOX_CAP: usize = 1024; pub type SessionId = NodeId; #[derive(Debug, Clone)] -pub struct StoreHandle { +pub struct WillowHandle { tx: flume::Sender, join_handle: Arc>>, } @@ -85,8 +90,8 @@ impl Notifier { } } -impl StoreHandle { - pub fn spawn(store: S, me: NodeId) -> StoreHandle { +impl WillowHandle { + pub fn spawn(store: S, me: NodeId) -> WillowHandle { let (tx, rx) = flume::bounded(INBOX_CAP); // This channel only tracks wake to resume messages to coroutines, which are a sinlge u64 // per wakeup. 
We want to issue wake calls synchronosuly without blocking, so we use an @@ -116,7 +121,7 @@ impl StoreHandle { }) .expect("failed to spawn thread"); let join_handle = Arc::new(Some(join_handle)); - StoreHandle { tx, join_handle } + WillowHandle { tx, join_handle } } pub async fn send(&self, action: ToActor) -> anyhow::Result<()> { self.tx.send_async(action).await?; @@ -132,9 +137,56 @@ impl StoreHandle { reply_rx.await??; Ok(()) } + + pub async fn get_entries( + &self, + namespace: NamespaceId, + range: ThreeDRange, + ) -> anyhow::Result> { + let (tx, rx) = flume::bounded(1024); + self.send(ToActor::GetEntries { + namespace, + reply: tx, + range, + }) + .await?; + Ok(rx.into_stream()) + } + + pub async fn init_session( + &self, + peer: NodeId, + our_role: Role, + initial_transmission: InitialTransmission, + channels: Channels, + init: SessionInit, + ) -> anyhow::Result { + let state = SessionState::new(our_role, initial_transmission); + + let (on_finish_tx, on_finish_rx) = oneshot::channel(); + self.send(ToActor::InitSession { + peer, + state, + channels, + init, + on_finish: on_finish_tx, + }) + .await?; + + let on_finish = on_finish_rx + .map(|r| match r { + Ok(Ok(())) => Ok(()), + Ok(Err(err)) => Err(Arc::new(err.into())), + Err(_) => Err(Arc::new(Error::ActorFailed)), + }) + .boxed(); + let on_finish = on_finish.shared(); + let handle = SessionHandle { on_finish }; + Ok(handle) + } } -impl Drop for StoreHandle { +impl Drop for WillowHandle { fn drop(&mut self) { // this means we're dropping the last reference if let Some(handle) = Arc::get_mut(&mut self.join_handle) { @@ -146,6 +198,21 @@ impl Drop for StoreHandle { } } } + +#[derive(Debug)] +pub struct SessionHandle { + on_finish: Shared>>>, +} + +impl SessionHandle { + /// Wait for the session to finish. + /// + /// Returns an error if the session failed to complete. 
+ pub async fn on_finish(self) -> Result<(), Arc> { + self.on_finish.await + } +} + #[derive(derive_more::Debug, strum::Display)] pub enum ToActor { InitSession { @@ -155,7 +222,7 @@ pub enum ToActor { #[debug(skip)] channels: Channels, init: SessionInit, - on_done: oneshot::Sender>, + on_finish: oneshot::Sender>, }, GetEntries { namespace: NamespaceId, @@ -195,7 +262,7 @@ pub struct StorageThread { next_coro_id: u64, } -type CoroFut = LocalBoxFuture<'static, Result<(), Error>>; +type CoroFut = LocalBoxFuture>; #[derive(derive_more::Debug)] struct CoroutineState { @@ -257,7 +324,7 @@ impl StorageThread { state, channels, init, - on_done, + on_finish: on_done, } => { let span = error_span!("session", peer=%peer.fmt_short()); let session = Session { diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 9de0d5d46d..a6d24ad278 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -4,20 +4,19 @@ use futures_concurrency::future::TryJoin; use iroh_base::{hash::Hash, key::NodeId}; use tokio::{ io::{AsyncReadExt, AsyncWriteExt}, - sync::oneshot, task::JoinSet, }; use tracing::{debug, error_span, instrument, trace, warn, Instrument}; use crate::{ - actor::{StoreHandle, ToActor}, + actor::WillowHandle, proto::wgps::{ AccessChallenge, ChallengeHash, LogicalChannel, Message, CHALLENGE_HASH_LENGTH, MAX_PAYLOAD_SIZE_POWER, }, session::{ channels::{Channels, LogicalChannelReceivers, LogicalChannelSenders}, - Role, SessionInit, SessionState, + Role, SessionInit, }, util::channel::{ inbound_channel, outbound_channel, Guarantees, Reader, Receiver, Sender, Writer, @@ -29,7 +28,7 @@ pub const CHANNEL_CAP: usize = 1024 * 64; #[instrument(skip_all, name = "willow_net", fields(me=%me.fmt_short(), peer=%peer.fmt_short()))] pub async fn run( me: NodeId, - store: StoreHandle, + store: WillowHandle, conn: quinn::Connection, peer: NodeId, our_role: Role, @@ -37,19 +36,15 @@ pub async fn run( ) -> anyhow::Result<()> { debug!(?our_role, "connected"); let mut join_set = 
JoinSet::new(); + let (mut control_send_stream, mut control_recv_stream) = match our_role { Role::Alfie => conn.open_bi().await?, Role::Betty => conn.accept_bi().await?, }; control_send_stream.set_priority(i32::MAX)?; - let our_nonce: AccessChallenge = rand::random(); - let (received_commitment, max_payload_size) = exchange_commitments( - &mut control_send_stream, - &mut control_recv_stream, - &our_nonce, - ) - .await?; + let initial_transmission = + exchange_commitments(&mut control_send_stream, &mut control_recv_stream).await?; debug!("commitments exchanged"); let (control_send, control_recv) = spawn_channel( @@ -70,21 +65,12 @@ pub async fn run( logical_send, logical_recv, }; - let state = SessionState::new(our_role, our_nonce, received_commitment, max_payload_size); - - let (on_done, on_done_rx) = oneshot::channel(); - store - .send(ToActor::InitSession { - peer, - state, - channels, - init, - on_done, - }) + let handle = store + .init_session(peer, our_role, initial_transmission, channels, init) .await?; join_set.spawn(async move { - on_done_rx.await??; + handle.on_finish().await?; Ok(()) }); @@ -93,23 +79,6 @@ pub async fn run( Ok(()) } -async fn join_all(mut join_set: JoinSet>) -> anyhow::Result<()> { - let mut final_result = Ok(()); - while let Some(res) = join_set.join_next().await { - let res = match res { - Ok(Ok(())) => Ok(()), - Ok(Err(err)) => Err(err), - Err(err) => Err(err.into()), - }; - if res.is_err() && final_result.is_ok() { - final_result = res; - } else if res.is_err() { - warn!("join error after initial error: {res:?}"); - } - } - final_result -} - #[derive(Debug, thiserror::Error)] #[error("missing channel: {0:?}")] struct MissingChannel(LogicalChannel); @@ -122,6 +91,8 @@ async fn open_logical_channels( let cap = CHANNEL_CAP; let channels = [LogicalChannel::Reconciliation, LogicalChannel::StaticToken]; let mut channels = match our_role { + // Alfie opens a quic stream for each logical channel, and sends a single byte with the + // channel 
id. Role::Alfie => { channels .map(|ch| { @@ -136,6 +107,8 @@ async fn open_logical_channels( .try_join() .await } + // Alfie accepts as many quick streams as there are logical channels, and reads a single + // byte on each, which is expected to contain a channel id. Role::Betty => { channels .map(|_| async { @@ -149,7 +122,7 @@ async fn open_logical_channels( } }?; - let mut take_channel = |ch| { + let mut take_and_spawn_channel = |ch| { channels .iter_mut() .find(|(c, _)| *c == ch) @@ -169,8 +142,8 @@ async fn open_logical_channels( }) }; - let rec = take_channel(LogicalChannel::Reconciliation)?; - let stt = take_channel(LogicalChannel::StaticToken)?; + let rec = take_and_spawn_channel(LogicalChannel::Reconciliation)?; + let stt = take_and_spawn_channel(LogicalChannel::StaticToken)?; Ok(( LogicalChannelSenders { reconciliation: rec.0, @@ -183,25 +156,6 @@ async fn open_logical_channels( )) } -// async fn open_logical_channel( -// join_set: &mut JoinSet>, -// conn: &quinn::Connection, -// ch: LogicalChannel, -// ) -> anyhow::Result<(Sender, Receiver)> { -// let (mut send_stream, recv_stream) = conn.open_bi().await?; -// send_stream.write_u8(ch as u8).await?; -// let cap = CHANNEL_CAP; -// Ok(spawn_channel( -// join_set, -// ch, -// cap, -// cap, -// Guarantees::Limited(0), -// send_stream, -// recv_stream, -// )) -// } - fn spawn_channel( join_set: &mut JoinSet>, ch: LogicalChannel, @@ -233,7 +187,8 @@ async fn recv_loop( mut recv_stream: quinn::RecvStream, mut channel_writer: Writer, ) -> anyhow::Result<()> { - while let Some(buf) = recv_stream.read_chunk(CHANNEL_CAP, true).await? { + let max_buffer_size = channel_writer.max_buffer_size(); + while let Some(buf) = recv_stream.read_chunk(max_buffer_size, true).await? 
{ channel_writer.write_all(&buf.bytes[..]).await?; trace!(len = buf.bytes.len(), "recv"); } @@ -255,30 +210,61 @@ async fn send_loop( } async fn exchange_commitments( - send: &mut quinn::SendStream, - recv: &mut quinn::RecvStream, - our_nonce: &AccessChallenge, -) -> anyhow::Result<(ChallengeHash, usize)> { + send_stream: &mut quinn::SendStream, + recv_stream: &mut quinn::RecvStream, +) -> anyhow::Result { + let our_nonce: AccessChallenge = rand::random(); let challenge_hash = Hash::new(&our_nonce); - send.write_u8(MAX_PAYLOAD_SIZE_POWER).await?; - send.write_all(challenge_hash.as_bytes()).await?; + send_stream.write_u8(MAX_PAYLOAD_SIZE_POWER).await?; + send_stream.write_all(challenge_hash.as_bytes()).await?; let their_max_payload_size = { - let power = recv.read_u8().await?; + let power = recv_stream.read_u8().await?; ensure!(power <= 64, "max payload size too large"); - 2usize.pow(power as u32) + 2u64.pow(power as u32) }; let mut received_commitment = [0u8; CHALLENGE_HASH_LENGTH]; - recv.read_exact(&mut received_commitment).await?; - Ok((received_commitment, their_max_payload_size)) + recv_stream.read_exact(&mut received_commitment).await?; + Ok(InitialTransmission { + our_nonce, + received_commitment, + their_max_payload_size, + }) +} + +#[derive(Debug)] +pub struct InitialTransmission { + pub our_nonce: AccessChallenge, + pub received_commitment: ChallengeHash, + pub their_max_payload_size: u64, +} + +async fn join_all(mut join_set: JoinSet>) -> anyhow::Result<()> { + let mut final_result = Ok(()); + while let Some(res) = join_set.join_next().await { + let res = match res { + Ok(Ok(())) => Ok(()), + Ok(Err(err)) => Err(err), + Err(err) => Err(err.into()), + }; + if res.is_err() && final_result.is_ok() { + final_result = res; + } else if res.is_err() { + warn!("join error after initial error: {res:?}"); + } + } + final_result } #[cfg(test)] mod tests { - use std::{collections::HashSet, time::Instant}; + use std::{ + collections::HashSet, + time::{Instant}, + }; 
- use futures::StreamExt; + use futures_lite::StreamExt; use iroh_base::{hash::Hash, key::SecretKey}; use iroh_net::MagicEndpoint; use rand::SeedableRng; @@ -286,7 +272,7 @@ mod tests { use tracing::{debug, info}; use crate::{ - actor::{StoreHandle, ToActor}, + actor::WillowHandle, net::run, proto::{ grouping::{AreaOfInterest, ThreeDRange}, @@ -350,10 +336,10 @@ mod tests { let mut expected_entries = HashSet::new(); let store_alfie = MemoryStore::default(); - let handle_alfie = StoreHandle::spawn(store_alfie, node_id_alfie); + let handle_alfie = WillowHandle::spawn(store_alfie, node_id_alfie); let store_betty = MemoryStore::default(); - let handle_betty = StoreHandle::spawn(store_betty, node_id_betty); + let handle_betty = WillowHandle::spawn(store_betty, node_id_betty); let init_alfie = setup_and_insert( &mut rng, @@ -378,6 +364,34 @@ mod tests { println!("init took {:?}", start.elapsed()); let start = Instant::now(); + // tokio::task::spawn({ + // let handle_alfie = handle_alfie.clone(); + // let handle_betty = handle_betty.clone(); + // async move { + // loop { + // info!( + // "alfie count: {}", + // handle_alfie + // .get_entries(namespace_id, ThreeDRange::full()) + // .await + // .unwrap() + // .count() + // .await + // ); + // info!( + // "betty count: {}", + // handle_betty + // .get_entries(namespace_id, ThreeDRange::full()) + // .await + // .unwrap() + // .count() + // .await + // ); + // tokio::time::sleep(Duration::from_secs(1)).await; + // } + // } + // }); + let (res_alfie, res_betty) = tokio::join!( run( node_id_alfie, @@ -426,24 +440,20 @@ mod tests { Ok(()) } async fn get_entries( - store: &StoreHandle, + store: &WillowHandle, namespace: NamespaceId, ) -> anyhow::Result> { - let (tx, rx) = flume::bounded(1024); - store - .send(ToActor::GetEntries { - namespace, - reply: tx, - range: ThreeDRange::full(), - }) - .await?; - let entries: HashSet<_> = rx.into_stream().collect::>().await; + let entries: HashSet<_> = store + .get_entries(namespace, 
ThreeDRange::full()) + .await? + .collect::>() + .await; Ok(entries) } async fn setup_and_insert( rng: &mut impl CryptoRngCore, - store: &StoreHandle, + store: &WillowHandle, namespace_secret: &NamespaceSecretKey, count: usize, track_entries: &mut impl Extend, diff --git a/iroh-willow/src/session/coroutine.rs b/iroh-willow/src/session/coroutine.rs index 28a81176b0..847fdb6960 100644 --- a/iroh-willow/src/session/coroutine.rs +++ b/iroh-willow/src/session/coroutine.rs @@ -8,7 +8,6 @@ use tracing::{debug, trace}; use crate::{ actor::{InitWithArea, WakeableCo, Yield}, - net::CHANNEL_CAP, proto::{ grouping::ThreeDRange, keys::NamespaceId, @@ -24,6 +23,8 @@ use crate::{ util::channel::{ReadError, WriteError}, }; +const INITIAL_GUARANTEES: u64 = u64::MAX; + #[derive(derive_more::Debug)] pub struct ControlRoutine { channels: Channels, @@ -43,7 +44,7 @@ impl ControlRoutine { let reveal_message = self.state().commitment_reveal()?; self.send(reveal_message).await?; let msg = ControlIssueGuarantee { - amount: CHANNEL_CAP as u64, + amount: INITIAL_GUARANTEES, channel: LogicalChannel::Reconciliation, }; self.send(msg).await?; diff --git a/iroh-willow/src/session/error.rs b/iroh-willow/src/session/error.rs index 694ba965bd..9580fa5b32 100644 --- a/iroh-willow/src/session/error.rs +++ b/iroh-willow/src/session/error.rs @@ -41,6 +41,8 @@ pub enum Error { InvalidParameters(&'static str), #[error("reached an invalid state")] InvalidState(&'static str), + #[error("actor failed to respond")] + ActorFailed, } impl From for Error { diff --git a/iroh-willow/src/session/state.rs b/iroh-willow/src/session/state.rs index 5f18efd6c3..fb3d2f2de7 100644 --- a/iroh-willow/src/session/state.rs +++ b/iroh-willow/src/session/state.rs @@ -2,14 +2,17 @@ use std::{cell::RefCell, collections::HashSet, rc::Rc}; use tracing::warn; -use crate::proto::{ - challenge::ChallengeState, - grouping::ThreeDRange, - keys::{NamespaceId, UserSecretKey}, - wgps::{ - AccessChallenge, AreaOfInterestHandle, 
CapabilityHandle, ChallengeHash, CommitmentReveal, - IntersectionHandle, Message, ReadCapability, SetupBindAreaOfInterest, - SetupBindReadCapability, SetupBindStaticToken, StaticToken, StaticTokenHandle, +use crate::{ + net::InitialTransmission, + proto::{ + challenge::ChallengeState, + grouping::ThreeDRange, + keys::{NamespaceId, UserSecretKey}, + wgps::{ + AreaOfInterestHandle, CapabilityHandle, CommitmentReveal, IntersectionHandle, Message, + ReadCapability, SetupBindAreaOfInterest, SetupBindReadCapability, SetupBindStaticToken, + StaticToken, StaticTokenHandle, + }, }, }; @@ -28,16 +31,12 @@ pub struct SessionState { } impl SessionState { - pub fn new( - our_role: Role, - our_nonce: AccessChallenge, - received_commitment: ChallengeHash, - _their_maximum_payload_size: usize, - ) -> Self { + pub fn new(our_role: Role, initial_transmission: InitialTransmission) -> Self { let challenge_state = ChallengeState::Committed { - our_nonce, - received_commitment, + our_nonce: initial_transmission.our_nonce, + received_commitment: initial_transmission.received_commitment, }; + // TODO: make use of initial_transmission.their_max_payload_size. 
Self { our_role, challenge: challenge_state, diff --git a/iroh-willow/src/util/channel.rs b/iroh-willow/src/util/channel.rs index 6fe887649e..5a7e4c408a 100644 --- a/iroh-willow/src/util/channel.rs +++ b/iroh-willow/src/util/channel.rs @@ -158,6 +158,13 @@ impl Shared { } fn writable_slice_exact(&mut self, len: usize) -> Option<&mut [u8]> { + tracing::trace!( + "write {}, remaining {} (guarantees {}, buf capacity {})", + len, + self.remaining_write_capacity(), + self.guarantees.get(), + self.max_buffer_size - self.buf.len() + ); if self.remaining_write_capacity() < len { None } else { @@ -290,6 +297,9 @@ impl Writer { pub fn close(&self) { self.shared.lock().unwrap().close() } + pub fn max_buffer_size(&self) -> usize { + self.shared.lock().unwrap().max_buffer_size + } } impl AsyncWrite for Writer { From ffd63d6836ece6e8b4f9487f4d0091836114d96d Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Mon, 13 May 2024 16:25:50 +0200 Subject: [PATCH 033/198] cleanup --- Cargo.lock | 34 ---------- iroh-willow/Cargo.toml | 38 +++-------- iroh-willow/src/actor.rs | 98 +++++++++++++--------------- iroh-willow/src/net.rs | 46 ++++++------- iroh-willow/src/session/channels.rs | 6 +- iroh-willow/src/session/coroutine.rs | 10 +-- 6 files changed, 82 insertions(+), 150 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6b52b447f2..91b9205509 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2926,41 +2926,27 @@ dependencies = [ "derive_more", "ed25519-dalek", "flume", - "futures", "futures-concurrency", "futures-lite 2.3.0", "futures-util", "genawaiter", - "hex", "iroh-base", "iroh-metrics", "iroh-net", "iroh-test", - "itertools 0.12.1", - "num_enum", - "once_cell", - "parking_lot", "postcard", "proptest", - "quinn", "rand", "rand_chacha", "rand_core", - "rayon", "redb 2.1.0", - "rtrb", - "self_cell", "serde", - "smallvec", "strum 0.25.0", "tempfile", "test-strategy", "thiserror", "tokio", - "tokio-stream", - "tokio-util", "tracing", - "tracing-chrome", 
"tracing-subscriber", "zerocopy 0.8.0-alpha.7", ] @@ -4624,15 +4610,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "rtrb" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "636b228b5adb32add7f0e41e5084d38aa66fb0f942e8a91751c1e90023288fbe" -dependencies = [ - "crossbeam-utils", -] - [[package]] name = "rustc-demangle" version = "0.1.24" @@ -5953,17 +5930,6 @@ dependencies = [ "syn 2.0.66", ] -[[package]] -name = "tracing-chrome" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf0a738ed5d6450a9fb96e86a23ad808de2b727fd1394585da5cdd6788ffe724" -dependencies = [ - "serde_json", - "tracing-core", - "tracing-subscriber", -] - [[package]] name = "tracing-core" version = "0.1.32" diff --git a/iroh-willow/Cargo.toml b/iroh-willow/Cargo.toml index 081f1b0754..dd3fce8b99 100644 --- a/iroh-willow/Cargo.toml +++ b/iroh-willow/Cargo.toml @@ -16,45 +16,27 @@ workspace = true [dependencies] anyhow = "1" +bytes = { version = "1.4", features = ["serde"] } derive_more = { version = "1.0.0-beta.1", features = ["debug", "deref", "display", "from", "try_into", "into", "as_ref", "try_from"] } ed25519-dalek = { version = "2.0.0", features = ["serde", "rand_core"] } flume = "0.11" +futures-concurrency = "7.6.0" +futures-lite = "2.3.0" +futures-util = "0.3.30" +genawaiter = "0.99.1" iroh-base = { version = "0.17.0", path = "../iroh-base" } iroh-metrics = { version = "0.17.0", path = "../iroh-metrics", optional = true } -num_enum = "0.7" +iroh-net = { version = "0.17.0", path = "../iroh-net" } postcard = { version = "1", default-features = false, features = ["alloc", "use-std", "experimental-derive"] } rand = "0.8.5" rand_core = "0.6.4" +redb = { version = "2.0.0" } serde = { version = "1.0.164", features = ["derive"] } strum = { version = "0.25", features = ["derive"] } -bytes = { version = "1.4", features = ["serde"] } -hex = "0.4" thiserror = "1" -tracing = "0.1" tokio = { version = "1", 
features = ["sync"] } - -# fs-store -redb = { version = "2.0.0" } -tempfile = { version = "3.4" } - -# net -iroh-net = { version = "0.17.0", optional = true, path = "../iroh-net" } -tokio-util = { version = "0.7", optional = true, features = ["codec", "io-util", "io"] } -tokio-stream = { version = "0.1", optional = true, features = ["sync"]} -quinn = { version = "0.10", optional = true } -futures = { version = "0.3", optional = true } -self_cell = "1.0.3" +tracing = "0.1" zerocopy = { version = "0.8.0-alpha.7", features = ["derive"] } -genawaiter = "0.99.1" -rtrb = "0.3.0" -parking_lot = "0.12.2" -once_cell = "1.19.0" -rayon = "1.10.0" -smallvec = "1.13.2" -itertools = "0.12.1" -futures-lite = "2.3.0" -futures-concurrency = "7.6.0" -futures-util = "0.3.30" [dev-dependencies] iroh-test = { path = "../iroh-test" } @@ -63,10 +45,8 @@ tokio = { version = "1", features = ["sync", "macros"] } proptest = "1.2.0" tempfile = "3.4" test-strategy = "0.3.1" -tracing-chrome = "0.7.2" tracing-subscriber = "0.3.18" [features] -default = ["net", "metrics"] -net = ["iroh-net", "tokio/io-util", "tokio-stream", "tokio-util", "quinn", "futures"] +default = ["metrics"] metrics = ["iroh-metrics"] diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/actor.rs index b20dacffe7..822f7fe82a 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -42,56 +42,13 @@ pub const INBOX_CAP: usize = 1024; pub type SessionId = NodeId; #[derive(Debug, Clone)] -pub struct WillowHandle { +pub struct ActorHandle { tx: flume::Sender, join_handle: Arc>>, } -#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)] -pub enum Interest { - Send, - Recv, -} - -#[derive(Debug, Clone)] -pub struct AssignedWaker { - waker: Notifier, - coro_id: CoroId, -} - -impl AssignedWaker { - pub fn wake(&self) { - self.waker.wake(self.coro_id) - } -} - -impl Wake for AssignedWaker { - fn wake(self: Arc) { - self.waker.wake(self.coro_id) - } -} - -#[derive(Debug, Clone)] -pub struct Notifier { - tx: flume::Sender, 
-} - -impl Notifier { - pub fn wake(&self, coro_id: CoroId) { - self.tx.send(coro_id).ok(); - } - - pub fn create_waker(&self, coro_id: CoroId) -> std::task::Waker { - Arc::new(AssignedWaker { - waker: self.clone(), - coro_id, - }) - .into() - } -} - -impl WillowHandle { - pub fn spawn(store: S, me: NodeId) -> WillowHandle { +impl ActorHandle { + pub fn spawn(store: S, me: NodeId) -> ActorHandle { let (tx, rx) = flume::bounded(INBOX_CAP); // This channel only tracks wake to resume messages to coroutines, which are a sinlge u64 // per wakeup. We want to issue wake calls synchronosuly without blocking, so we use an @@ -121,7 +78,7 @@ impl WillowHandle { }) .expect("failed to spawn thread"); let join_handle = Arc::new(Some(join_handle)); - WillowHandle { tx, join_handle } + ActorHandle { tx, join_handle } } pub async fn send(&self, action: ToActor) -> anyhow::Result<()> { self.tx.send_async(action).await?; @@ -186,7 +143,7 @@ impl WillowHandle { } } -impl Drop for WillowHandle { +impl Drop for ActorHandle { fn drop(&mut self) { // this means we're dropping the last reference if let Some(handle) = Arc::get_mut(&mut self.join_handle) { @@ -410,7 +367,7 @@ impl StorageThread { fn start_coroutine( &mut self, session_id: SessionId, - create_fn: impl FnOnce(WakeableCo, &mut Session) -> CoroFut, + create_fn: impl FnOnce(WakeableCoro, &mut Session) -> CoroFut, span_fn: impl FnOnce() -> Span, ) -> Result<(), Error> { let session = self @@ -432,7 +389,7 @@ impl StorageThread { drop(_guard); let gen = Gen::new(move |co| { - let co = WakeableCo::new(co, waker); + let co = WakeableCoro::new(co, waker); create_fn(co, session) }); let state = CoroutineState { @@ -491,13 +448,13 @@ pub enum Yield { } #[derive(derive_more::Debug)] -pub struct WakeableCo { +pub struct WakeableCoro { pub waker: Waker, #[debug(skip)] pub co: Co, } -impl WakeableCo { +impl WakeableCoro { pub fn new(co: Co, waker: Waker) -> Self { Self { co, waker } } @@ -524,3 +481,40 @@ impl WakeableCo { Pin::new(&mut 
fut).poll(&mut ctx) } } + +#[derive(Debug, Clone)] +pub struct CoroWaker { + waker: Notifier, + coro_id: CoroId, +} + +impl CoroWaker { + pub fn wake(&self) { + self.waker.wake(self.coro_id) + } +} + +impl Wake for CoroWaker { + fn wake(self: Arc) { + self.waker.wake(self.coro_id) + } +} + +#[derive(Debug, Clone)] +pub struct Notifier { + tx: flume::Sender, +} + +impl Notifier { + pub fn wake(&self, coro_id: CoroId) { + self.tx.send(coro_id).ok(); + } + + pub fn create_waker(&self, coro_id: CoroId) -> std::task::Waker { + Arc::new(CoroWaker { + waker: self.clone(), + coro_id, + }) + .into() + } +} diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index a6d24ad278..75d1fefc03 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -1,7 +1,8 @@ use anyhow::ensure; -use futures::TryFutureExt; use futures_concurrency::future::TryJoin; +use futures_util::future::TryFutureExt; use iroh_base::{hash::Hash, key::NodeId}; +use iroh_net::magic_endpoint::{Connection, RecvStream, SendStream}; use tokio::{ io::{AsyncReadExt, AsyncWriteExt}, task::JoinSet, @@ -9,7 +10,7 @@ use tokio::{ use tracing::{debug, error_span, instrument, trace, warn, Instrument}; use crate::{ - actor::WillowHandle, + actor::ActorHandle, proto::wgps::{ AccessChallenge, ChallengeHash, LogicalChannel, Message, CHALLENGE_HASH_LENGTH, MAX_PAYLOAD_SIZE_POWER, @@ -28,8 +29,8 @@ pub const CHANNEL_CAP: usize = 1024 * 64; #[instrument(skip_all, name = "willow_net", fields(me=%me.fmt_short(), peer=%peer.fmt_short()))] pub async fn run( me: NodeId, - store: WillowHandle, - conn: quinn::Connection, + actor: ActorHandle, + conn: Connection, peer: NodeId, our_role: Role, init: SessionInit, @@ -65,7 +66,7 @@ pub async fn run( logical_send, logical_recv, }; - let handle = store + let handle = actor .init_session(peer, our_role, initial_transmission, channels, init) .await?; @@ -85,7 +86,7 @@ struct MissingChannel(LogicalChannel); async fn open_logical_channels( join_set: &mut JoinSet>, - conn: 
quinn::Connection, + conn: Connection, our_role: Role, ) -> anyhow::Result<(LogicalChannelSenders, LogicalChannelReceivers)> { let cap = CHANNEL_CAP; @@ -162,8 +163,8 @@ fn spawn_channel( send_cap: usize, recv_cap: usize, guarantees: Guarantees, - send_stream: quinn::SendStream, - recv_stream: quinn::RecvStream, + send_stream: SendStream, + recv_stream: RecvStream, ) -> (Sender, Receiver) { let (sender, outbound_reader) = outbound_channel(send_cap, guarantees); let (inbound_writer, recveiver) = inbound_channel(recv_cap); @@ -183,10 +184,7 @@ fn spawn_channel( (sender, recveiver) } -async fn recv_loop( - mut recv_stream: quinn::RecvStream, - mut channel_writer: Writer, -) -> anyhow::Result<()> { +async fn recv_loop(mut recv_stream: RecvStream, mut channel_writer: Writer) -> anyhow::Result<()> { let max_buffer_size = channel_writer.max_buffer_size(); while let Some(buf) = recv_stream.read_chunk(max_buffer_size, true).await? { channel_writer.write_all(&buf.bytes[..]).await?; @@ -196,10 +194,7 @@ async fn recv_loop( Ok(()) } -async fn send_loop( - mut send_stream: quinn::SendStream, - channel_reader: Reader, -) -> anyhow::Result<()> { +async fn send_loop(mut send_stream: SendStream, channel_reader: Reader) -> anyhow::Result<()> { while let Some(data) = channel_reader.read_bytes().await { let len = data.len(); send_stream.write_chunk(data).await?; @@ -210,8 +205,8 @@ async fn send_loop( } async fn exchange_commitments( - send_stream: &mut quinn::SendStream, - recv_stream: &mut quinn::RecvStream, + send_stream: &mut SendStream, + recv_stream: &mut RecvStream, ) -> anyhow::Result { let our_nonce: AccessChallenge = rand::random(); let challenge_hash = Hash::new(&our_nonce); @@ -259,10 +254,7 @@ async fn join_all(mut join_set: JoinSet>) -> anyhow::Result<( #[cfg(test)] mod tests { - use std::{ - collections::HashSet, - time::{Instant}, - }; + use std::{collections::HashSet, time::Instant}; use futures_lite::StreamExt; use iroh_base::{hash::Hash, key::SecretKey}; @@ -272,7 
+264,7 @@ mod tests { use tracing::{debug, info}; use crate::{ - actor::WillowHandle, + actor::ActorHandle, net::run, proto::{ grouping::{AreaOfInterest, ThreeDRange}, @@ -336,10 +328,10 @@ mod tests { let mut expected_entries = HashSet::new(); let store_alfie = MemoryStore::default(); - let handle_alfie = WillowHandle::spawn(store_alfie, node_id_alfie); + let handle_alfie = ActorHandle::spawn(store_alfie, node_id_alfie); let store_betty = MemoryStore::default(); - let handle_betty = WillowHandle::spawn(store_betty, node_id_betty); + let handle_betty = ActorHandle::spawn(store_betty, node_id_betty); let init_alfie = setup_and_insert( &mut rng, @@ -440,7 +432,7 @@ mod tests { Ok(()) } async fn get_entries( - store: &WillowHandle, + store: &ActorHandle, namespace: NamespaceId, ) -> anyhow::Result> { let entries: HashSet<_> = store @@ -453,7 +445,7 @@ mod tests { async fn setup_and_insert( rng: &mut impl CryptoRngCore, - store: &WillowHandle, + store: &ActorHandle, namespace_secret: &NamespaceSecretKey, count: usize, track_entries: &mut impl Extend, diff --git a/iroh-willow/src/session/channels.rs b/iroh-willow/src/session/channels.rs index 846b2c45da..5c731856a2 100644 --- a/iroh-willow/src/session/channels.rs +++ b/iroh-willow/src/session/channels.rs @@ -1,7 +1,7 @@ use tracing::debug; use crate::{ - actor::WakeableCo, + actor::WakeableCoro, proto::wgps::{LogicalChannel, Message}, util::channel::{ReadError, Receiver, Sender, WriteError}, }; @@ -66,7 +66,7 @@ impl Channels { pub async fn send_co( &self, - co: &WakeableCo, + co: &WakeableCoro, message: impl Into, ) -> Result<(), WriteError> { let message = message.into(); @@ -79,7 +79,7 @@ impl Channels { pub async fn recv_co( &self, - co: &WakeableCo, + co: &WakeableCoro, channel: LogicalChannel, ) -> Option> { let message = co.yield_wake(self.receiver(channel).recv_message()).await; diff --git a/iroh-willow/src/session/coroutine.rs b/iroh-willow/src/session/coroutine.rs index 847fdb6960..1336bac75b 100644 --- 
a/iroh-willow/src/session/coroutine.rs +++ b/iroh-willow/src/session/coroutine.rs @@ -7,7 +7,7 @@ use std::{ use tracing::{debug, trace}; use crate::{ - actor::{InitWithArea, WakeableCo, Yield}, + actor::{InitWithArea, WakeableCoro, Yield}, proto::{ grouping::ThreeDRange, keys::NamespaceId, @@ -29,10 +29,10 @@ const INITIAL_GUARANTEES: u64 = u64::MAX; pub struct ControlRoutine { channels: Channels, state: SharedSessionState, - co: WakeableCo, + co: WakeableCoro, } impl ControlRoutine { - pub fn new(co: WakeableCo, channels: Channels, state: SharedSessionState) -> Self { + pub fn new(co: WakeableCoro, channels: Channels, state: SharedSessionState) -> Self { Self { channels, state, @@ -144,14 +144,14 @@ pub struct ReconcileRoutine { store_writer: Rc>, channels: Channels, state: SharedSessionState, - co: WakeableCo, + co: WakeableCoro, } // Note that all async methods yield to the owner of the coroutine. They are not running in a tokio // context. You may not perform regular async operations in them. 
impl ReconcileRoutine { pub fn new( - co: WakeableCo, + co: WakeableCoro, channels: Channels, state: SharedSessionState, store_snapshot: Rc, From 424f7703e04cbf782720bc12abf72d59376fa41b Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Mon, 13 May 2024 19:25:12 +0200 Subject: [PATCH 034/198] better async handling --- iroh-willow/src/actor.rs | 4 +- iroh-willow/src/session/coroutine.rs | 29 +++------------ iroh-willow/src/session/resource.rs | 39 +++++++------------- iroh-willow/src/session/state.rs | 55 +++++++++++++++++++++++++--- 4 files changed, 69 insertions(+), 58 deletions(-) diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/actor.rs index 822f7fe82a..aa8dfb679c 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -285,7 +285,7 @@ impl StorageThread { } => { let span = error_span!("session", peer=%peer.fmt_short()); let session = Session { - state: Rc::new(RefCell::new(state)), + state: SharedSessionState::new(state), channels, coroutines: Default::default(), span, @@ -435,8 +435,6 @@ impl StorageThread { } } } - - // fn on_coroutine_complete(&mut self, id: CoroId) } pub type InitWithArea = (AreaOfInterestHandle, AreaOfInterestHandle); diff --git a/iroh-willow/src/session/coroutine.rs b/iroh-willow/src/session/coroutine.rs index 1336bac75b..2f74407460 100644 --- a/iroh-willow/src/session/coroutine.rs +++ b/iroh-willow/src/session/coroutine.rs @@ -1,7 +1,6 @@ use std::{ cell::{RefCell, RefMut}, rc::Rc, - task::Poll, }; use tracing::{debug, trace}; @@ -14,7 +13,7 @@ use crate::{ wgps::{ AreaOfInterestHandle, ControlIssueGuarantee, Fingerprint, LengthyEntry, LogicalChannel, Message, ReconciliationAnnounceEntries, ReconciliationSendEntry, - ReconciliationSendFingerprint, SetupBindAreaOfInterest, StaticToken, StaticTokenHandle, + ReconciliationSendFingerprint, SetupBindAreaOfInterest, }, willow::AuthorisedEntry, }, @@ -334,7 +333,11 @@ impl ReconcileRoutine { async fn on_send_entry(&mut self, message: 
ReconciliationSendEntry) -> Result<(), Error> { let static_token = self - .get_static_token_eventually(message.static_token_handle) + .co + .yield_wake( + self.state + .get_resource_eventually(|r| &mut r.static_tokens, message.static_token_handle), + ) .await; self.state().on_send_entry()?; @@ -475,26 +478,6 @@ impl ReconcileRoutine { Ok(()) } - async fn get_static_token_eventually(&mut self, handle: StaticTokenHandle) -> StaticToken { - // TODO: We can't use co.yield_wake here because we have to drop state before yielding - loop { - let mut state = self.state.borrow_mut(); - let fut = state - .their_resources - .static_tokens - .get_eventually_cloned(handle); - match self.co.poll_once(fut) { - Poll::Ready(output) => break output, - Poll::Pending => { - // We need to drop state here, otherwise the RefMut on state would hold - // across the yield. - drop(state); - self.co.yield_(Yield::Pending).await; - } - } - } - } - fn state(&mut self) -> RefMut { self.state.borrow_mut() } diff --git a/iroh-willow/src/session/resource.rs b/iroh-willow/src/session/resource.rs index 71d7fc765e..7e056f68f5 100644 --- a/iroh-willow/src/session/resource.rs +++ b/iroh-willow/src/session/resource.rs @@ -1,6 +1,6 @@ use std::{ collections::{HashMap, VecDeque}, - task::{Poll, Waker}, + task::{Context, Poll, Waker}, }; use crate::proto::wgps::{ @@ -123,31 +123,18 @@ where }) .await } - // pub async fn get_eventually(&self, handle: &H) -> Result<&R, Error> { - // if let Some(resource) = self.map.get(handle).as_ref().map(|r| &r.value) { - // Some(resource) - // } else { - // // self.on_notify(handle) - // } - // } - - // pub fn get_or_notify(&mut self, handle: &H, notifier: CoroutineWaker) -> Option<&R> { - // if let Some(resource) = self.map.get(handle).as_ref().map(|r| &r.value) { - // Some(resource) - // } else { - // self.register_waker(*handle, notifier); - // None - // } - // } -} -impl ResourceMap -where - H: IsHandle, - R: Eq + PartialEq + Clone + 'static, -{ - pub async fn 
get_eventually_cloned(&mut self, handle: H) -> R { - let out = self.get_eventually(handle).await; - (*out).clone() + + pub fn poll_get_eventually(&mut self, handle: H, cx: &mut Context<'_>) -> Poll<&R> { + // cannot use self.get() and self.register_waker() here due to borrow checker. + if let Some(resource) = self.map.get(&handle).as_ref().map(|r| &r.value) { + Poll::Ready(resource) + } else { + self.wakers + .entry(handle) + .or_default() + .push_back(cx.waker().to_owned()); + Poll::Pending + } } } diff --git a/iroh-willow/src/session/state.rs b/iroh-willow/src/session/state.rs index fb3d2f2de7..e41a7e14c6 100644 --- a/iroh-willow/src/session/state.rs +++ b/iroh-willow/src/session/state.rs @@ -1,5 +1,11 @@ -use std::{cell::RefCell, collections::HashSet, rc::Rc}; +use std::{ + cell::{RefCell, RefMut}, + collections::HashSet, + rc::Rc, + task::Poll, +}; +use futures_lite::future::poll_fn; use tracing::warn; use crate::{ @@ -9,15 +15,52 @@ use crate::{ grouping::ThreeDRange, keys::{NamespaceId, UserSecretKey}, wgps::{ - AreaOfInterestHandle, CapabilityHandle, CommitmentReveal, IntersectionHandle, Message, - ReadCapability, SetupBindAreaOfInterest, SetupBindReadCapability, SetupBindStaticToken, - StaticToken, StaticTokenHandle, + AreaOfInterestHandle, CapabilityHandle, CommitmentReveal, IntersectionHandle, IsHandle, + Message, ReadCapability, SetupBindAreaOfInterest, SetupBindReadCapability, + SetupBindStaticToken, StaticToken, StaticTokenHandle, }, }, }; -use super::{resource::ScopedResources, Error, Role, Scope}; -pub type SharedSessionState = Rc>; +use super::{ + resource::{ResourceMap, ScopedResources}, + Error, Role, Scope, +}; + +#[derive(derive_more::Debug, Clone)] +pub struct SharedSessionState { + inner: Rc>, +} + +impl SharedSessionState { + pub fn new(state: SessionState) -> Self { + Self { + inner: Rc::new(RefCell::new(state)), + } + } + pub async fn get_resource_eventually( + &self, + selector: F, + handle: H, + ) -> R + where + F: for<'a> Fn(&'a mut 
ScopedResources) -> &'a mut ResourceMap, + { + let inner = self.inner.clone(); + poll_fn(move |cx| { + let mut inner = inner.borrow_mut(); + let res = selector(&mut std::ops::DerefMut::deref_mut(&mut inner).their_resources); + let r = std::task::ready!(res.poll_get_eventually(handle, cx)); + Poll::Ready(r.clone()) + }) + .await + } + + pub fn borrow_mut(&self) -> RefMut { + self.inner.borrow_mut() + } +} +// impl SharedSessio #[derive(Debug)] pub struct SessionState { From b39e74361f1771b912ccd702522832437b439810 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 14 May 2024 21:11:58 +0200 Subject: [PATCH 035/198] wip: use current-thread executor instead of manual coroutines --- Cargo.lock | 2 +- iroh-willow/Cargo.toml | 2 +- iroh-willow/src/actor.rs | 348 ++++-------------------- iroh-willow/src/net.rs | 110 +++++--- iroh-willow/src/proto/wgps.rs | 167 +++++++++--- iroh-willow/src/proto/willow.rs | 18 ++ iroh-willow/src/session.rs | 9 + iroh-willow/src/session/channels.rs | 190 +++++++++---- iroh-willow/src/session/coroutine.rs | 385 ++++++++++++++++++--------- iroh-willow/src/session/error.rs | 4 + iroh-willow/src/session/state.rs | 184 ++++++++++--- iroh-willow/src/util.rs | 1 + iroh-willow/src/util/channel.rs | 17 +- 13 files changed, 842 insertions(+), 595 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 91b9205509..9b7ddca286 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2941,7 +2941,7 @@ dependencies = [ "rand_core", "redb 2.1.0", "serde", - "strum 0.25.0", + "strum 0.26.2", "tempfile", "test-strategy", "thiserror", diff --git a/iroh-willow/Cargo.toml b/iroh-willow/Cargo.toml index dd3fce8b99..6174230230 100644 --- a/iroh-willow/Cargo.toml +++ b/iroh-willow/Cargo.toml @@ -32,7 +32,7 @@ rand = "0.8.5" rand_core = "0.6.4" redb = { version = "2.0.0" } serde = { version = "1.0.164", features = ["derive"] } -strum = { version = "0.25", features = ["derive"] } +strum = { version = "0.26", features = ["derive"] } thiserror = "1" tokio = { 
version = "1", features = ["sync"] } tracing = "0.1" diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/actor.rs index aa8dfb679c..c81cdd240d 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -1,26 +1,10 @@ -use std::{ - cell::RefCell, - collections::{HashMap, HashSet}, - future::Future, - pin::Pin, - rc::Rc, - sync::Arc, - task::{Context, Poll, Wake, Waker}, - thread::JoinHandle, -}; +use std::{cell::RefCell, collections::HashMap, rc::Rc, sync::Arc, thread::JoinHandle}; -use futures_lite::{ - future::{Boxed as BoxFuture, BoxedLocal as LocalBoxFuture}, - stream::Stream, -}; +use futures_lite::{future::Boxed as BoxFuture, stream::Stream, StreamExt}; use futures_util::future::{FutureExt, Shared}; -use genawaiter::{ - sync::{Co, Gen}, - GeneratorState, -}; use iroh_base::key::NodeId; use tokio::sync::oneshot; -use tracing::{debug, error, error_span, trace, warn, Span}; +use tracing::{debug, error, error_span, trace, warn, Instrument}; use crate::{ net::InitialTransmission, @@ -31,10 +15,11 @@ use crate::{ willow::{AuthorisedEntry, Entry}, }, session::{ - coroutine::{ControlRoutine, ReconcileRoutine}, - Channels, Error, Role, SessionInit, SessionState, SharedSessionState, + coroutine::ControlRoutine, Channels, Error, Role, SessionInit, SessionState, + SharedSessionState, }, store::Store, + util::task_set::{TaskKey, TaskSet}, }; pub const INBOX_CAP: usize = 1024; @@ -50,27 +35,17 @@ pub struct ActorHandle { impl ActorHandle { pub fn spawn(store: S, me: NodeId) -> ActorHandle { let (tx, rx) = flume::bounded(INBOX_CAP); - // This channel only tracks wake to resume messages to coroutines, which are a sinlge u64 - // per wakeup. We want to issue wake calls synchronosuly without blocking, so we use an - // unbounded channel here. The actual capacity is bounded by the number of sessions times - // the number of coroutines per session (which is fixed, currently at 2). 
- let (notify_tx, notify_rx) = flume::unbounded(); - // let actor_tx = tx.clone(); - let waker = Notifier { tx: notify_tx }; let join_handle = std::thread::Builder::new() .name("sync-actor".to_string()) .spawn(move || { let span = error_span!("willow_thread", me=%me.fmt_short()); let _guard = span.enter(); - let mut actor = StorageThread { + let actor = StorageThread { store: Rc::new(RefCell::new(store)), sessions: Default::default(), - coroutines: Default::default(), - next_coro_id: Default::default(), inbox_rx: rx, - notify_rx, - notifier: waker, + tasks: Default::default(), }; if let Err(error) = actor.run() { error!(?error, "storage thread failed"); @@ -198,80 +173,55 @@ pub enum ToActor { } #[derive(Debug)] -struct Session { - state: SharedSessionState, - channels: Channels, - coroutines: HashSet, - span: Span, +struct ActiveSession { on_done: oneshot::Sender>, + task_key: TaskKey, + // state: SharedSessionState } -type CoroId = u64; - #[derive(Debug)] pub struct StorageThread { inbox_rx: flume::Receiver, - notify_rx: flume::Receiver, store: Rc>, - sessions: HashMap, - coroutines: HashMap, - notifier: Notifier, - next_coro_id: u64, -} - -type CoroFut = LocalBoxFuture>; - -#[derive(derive_more::Debug)] -struct CoroutineState { - id: CoroId, - session_id: SessionId, - #[debug("Generator")] - gen: Gen, - span: Span, + sessions: HashMap, + tasks: TaskSet<(SessionId, Result<(), Error>)>, } impl StorageThread { - pub fn run(&mut self) -> anyhow::Result<()> { - enum Op { - Inbox(ToActor), - Notify(CoroId), - } + pub fn run(self) -> anyhow::Result<()> { + let rt = tokio::runtime::Builder::new_current_thread() + .build() + .expect("failed to start current-thread runtime for willow actor"); + let local_set = tokio::task::LocalSet::new(); + local_set.block_on(&rt, async move { self.run_async().await }) + } + async fn run_async(mut self) -> anyhow::Result<()> { loop { - let op = flume::Selector::new() - .recv(&self.inbox_rx, |r| r.map(Op::Inbox)) - .recv(&self.notify_rx, 
|r| r.map(Op::Notify)) - .wait(); - - let Ok(op) = op else { - break; - }; - - match op { - Op::Inbox(ToActor::Shutdown { reply }) => { - if let Some(reply) = reply { - reply.send(()).ok(); + tokio::select! { + msg = self.inbox_rx.recv_async() => match msg { + Err(_) => break, + Ok(ToActor::Shutdown { reply }) => { + if let Some(reply) = reply { + reply.send(()).ok(); + } + break; + } + Ok(msg) => self.handle_message(msg)?, + }, + Some((_key, res)) = self.tasks.next(), if !self.tasks.is_empty() => match res { + Ok((id, res)) => { + self.complete_session(&id, res); + } + Err(err) => { + warn!("task failed to join: {err}"); + return Err(err.into()); } - break; } - Op::Inbox(message) => self.handle_message(message)?, - Op::Notify(coro_id) => self.handle_resume(coro_id), - } + }; } Ok(()) } - fn handle_resume(&mut self, coro_id: CoroId) { - if let Some(coro) = self.coroutines.remove(&coro_id) { - let session_id = coro.session_id; - if let Err(error) = self.resume_coroutine(coro) { - warn!(?error, session=%session_id.fmt_short(), "abort session: coroutine failed"); - self.remove_session(&session_id, Err(error)); - } - } else { - debug!(%coro_id, "received wakeup for dropped coroutine"); - } - } - fn handle_message(&mut self, message: ToActor) -> Result<(), Error> { trace!(%message, "tick: handle_message"); match message { @@ -283,20 +233,21 @@ impl StorageThread { init, on_finish: on_done, } => { + // self.init_session(peer, state, channels, init, on_finish); let span = error_span!("session", peer=%peer.fmt_short()); - let session = Session { - state: SharedSessionState::new(state), - channels, - coroutines: Default::default(), - span, - on_done, - }; + let session_id = peer; + + // let Channels { send, recv } = channels; + // let store = self.store.clone(); + // let state = SharedSessionState::new(state, send, store, reconcile_state); + + let fut = ControlRoutine::run(channels, state, self.store.clone(), init); + let fut = fut.instrument(span.clone()); + let task_key = 
self + .tasks + .spawn_local(async move { (session_id, fut.await) }); + let session = ActiveSession { on_done, task_key }; self.sessions.insert(peer, session); - - if let Err(error) = self.start_control_routine(peer, init) { - warn!(?error, peer=%peer.fmt_short(), "abort session: starting failed"); - self.remove_session(&peer, Err(error)); - } } ToActor::GetEntries { namespace, @@ -317,202 +268,15 @@ impl StorageThread { Ok(()) } - fn remove_session(&mut self, peer: &NodeId, result: Result<(), Error>) { + fn complete_session(&mut self, peer: &NodeId, result: Result<(), Error>) { let session = self.sessions.remove(peer); if let Some(session) = session { - session.channels.close_all(); + self.tasks.remove(session.task_key); session.on_done.send(result).ok(); - for coro_id in session.coroutines { - self.coroutines.remove(&coro_id); - } } else { warn!("remove_session called for unknown session"); } } - - fn start_control_routine( - &mut self, - session_id: SessionId, - init: SessionInit, - ) -> Result<(), Error> { - let create_fn = |co, session: &mut Session| { - let channels = session.channels.clone(); - let state = session.state.clone(); - ControlRoutine::new(co, channels, state) - .run(init) - .boxed_local() - }; - let span_fn = || error_span!("control"); - self.start_coroutine(session_id, create_fn, span_fn) - } - - fn start_reconcile_routine( - &mut self, - session_id: SessionId, - start: Option, - ) -> Result<(), Error> { - let store_snapshot = Rc::new(self.store.borrow_mut().snapshot()?); - let store_writer = Rc::clone(&self.store); - let create_fn = |co, session: &mut Session| { - let channels = session.channels.clone(); - let state = session.state.clone(); - ReconcileRoutine::new(co, channels, state, store_snapshot, store_writer) - .run(start) - .boxed_local() - }; - let span_fn = || error_span!("reconcile"); - self.start_coroutine(session_id, create_fn, span_fn) - } - - fn start_coroutine( - &mut self, - session_id: SessionId, - create_fn: impl 
FnOnce(WakeableCoro, &mut Session) -> CoroFut, - span_fn: impl FnOnce() -> Span, - ) -> Result<(), Error> { - let session = self - .sessions - .get_mut(&session_id) - .ok_or(Error::SessionNotFound)?; - - let id = { - let id = self.next_coro_id; - self.next_coro_id += 1; - id - }; - - session.coroutines.insert(id); - let waker = self.notifier.create_waker(id); - - let _guard = session.span.enter(); - let span = span_fn(); - drop(_guard); - - let gen = Gen::new(move |co| { - let co = WakeableCoro::new(co, waker); - create_fn(co, session) - }); - let state = CoroutineState { - id, - session_id, - gen, - span, - }; - self.resume_coroutine(state) - } - - fn resume_coroutine(&mut self, mut coro: CoroutineState) -> Result<(), Error> { - let _guard = coro.span.enter(); - trace!("resume"); - loop { - match coro.gen.resume() { - GeneratorState::Yielded(yielded) => { - trace!(?yielded, "yield"); - match yielded { - Yield::Pending => { - drop(_guard); - self.coroutines.insert(coro.id, coro); - break Ok(()); - } - Yield::StartReconciliation(start) => { - self.start_reconcile_routine(coro.session_id, start)?; - } - } - } - GeneratorState::Complete(res) => { - let session = self - .sessions - .get_mut(&coro.session_id) - .ok_or(Error::SessionNotFound)?; - session.coroutines.remove(&coro.id); - let is_last = session.coroutines.is_empty(); - debug!(?res, ?is_last, "routine completed"); - if res.is_err() || is_last { - self.remove_session(&coro.session_id, res) - } - break Ok(()); - } - } - } - } } -pub type InitWithArea = (AreaOfInterestHandle, AreaOfInterestHandle); - -#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] -pub enum Yield { - Pending, - StartReconciliation(Option), -} - -#[derive(derive_more::Debug)] -pub struct WakeableCoro { - pub waker: Waker, - #[debug(skip)] - pub co: Co, -} - -impl WakeableCoro { - pub fn new(co: Co, waker: Waker) -> Self { - Self { co, waker } - } - pub async fn yield_(&self, value: Yield) { - self.co.yield_(value).await - } - - pub async fn 
yield_wake(&self, fut: impl Future) -> T { - tokio::pin!(fut); - let mut ctx = Context::from_waker(&self.waker); - loop { - match Pin::new(&mut fut).poll(&mut ctx) { - Poll::Ready(output) => return output, - Poll::Pending => { - self.co.yield_(Yield::Pending).await; - } - } - } - } - - pub fn poll_once(&self, fut: impl Future) -> Poll { - tokio::pin!(fut); - let mut ctx = Context::from_waker(&self.waker); - Pin::new(&mut fut).poll(&mut ctx) - } -} - -#[derive(Debug, Clone)] -pub struct CoroWaker { - waker: Notifier, - coro_id: CoroId, -} - -impl CoroWaker { - pub fn wake(&self) { - self.waker.wake(self.coro_id) - } -} - -impl Wake for CoroWaker { - fn wake(self: Arc) { - self.waker.wake(self.coro_id) - } -} - -#[derive(Debug, Clone)] -pub struct Notifier { - tx: flume::Sender, -} - -impl Notifier { - pub fn wake(&self, coro_id: CoroId) { - self.tx.send(coro_id).ok(); - } - - pub fn create_waker(&self, coro_id: CoroId) -> std::task::Waker { - Arc::new(CoroWaker { - waker: self.clone(), - coro_id, - }) - .into() - } -} +pub type AreaOfInterestHandlePair = (AreaOfInterestHandle, AreaOfInterestHandle); diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 75d1fefc03..348309ce5c 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -3,6 +3,7 @@ use futures_concurrency::future::TryJoin; use futures_util::future::TryFutureExt; use iroh_base::{hash::Hash, key::NodeId}; use iroh_net::magic_endpoint::{Connection, RecvStream, SendStream}; +use strum::{EnumCount, VariantArray}; use tokio::{ io::{AsyncReadExt, AsyncWriteExt}, task::JoinSet, @@ -12,11 +13,14 @@ use tracing::{debug, error_span, instrument, trace, warn, Instrument}; use crate::{ actor::ActorHandle, proto::wgps::{ - AccessChallenge, ChallengeHash, LogicalChannel, Message, CHALLENGE_HASH_LENGTH, + AccessChallenge, ChallengeHash, Channel, LogicalChannel, Message, CHALLENGE_HASH_LENGTH, MAX_PAYLOAD_SIZE_POWER, }, session::{ - channels::{Channels, LogicalChannelReceivers, 
LogicalChannelSenders}, + channels::{ + ChannelReceivers, ChannelSenders, Channels, LogicalChannelReceivers, + LogicalChannelSenders, + }, Role, SessionInit, }, util::channel::{ @@ -43,14 +47,15 @@ pub async fn run( Role::Betty => conn.accept_bi().await?, }; control_send_stream.set_priority(i32::MAX)?; + debug!("control channel ready"); let initial_transmission = exchange_commitments(&mut control_send_stream, &mut control_recv_stream).await?; - debug!("commitments exchanged"); + debug!("exchanged commitments"); let (control_send, control_recv) = spawn_channel( &mut join_set, - LogicalChannel::Control, + Channel::Control, CHANNEL_CAP, CHANNEL_CAP, Guarantees::Unlimited, @@ -59,12 +64,16 @@ pub async fn run( ); let (logical_send, logical_recv) = open_logical_channels(&mut join_set, conn, our_role).await?; - debug!("channels opened"); + debug!("logical channels ready"); let channels = Channels { - control_send, - control_recv, - logical_send, - logical_recv, + send: ChannelSenders { + control_send, + logical_send, + }, + recv: ChannelReceivers { + control_recv, + logical_recv, + }, }; let handle = actor .init_session(peer, our_role, initial_transmission, channels, init) @@ -90,7 +99,7 @@ async fn open_logical_channels( our_role: Role, ) -> anyhow::Result<(LogicalChannelSenders, LogicalChannelReceivers)> { let cap = CHANNEL_CAP; - let channels = [LogicalChannel::Reconciliation, LogicalChannel::StaticToken]; + let channels = LogicalChannel::all(); let mut channels = match our_role { // Alfie opens a quic stream for each logical channel, and sends a single byte with the // channel id. 
@@ -99,23 +108,26 @@ async fn open_logical_channels( .map(|ch| { let conn = conn.clone(); async move { - let ch_id = ch as u8; let (mut send, recv) = conn.open_bi().await?; - send.write_u8(ch_id).await?; + send.write_u8(ch.id()).await?; + trace!(?ch, "opened bi stream"); Result::<_, anyhow::Error>::Ok((ch, Some((send, recv)))) } }) .try_join() .await } - // Alfie accepts as many quick streams as there are logical channels, and reads a single + // Betty accepts as many quick streams as there are logical channels, and reads a single // byte on each, which is expected to contain a channel id. Role::Betty => { channels .map(|_| async { let (send, mut recv) = conn.accept_bi().await?; + trace!("accepted bi stream"); let channel_id = recv.read_u8().await?; - let channel = LogicalChannel::try_from(channel_id)?; + trace!("read channel id {channel_id}"); + let channel = LogicalChannel::from_id(channel_id)?; + trace!("accepted bi stream for logical channel {channel:?}"); Result::<_, anyhow::Error>::Ok((channel, Some((send, recv)))) }) .try_join() @@ -123,17 +135,16 @@ async fn open_logical_channels( } }?; - let mut take_and_spawn_channel = |ch| { + let mut take_and_spawn_channel = |channel| { channels .iter_mut() - .find(|(c, _)| *c == ch) - .map(|(_, streams)| streams.take()) + .find_map(|(ch, streams)| (*ch == channel).then(|| streams.take())) .flatten() - .ok_or(MissingChannel(ch)) + .ok_or(MissingChannel(channel)) .map(|(send_stream, recv_stream)| { spawn_channel( join_set, - ch, + Channel::Logical(channel), cap, cap, Guarantees::Limited(0), @@ -145,21 +156,27 @@ async fn open_logical_channels( let rec = take_and_spawn_channel(LogicalChannel::Reconciliation)?; let stt = take_and_spawn_channel(LogicalChannel::StaticToken)?; + let aoi = take_and_spawn_channel(LogicalChannel::AreaOfInterest)?; + let cap = take_and_spawn_channel(LogicalChannel::Capability)?; Ok(( LogicalChannelSenders { reconciliation: rec.0, static_tokens: stt.0, + aoi: aoi.0, + capability: cap.0, }, 
LogicalChannelReceivers { - reconciliation: rec.1, - static_tokens: stt.1, + reconciliation_recv: rec.1.into(), + static_tokens_recv: stt.1.into(), + aoi_recv: aoi.1.into(), + capability_recv: cap.1.into(), }, )) } fn spawn_channel( join_set: &mut JoinSet>, - ch: LogicalChannel, + ch: Channel, send_cap: usize, recv_cap: usize, guarantees: Guarantees, @@ -254,9 +271,10 @@ async fn join_all(mut join_set: JoinSet>) -> anyhow::Result<( #[cfg(test)] mod tests { - use std::{collections::HashSet, time::Instant}; + use std::{collections::BTreeSet, time::Instant}; use futures_lite::StreamExt; + use futures_util::FutureExt; use iroh_base::{hash::Hash, key::SecretKey}; use iroh_net::MagicEndpoint; use rand::SeedableRng; @@ -325,7 +343,7 @@ mod tests { let namespace_id: NamespaceId = namespace_secret.public_key().into(); let start = Instant::now(); - let mut expected_entries = HashSet::new(); + let mut expected_entries = BTreeSet::new(); let store_alfie = MemoryStore::default(); let handle_alfie = ActorHandle::spawn(store_alfie, node_id_alfie); @@ -392,7 +410,11 @@ mod tests { node_id_betty, Role::Alfie, init_alfie - ), + ) + .map(|res| { + info!("alfie done: {res:?}"); + res + }), run( node_id_betty, handle_betty.clone(), @@ -400,7 +422,11 @@ mod tests { node_id_alfie, Role::Betty, init_betty - ), + ) + .map(|res| { + info!("betty done: {res:?}"); + res + }), ); info!(time=?start.elapsed(), "reconciliation finished!"); println!("reconciliation took {:?}", start.elapsed()); @@ -418,27 +444,33 @@ mod tests { assert!(res_alfie.is_ok()); assert!(res_betty.is_ok()); - assert_eq!( - get_entries(&handle_alfie, namespace_id).await?, - expected_entries, - "alfie expected entries" - ); - assert_eq!( - get_entries(&handle_betty, namespace_id).await?, - expected_entries, - "bettyexpected entries" - ); + // assert_eq!( + // get_entries(&handle_alfie, namespace_id).await?, + // expected_entries, + // "alfie expected entries" + // ); + // assert_eq!( + // get_entries(&handle_betty, 
namespace_id).await?, + // expected_entries, + // "bettyexpected entries" + // ); + let alfie_entries = get_entries(&handle_alfie, namespace_id).await?; + let betty_entries = get_entries(&handle_alfie, namespace_id).await?; + info!("alfie has now {} entries", alfie_entries.len()); + info!("betty has now {} entries", betty_entries.len()); + assert!(alfie_entries == expected_entries, "alfie expected entries"); + assert!(betty_entries == expected_entries, "betty expected entries"); Ok(()) } async fn get_entries( store: &ActorHandle, namespace: NamespaceId, - ) -> anyhow::Result> { - let entries: HashSet<_> = store + ) -> anyhow::Result> { + let entries: BTreeSet<_> = store .get_entries(namespace, ThreeDRange::full()) .await? - .collect::>() + .collect::>() .await; Ok(entries) } diff --git a/iroh-willow/src/proto/wgps.rs b/iroh-willow/src/proto/wgps.rs index 812c4da5aa..b9208f1c88 100644 --- a/iroh-willow/src/proto/wgps.rs +++ b/iroh-willow/src/proto/wgps.rs @@ -3,6 +3,7 @@ use std::{fmt, io::Write}; use iroh_base::hash::Hash; use serde::{Deserialize, Serialize}; +use strum::{EnumCount, VariantArray}; use crate::util::{DecodeOutcome, Decoder, Encoder}; @@ -68,13 +69,55 @@ pub enum HandleType { StaticToken, } +#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, derive_more::TryFrom)] +pub enum Channel { + Control, + Logical(LogicalChannel), +} + +impl Channel { + pub fn fmt_short(&self) -> &'static str { + match self { + Channel::Control => "Ctl", + Channel::Logical(ch) => ch.fmt_short(), + } + } + + pub fn id(&self) -> u8 { + match self { + Channel::Control => 0, + Channel::Logical(ch) => ch.id(), + } + } + + pub fn from_id(self, id: u8) -> Result { + match id { + 0 => Ok(Self::Control), + _ => { + let ch = LogicalChannel::from_id(id)?; + Ok(Self::Logical(ch)) + } + } + } +} + /// The different logical channels employed by the WGPS. 
-#[derive(Debug, Serialize, Deserialize, Copy, Clone, Eq, PartialEq, Hash, derive_more::TryFrom)] +#[derive( + Debug, + Serialize, + Deserialize, + Copy, + Clone, + Eq, + PartialEq, + Hash, + strum::EnumIter, + strum::VariantArray, + strum::EnumCount, +)] pub enum LogicalChannel { - /// Control channel - Control = 0, /// Logical channel for performing 3d range-based set reconciliation. - Reconciliation = 1, + Reconciliation, // TODO: use all the channels // right now everything but reconciliation goes into the control channel // @@ -84,42 +127,54 @@ pub enum LogicalChannel { // /// Logical channel for controlling the binding of new IntersectionHandles. // Intersection, // - // /// Logical channel for controlling the binding of new CapabilityHandles. - // Capability, + /// Logical channel for controlling the binding of new CapabilityHandles. + Capability, // - // /// Logical channel for controlling the binding of new AreaOfInterestHandles. - // AreaOfInterest, + /// Logical channel for controlling the binding of new AreaOfInterestHandles. + AreaOfInterest, // // /// Logical channel for controlling the binding of new PayloadRequestHandles. // PayloadRequest, // /// Logical channel for controlling the binding of new StaticTokenHandles. 
- StaticToken = 8, + StaticToken, } #[derive(Debug, thiserror::Error)] #[error("invalid channel id")] pub struct InvalidChannelId; -impl TryFrom for LogicalChannel { - type Error = InvalidChannelId; +impl LogicalChannel { + pub fn all() -> [LogicalChannel; LogicalChannel::COUNT] { + LogicalChannel::VARIANTS + .try_into() + .expect("statically checked") + } + pub fn fmt_short(&self) -> &'static str { + match self { + LogicalChannel::Reconciliation => "Rec", + LogicalChannel::StaticToken => "StT", + LogicalChannel::Capability => "Cap", + LogicalChannel::AreaOfInterest => "AoI", + } + } - fn try_from(value: u8) -> Result { - match value { - 0 => Ok(Self::Control), - 1 => Ok(Self::Reconciliation), - 8 => Ok(Self::StaticToken), + pub fn from_id(id: u8) -> Result { + match id { + 2 => Ok(Self::AreaOfInterest), + 3 => Ok(Self::Capability), + 4 => Ok(Self::StaticToken), + 5 => Ok(Self::Reconciliation), _ => Err(InvalidChannelId), } } -} -impl LogicalChannel { - pub fn fmt_short(&self) -> &str { + pub fn id(&self) -> u8 { match self { - LogicalChannel::Control => "Ctl", - LogicalChannel::Reconciliation => "Rec", - LogicalChannel::StaticToken => "StT", + LogicalChannel::AreaOfInterest => 2, + LogicalChannel::Capability => 3, + LogicalChannel::StaticToken => 4, + LogicalChannel::Reconciliation => 5, } } } @@ -192,7 +247,14 @@ pub struct CommitmentReveal { pub nonce: AccessChallenge, } -#[derive(Serialize, Deserialize, derive_more::From, derive_more::Debug, strum::Display)] +#[derive( + Serialize, + Deserialize, + derive_more::From, + derive_more::TryInto, + derive_more::Debug, + strum::Display, +)] pub enum Message { #[debug("{:?}", _0)] CommitmentReveal(CommitmentReveal), @@ -278,12 +340,23 @@ impl Decoder for Message { } impl Message { - pub fn logical_channel(&self) -> LogicalChannel { + pub fn channel(&self) -> Channel { match self { Message::ReconciliationSendFingerprint(_) | Message::ReconciliationAnnounceEntries(_) - | Message::ReconciliationSendEntry(_) => 
LogicalChannel::Reconciliation, - _ => LogicalChannel::Control, + | Message::ReconciliationSendEntry(_) => { + Channel::Logical(LogicalChannel::Reconciliation) + } + Message::SetupBindStaticToken(_) => Channel::Logical(LogicalChannel::StaticToken), + Message::SetupBindReadCapability(_) => Channel::Logical(LogicalChannel::Capability), + Message::SetupBindAreaOfInterest(_) => Channel::Logical(LogicalChannel::AreaOfInterest), + Message::CommitmentReveal(_) + | Message::ControlIssueGuarantee(_) + | Message::ControlAbsolve(_) + | Message::ControlPlead(_) + | Message::ControlAnnounceDropping(_) + | Message::ControlApologise(_) + | Message::ControlFreeHandle(_) => Channel::Control, } } } @@ -323,22 +396,32 @@ impl Message { // } // } // -// #[derive(Debug, derive_more::From)] -// pub enum ReconciliationMessage { -// SendFingerprint(ReconciliationSendFingerprint), -// AnnounceEntries(ReconciliationAnnounceEntries), -// SendEntry(ReconciliationSendEntry), -// } -// -// impl From for Message { -// fn from(message: ReconciliationMessage) -> Self { -// match message { -// ReconciliationMessage::SendFingerprint(message) => message.into(), -// ReconciliationMessage::AnnounceEntries(message) => message.into(), -// ReconciliationMessage::SendEntry(message) => message.into(), -// } -// } -// } +#[derive(Debug, derive_more::From, strum::Display)] +pub enum ReconciliationMessage { + SendFingerprint(ReconciliationSendFingerprint), + AnnounceEntries(ReconciliationAnnounceEntries), + SendEntry(ReconciliationSendEntry), +} +impl TryFrom for ReconciliationMessage { + type Error = (); + fn try_from(message: Message) -> Result { + match message { + Message::ReconciliationSendFingerprint(msg) => Ok(msg.into()), + Message::ReconciliationAnnounceEntries(msg) => Ok(msg.into()), + Message::ReconciliationSendEntry(msg) => Ok(msg.into()), + _ => Err(()), + } + } +} +impl From for Message { + fn from(message: ReconciliationMessage) -> Self { + match message { + 
ReconciliationMessage::SendFingerprint(message) => message.into(), + ReconciliationMessage::AnnounceEntries(message) => message.into(), + ReconciliationMessage::SendEntry(message) => message.into(), + } + } +} // // impl Encoder for ReconciliationMessage { // fn encoded_len(&self) -> usize { diff --git a/iroh-willow/src/proto/willow.rs b/iroh-willow/src/proto/willow.rs index 5161e92d33..cdcf375156 100644 --- a/iroh-willow/src/proto/willow.rs +++ b/iroh-willow/src/proto/willow.rs @@ -231,6 +231,24 @@ impl Entry { } } +impl PartialOrd for Entry { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for Entry { + fn cmp(&self, other: &Self) -> Ordering { + self.namespace_id + .cmp(&other.namespace_id) + .then(self.subspace_id.cmp(&other.subspace_id)) + .then(self.path.cmp(&other.path)) + .then(self.timestamp.cmp(&other.timestamp)) + .then(self.payload_digest.cmp(&other.payload_digest)) + .then(self.payload_length.cmp(&other.payload_length)) + } +} + /// A PossiblyAuthorisedEntry is a pair of an Entry and an AuthorisationToken. #[derive(Debug, Serialize, Deserialize)] pub struct PossiblyAuthorisedEntry(Entry, AuthorisationToken); diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 6d6c57a86b..2ea46e951b 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -23,6 +23,15 @@ pub enum Role { Betty, } +impl Role { + pub fn is_alfie(&self) -> bool { + matches!(self, Role::Alfie) + } + pub fn is_betty(&self) -> bool { + matches!(self, Role::Betty) + } +} + /// The bind scope for resources. 
/// /// Resources are bound by either peer diff --git a/iroh-willow/src/session/channels.rs b/iroh-willow/src/session/channels.rs index 5c731856a2..2c96a6c958 100644 --- a/iroh-willow/src/session/channels.rs +++ b/iroh-willow/src/session/channels.rs @@ -1,20 +1,99 @@ +use std::{ + marker::PhantomData, + pin::Pin, + task::{self, ready, Poll}, +}; + +use futures_lite::{Stream, StreamExt}; use tracing::debug; use crate::{ - actor::WakeableCoro, - proto::wgps::{LogicalChannel, Message}, + proto::wgps::{ + Channel, LogicalChannel, Message, ReconciliationMessage, SetupBindAreaOfInterest, + SetupBindReadCapability, SetupBindStaticToken, + }, util::channel::{ReadError, Receiver, Sender, WriteError}, }; -#[derive(Debug, Clone)] +use super::Error; + +// pub struct MessageSender { +// inner: Sender, +// _phantom: PhantomData +// } +// impl> MessageSender { +// async fn send(&self, message: T) -> Result<(), WriteError> { +// self.inner.send_message(&message.into()).await +// } +// } + +#[derive(Debug)] +pub struct MessageReceiver { + inner: Receiver, + _phantom: PhantomData, +} + +impl> MessageReceiver { + pub async fn recv(&self) -> Option> { + let message = self.inner.recv().await?; + match message { + Err(err) => Some(Err(err.into())), + Ok(message) => { + debug!(%message, "recv"); + let message = message.try_into().map_err(|_| Error::WrongChannel); + Some(message) + } + } + } + pub fn close(&self) { + self.inner.close() + } + + pub fn poll_recv(&mut self, cx: &mut task::Context<'_>) -> Poll>> { + let message = ready!(Pin::new(&mut self.inner).poll_next(cx)); + let message = match message { + None => None, + Some(Err(err)) => Some(Err(err.into())), + Some(Ok(message)) => { + debug!(%message, "recv"); + let message = message.try_into().map_err(|_| Error::WrongChannel); + Some(message) + } + }; + Poll::Ready(message) + } +} + +impl + Unpin> Stream for MessageReceiver { + type Item = Result; + fn poll_next(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { + 
self.get_mut().poll_recv(cx) + } +} + +impl> From> for MessageReceiver { + fn from(inner: Receiver) -> Self { + Self { + inner, + _phantom: PhantomData, + } + } +} + +#[derive(Debug)] pub struct LogicalChannelReceivers { - pub reconciliation: Receiver, - pub static_tokens: Receiver, + pub reconciliation_recv: MessageReceiver, + pub static_tokens_recv: MessageReceiver, + pub capability_recv: MessageReceiver, + pub aoi_recv: MessageReceiver, } + impl LogicalChannelReceivers { pub fn close(&self) { - self.reconciliation.close(); - self.static_tokens.close(); + self.reconciliation_recv.close(); + self.static_tokens_recv.close(); + self.capability_recv.close(); + self.aoi_recv.close(); } } @@ -22,70 +101,89 @@ impl LogicalChannelReceivers { pub struct LogicalChannelSenders { pub reconciliation: Sender, pub static_tokens: Sender, + pub aoi: Sender, + pub capability: Sender, } impl LogicalChannelSenders { pub fn close(&self) { self.reconciliation.close(); self.static_tokens.close(); + self.aoi.close(); + self.capability.close(); + } + + pub fn get(&self, channel: LogicalChannel) -> &Sender { + match channel { + LogicalChannel::Reconciliation => &self.reconciliation, + LogicalChannel::StaticToken => &self.static_tokens, + LogicalChannel::Capability => &self.capability, + LogicalChannel::AreaOfInterest => &self.aoi, + } } } #[derive(Debug, Clone)] -pub struct Channels { +pub struct ChannelSenders { pub control_send: Sender, - pub control_recv: Receiver, pub logical_send: LogicalChannelSenders, +} + +#[derive(Debug)] +pub struct ChannelReceivers { + pub control_recv: Receiver, pub logical_recv: LogicalChannelReceivers, } -impl Channels { +#[derive(Debug)] +pub struct Channels { + pub send: ChannelSenders, + pub recv: ChannelReceivers, +} + +impl ChannelSenders { pub fn close_all(&self) { - self.control_send.close(); - self.control_recv.close(); - self.logical_send.close(); - self.logical_recv.close(); - } - pub fn close_send(&self) { self.control_send.close(); 
self.logical_send.close(); } - pub fn sender(&self, channel: LogicalChannel) -> &Sender { + pub fn get(&self, channel: Channel) -> &Sender { match channel { - LogicalChannel::Control => &self.control_send, - LogicalChannel::Reconciliation => &self.logical_send.reconciliation, - LogicalChannel::StaticToken => &self.logical_send.static_tokens, + Channel::Control => &self.control_send, + Channel::Logical(channel) => self.get_logical(channel), } } - pub fn receiver(&self, channel: LogicalChannel) -> &Receiver { - match channel { - LogicalChannel::Control => &self.control_recv, - LogicalChannel::Reconciliation => &self.logical_recv.reconciliation, - LogicalChannel::StaticToken => &self.logical_recv.static_tokens, - } + + pub fn get_logical(&self, channel: LogicalChannel) -> &Sender { + self.logical_send.get(channel) } - pub async fn send_co( - &self, - co: &WakeableCoro, - message: impl Into, - ) -> Result<(), WriteError> { - let message = message.into(); - let channel = message.logical_channel(); - co.yield_wake(self.sender(channel).send_message(&message)) - .await?; - debug!(%message, ch=%channel.fmt_short(), "send"); + pub async fn send(&self, message: impl Into) -> Result<(), WriteError> { + let message: Message = message.into(); + let channel = message.channel(); + tracing::trace!(%message, ch=%channel.fmt_short(), "now send"); + self.get(channel).send_message(&message).await?; + debug!(%message, ch=%channel.fmt_short(), "sent"); Ok(()) } +} - pub async fn recv_co( - &self, - co: &WakeableCoro, - channel: LogicalChannel, - ) -> Option> { - let message = co.yield_wake(self.receiver(channel).recv_message()).await; - if let Some(Ok(message)) = &message { - debug!(%message, ch=%channel.fmt_short(),"recv"); - } - message +impl ChannelReceivers { + pub fn close_all(&self) { + self.control_recv.close(); + self.logical_recv.close(); } + // pub fn get(&self, channel: LogicalChannel) -> &Receiver { + // match channel { + // LogicalChannel::Control => &self.control_recv, + // 
LogicalChannel::Reconciliation => &self.logical_recv.reconciliation_recv, + // LogicalChannel::StaticToken => &self.logical_recv.static_tokens_recv, + // } + // } + // + // pub async fn recv(&self, channel: LogicalChannel) -> Option> { + // let message = self.get(channel).recv().await; + // if let Some(Ok(message)) = &message { + // debug!(%message, ch=%channel.fmt_short(),"recv"); + // } + // message + // } } diff --git a/iroh-willow/src/session/coroutine.rs b/iroh-willow/src/session/coroutine.rs index 2f74407460..b486806f9e 100644 --- a/iroh-willow/src/session/coroutine.rs +++ b/iroh-willow/src/session/coroutine.rs @@ -3,93 +3,243 @@ use std::{ rc::Rc, }; -use tracing::{debug, trace}; +use futures_lite::StreamExt; +use strum::IntoEnumIterator; +use tracing::{debug, error_span, trace}; use crate::{ - actor::{InitWithArea, WakeableCoro, Yield}, + actor::AreaOfInterestHandlePair, proto::{ grouping::ThreeDRange, keys::NamespaceId, wgps::{ AreaOfInterestHandle, ControlIssueGuarantee, Fingerprint, LengthyEntry, LogicalChannel, - Message, ReconciliationAnnounceEntries, ReconciliationSendEntry, + Message, ReconciliationAnnounceEntries, ReconciliationMessage, ReconciliationSendEntry, ReconciliationSendFingerprint, SetupBindAreaOfInterest, }, willow::AuthorisedEntry, }, - session::{Channels, Error, SessionInit, SessionState, SharedSessionState}, + session::{ + channels::LogicalChannelReceivers, Channels, Error, SessionInit, SessionState, + SharedSessionState, + }, store::{ReadonlyStore, SplitAction, Store, SyncConfig}, - util::channel::{ReadError, WriteError}, + util::{ + channel::{Receiver, WriteError}, + task_set::TaskKey, + }, }; +use super::channels::{ChannelReceivers, MessageReceiver}; + const INITIAL_GUARANTEES: u64 = u64::MAX; #[derive(derive_more::Debug)] -pub struct ControlRoutine { - channels: Channels, - state: SharedSessionState, - co: WakeableCoro, +pub struct ControlRoutine { + control_recv: Receiver, + state: SharedSessionState, + init: Option, +} + 
+#[derive(Debug)] +pub enum ReconcileState { + Idle(Option>), + Running(TaskKey), } -impl ControlRoutine { - pub fn new(co: WakeableCoro, channels: Channels, state: SharedSessionState) -> Self { + +impl ReconcileState { + fn take_receiver(&mut self) -> Option> { + match self { + Self::Idle(recv) => recv.take(), + _ => None, + } + } +} + +impl ControlRoutine { + pub async fn run( + channels: Channels, + state: SessionState, + store: Rc>, + init: SessionInit, + ) -> Result<(), Error> { + let Channels { send, recv } = channels; + let ChannelReceivers { + control_recv, + logical_recv, + } = recv; + let LogicalChannelReceivers { + reconciliation_recv, + mut static_tokens_recv, + mut capability_recv, + mut aoi_recv, + } = logical_recv; + + let reconcile_state = ReconcileState::Idle(Some(reconciliation_recv)); + let state = SharedSessionState::new(state, send, store, reconcile_state); + + // spawn a task to handle incoming static tokens. + state.spawn(error_span!("stt"), move |state| async move { + while let Some(message) = static_tokens_recv.try_next().await? { + state.state_mut().on_setup_bind_static_token(message); + } + Ok(()) + }); + + // spawn a task to handle incoming capabilities. + state.spawn(error_span!("cap"), move |state| async move { + while let Some(message) = capability_recv.try_next().await? { + state.state_mut().on_setup_bind_read_capability(message)?; + } + Ok(()) + }); + + // spawn a task to handle incoming areas of interest. + state.spawn(error_span!("aoi"), move |state| async move { + while let Some(message) = aoi_recv.try_next().await? { + Self::on_bind_area_of_interest(state.clone(), message).await?; + } + Ok(()) + }); + Self { - channels, + control_recv, state, - co, + init: Some(init), } + .run_inner() + .await } - pub async fn run(mut self, init: SessionInit) -> Result<(), Error> { + + async fn run_inner(mut self) -> Result<(), Error> { debug!(role = ?self.state().our_role, "start session"); + + // reveal our nonce. 
let reveal_message = self.state().commitment_reveal()?; - self.send(reveal_message).await?; - let msg = ControlIssueGuarantee { - amount: INITIAL_GUARANTEES, - channel: LogicalChannel::Reconciliation, - }; - self.send(msg).await?; + self.state.send(reveal_message).await?; - let mut init = Some(init); - while let Some(message) = self.recv(LogicalChannel::Control).await { - let message = message?; - match message { - Message::CommitmentReveal(msg) => { - self.state().on_commitment_reveal(msg)?; - let init = init - .take() - .ok_or_else(|| Error::InvalidMessageInCurrentState)?; - self.setup(init).await?; - } - Message::SetupBindReadCapability(msg) => { - self.state().on_setup_bind_read_capability(msg)?; - } - Message::SetupBindStaticToken(msg) => { - self.state().on_setup_bind_static_token(msg); - } - Message::SetupBindAreaOfInterest(msg) => { - let start = self.state().on_setup_bind_area_of_interest(msg)?; - self.co.yield_(Yield::StartReconciliation(start)).await; - } - Message::ControlFreeHandle(_msg) => { - // TODO: Free handles - } - Message::ControlIssueGuarantee(msg) => { - let ControlIssueGuarantee { amount, channel } = msg; - // let receiver = self.channels.receiver(channel); - // let did_set = receiver.set_cap(amount as usize); - // tracing::error!("recv {channel:?} {amount} {did_set}"); - let sender = self.channels.sender(channel); - let did_set = sender.add_guarantees(amount); - debug!(?channel, amount, ?did_set, "set send capacity"); + // issue guarantees for all logical channels. + for channel in LogicalChannel::iter() { + let msg = ControlIssueGuarantee { + amount: INITIAL_GUARANTEES, + channel, + }; + self.state.send(msg).await?; + } + + let res = loop { + tracing::info!("WAIT"); + tokio::select! 
{ + // _ = self.state.notify_complete.notified() => { + // tracing::info!("NOTIFIED!"); + // break Ok(()) + // }, + message = self.control_recv.recv() => { + match message { + Some(message) => self.on_control_message(message?)?, + // If the remote closed their control stream, we abort the session. + None => break Ok(()), + } + }, + Some((key, result)) = self.state.join_next_task(), if !self.state.tasks.borrow().is_empty() => { + debug!(?key, ?result, "task completed"); + result?; + // Is this the right place for this check? It would run after each task + // completion, so necessarily including the completion of the reconciliation + // task, which is the only condition in which reconciliation can complete at + // the moment. + // + // TODO: We'll want to emit the completion event back to the application and + // let it decide what to do (stop, keep open) - or pass relevant config in + // SessionInit. + if self.state.state_mut().reconciliation_is_complete() { + tracing::debug!("stop session: reconciliation is complete"); + break Ok(()); + } } - _ => return Err(Error::UnsupportedMessage), } + }; + + // Close all our send streams. + // + // This makes the networking send loops stop. 
+ self.state.send.close_all(); + + res + } + fn on_control_message(&mut self, message: Message) -> Result<(), Error> { + debug!(%message, "recv"); + match message { + Message::CommitmentReveal(msg) => { + self.state().on_commitment_reveal(msg)?; + let init = self + .init + .take() + .ok_or_else(|| Error::InvalidMessageInCurrentState)?; + self.state + .spawn(error_span!("setup"), |state| Self::setup(state, init)); + } + Message::ControlIssueGuarantee(msg) => { + let ControlIssueGuarantee { amount, channel } = msg; + let sender = self.state.send.get_logical(channel); + debug!(?channel, %amount, "add guarantees"); + sender.add_guarantees(amount); + } + // Message::ControlFreeHandle(_msg) => { + // TODO: Free handles + // } + _ => return Err(Error::UnsupportedMessage), } Ok(()) } - async fn setup(&mut self, init: SessionInit) -> Result<(), Error> { - debug!(interests = init.interests.len(), "setup"); + async fn on_bind_area_of_interest( + session: SharedSessionState, + message: SetupBindAreaOfInterest, + ) -> Result<(), Error> { + let capability = session + .get_their_resource_eventually(|r| &mut r.capabilities, message.authorisation) + .await; + capability.try_granted_area(&message.area_of_interest.area)?; + let mut state = session.state.borrow_mut(); + let their_handle = state.their_resources.areas_of_interest.bind(message); + match state.find_shared_aoi_from_theirs(&their_handle)? 
{ + None => { + debug!("no shared aoi, skip"); + Ok(()) + } + Some(our_handle) => { + drop(state); + debug!("shared aoi found, start reconcile"); + Self::start_reconcile(session, (our_handle, their_handle)) + } + } + } + + fn start_reconcile( + mut session: SharedSessionState, + (our_handle, their_handle): AreaOfInterestHandlePair, + ) -> Result<(), Error> { + let recv = session + .reconcile_state + .borrow_mut() + .take_receiver() + .ok_or(Error::InvalidMessageInCurrentState)?; + let snapshot = Rc::new(session.store().snapshot()?); + let fut = ReconcileRoutine { + state: session.clone(), + snapshot, + recv, + } + .run((our_handle, their_handle)); + let task_key = session.spawn(error_span!("reconcile"), |_| fut); + *session.reconcile_state.borrow_mut() = ReconcileState::Running(task_key); + Ok(()) + } + + async fn setup(session: SharedSessionState, init: SessionInit) -> Result<(), Error> { + debug!(interests = init.interests.len(), "start setup"); for (capability, aois) in init.interests.into_iter() { if *capability.receiver() != init.user_secret_key.public_key() { return Err(Error::WrongSecretKeyForCapability); @@ -97,13 +247,13 @@ impl ControlRoutine { // TODO: implement private area intersection let intersection_handle = 0.into(); - let (our_capability_handle, message) = self.state().bind_and_sign_capability( + let (our_capability_handle, message) = session.state_mut().bind_and_sign_capability( &init.user_secret_key, intersection_handle, capability, )?; if let Some(message) = message { - self.send(message).await?; + session.send(message).await?; } for area_of_interest in aois { @@ -111,87 +261,68 @@ impl ControlRoutine { area_of_interest, authorisation: our_capability_handle, }; - let (_our_handle, is_new) = self - .state() + + let (our_handle, is_new) = session + .state_mut() .our_resources .areas_of_interest .bind_if_new(msg.clone()); + if is_new { - self.send(msg).await?; + session.send(msg).await?; + if let Some(their_handle) = + 
session.state_mut().find_shared_aoi_from_ours(&our_handle)? + { + debug!("sent aoi, shared, start reconcile"); + Self::start_reconcile(session.clone(), (our_handle, their_handle))?; + } else { + debug!("sent aoi, not yet shared"); + } } } } + debug!("setup done"); Ok(()) } fn state(&mut self) -> RefMut { - self.state.borrow_mut() - } - - async fn recv(&self, channel: LogicalChannel) -> Option> { - self.channels.recv_co(&self.co, channel).await - } - - async fn send(&self, message: impl Into) -> Result<(), WriteError> { - self.channels.send_co(&self.co, message).await + self.state.state_mut() } } #[derive(derive_more::Debug)] pub struct ReconcileRoutine { - store_snapshot: Rc, - store_writer: Rc>, - channels: Channels, - state: SharedSessionState, - co: WakeableCoro, + snapshot: Rc, + recv: MessageReceiver, + state: SharedSessionState, } // Note that all async methods yield to the owner of the coroutine. They are not running in a tokio // context. You may not perform regular async operations in them. impl ReconcileRoutine { - pub fn new( - co: WakeableCoro, - channels: Channels, - state: SharedSessionState, - store_snapshot: Rc, - store_writer: Rc>, - ) -> Self { - Self { - channels, - state, - co, - store_snapshot, - store_writer, - } - } - pub async fn run(mut self, start: Option) -> Result<(), Error> { - debug!(init = start.is_some(), "start reconciliation"); - + pub async fn run(mut self, shared_aoi: AreaOfInterestHandlePair) -> Result<(), Error> { + let our_role = self.state().our_role; + tracing::warn!(init = our_role.is_alfie(), "start reconciliation"); // optionally initiate reconciliation with a first fingerprint. only alfie may do this. 
- if let Some((our_handle, their_handle)) = start { - self.start_reconciliation(our_handle, their_handle).await?; + if our_role.is_alfie() { + self.initiate(shared_aoi.0, shared_aoi.1).await?; } - while let Some(message) = self.recv(LogicalChannel::Reconciliation).await { + while let Some(message) = self.recv.recv().await { let message = message?; - trace!(%message, "recv"); + debug!(?message, "recv"); match message { - Message::ReconciliationSendFingerprint(message) => { + ReconciliationMessage::SendFingerprint(message) => { self.on_send_fingerprint(message).await? } - Message::ReconciliationAnnounceEntries(message) => { + ReconciliationMessage::AnnounceEntries(message) => { self.on_announce_entries(message).await? } - Message::ReconciliationSendEntry(message) => self.on_send_entry(message).await?, - _ => return Err(Error::UnsupportedMessage), + ReconciliationMessage::SendEntry(message) => self.on_send_entry(message).await?, }; if self.state().reconciliation_is_complete() { - // we won't send anything further, so close our send channel, which will end the - // remote's recv channel. - self.channels.logical_send.reconciliation.close(); - // for now unconditionally end the session by closing our control receiver - self.channels.control_recv.close(); + tracing::info!("reconciliation complete, close session"); break; } } @@ -199,7 +330,7 @@ impl ReconcileRoutine { Ok(()) } - async fn start_reconciliation( + async fn initiate( &mut self, our_handle: AreaOfInterestHandle, their_handle: AreaOfInterestHandle, @@ -225,7 +356,7 @@ impl ReconcileRoutine { let range = common_aoi.into_range(); state.reconciliation_started = true; drop(state); - let fingerprint = self.store_snapshot.fingerprint(namespace, &range)?; + let fingerprint = self.snapshot.fingerprint(namespace, &range)?; self.send_fingerprint(range, fingerprint, our_handle, their_handle, None) .await?; Ok(()) @@ -251,7 +382,7 @@ impl ReconcileRoutine { state.range_is_authorised(&range, &our_handle, &their_handle)? 
}; - let our_fingerprint = self.store_snapshot.fingerprint(namespace, &range)?; + let our_fingerprint = self.snapshot.fingerprint(namespace, &range)?; // case 1: fingerprint match. if our_fingerprint == their_fingerprint { @@ -333,11 +464,8 @@ impl ReconcileRoutine { async fn on_send_entry(&mut self, message: ReconciliationSendEntry) -> Result<(), Error> { let static_token = self - .co - .yield_wake( - self.state - .get_resource_eventually(|r| &mut r.static_tokens, message.static_token_handle), - ) + .state + .get_their_resource_eventually(|r| &mut r.static_tokens, message.static_token_handle) .await; self.state().on_send_entry()?; @@ -348,9 +476,7 @@ impl ReconcileRoutine { message.dynamic_token, )?; - self.store_writer - .borrow_mut() - .ingest_entry(&authorised_entry)?; + self.state.store().ingest_entry(&authorised_entry)?; Ok(()) } @@ -363,10 +489,9 @@ impl ReconcileRoutine { their_handle: AreaOfInterestHandle, is_final_reply_for_range: Option, ) -> anyhow::Result<()> { - { - let mut state = self.state(); - state.pending_ranges.insert((our_handle, range.clone())); - } + self.state() + .pending_ranges + .insert((our_handle, range.clone())); let msg = ReconciliationSendFingerprint { range, fingerprint, @@ -394,7 +519,7 @@ impl ReconcileRoutine { } let our_count = match our_count { Some(count) => count, - None => self.store_snapshot.count(namespace, &range)?, + None => self.snapshot.count(namespace, &range)?, }; let msg = ReconciliationAnnounceEntries { range: range.clone(), @@ -407,7 +532,7 @@ impl ReconcileRoutine { }; self.send(msg).await?; for authorised_entry in self - .store_snapshot + .snapshot .get_entries_with_authorisation(namespace, &range) { let authorised_entry = authorised_entry?; @@ -415,10 +540,8 @@ impl ReconcileRoutine { let (static_token, dynamic_token) = token.into_parts(); // TODO: partial payloads let available = entry.payload_length; - let (static_token_handle, static_token_bind_msg) = self - .state - .borrow_mut() - 
.bind_our_static_token(static_token)?; + let (static_token_handle, static_token_bind_msg) = + self.state.state_mut().bind_our_static_token(static_token)?; if let Some(msg) = static_token_bind_msg { self.send(msg).await?; } @@ -442,7 +565,7 @@ impl ReconcileRoutine { // TODO: expose this config let config = SyncConfig::default(); // clone to avoid borrow checker trouble - let store_snapshot = Rc::clone(&self.store_snapshot); + let store_snapshot = Rc::clone(&self.snapshot); let mut iter = store_snapshot .split_range(namespace, &range, &config)? .peekable(); @@ -479,14 +602,10 @@ impl ReconcileRoutine { } fn state(&mut self) -> RefMut { - self.state.borrow_mut() - } - - async fn recv(&self, channel: LogicalChannel) -> Option> { - self.channels.recv_co(&self.co, channel).await + self.state.state_mut() } async fn send(&self, message: impl Into) -> Result<(), WriteError> { - self.channels.send_co(&self.co, message).await + self.state.send(message).await } } diff --git a/iroh-willow/src/session/error.rs b/iroh-willow/src/session/error.rs index 9580fa5b32..943201720e 100644 --- a/iroh-willow/src/session/error.rs +++ b/iroh-willow/src/session/error.rs @@ -33,6 +33,8 @@ pub enum Error { UnauthorisedEntryReceived, #[error("received an unsupported message type")] UnsupportedMessage, + #[error("received a message that is intended for another channel")] + WrongChannel, #[error("the received nonce does not match the received committment")] BrokenCommittement, #[error("received an actor message for unknown session")] @@ -43,6 +45,8 @@ pub enum Error { InvalidState(&'static str), #[error("actor failed to respond")] ActorFailed, + #[error("a task failed to join")] + TaskFailed(#[from] tokio::task::JoinError), } impl From for Error { diff --git a/iroh-willow/src/session/state.rs b/iroh-willow/src/session/state.rs index e41a7e14c6..ed9e7f37ea 100644 --- a/iroh-willow/src/session/state.rs +++ b/iroh-willow/src/session/state.rs @@ -1,12 +1,14 @@ use std::{ cell::{RefCell, RefMut}, 
collections::HashSet, + pin::Pin, rc::Rc, task::Poll, }; -use futures_lite::future::poll_fn; -use tracing::warn; +use futures_lite::{future::poll_fn, StreamExt}; +use tokio::sync::Notify; +use tracing::{warn, Instrument, Span}; use crate::{ net::InitialTransmission, @@ -20,25 +22,91 @@ use crate::{ SetupBindStaticToken, StaticToken, StaticTokenHandle, }, }, + store::Store, + util::{ + channel::WriteError, + task_set::{TaskKey, TaskSet}, + }, }; use super::{ + channels::ChannelSenders, + coroutine::ReconcileState, resource::{ResourceMap, ScopedResources}, Error, Role, Scope, }; -#[derive(derive_more::Debug, Clone)] -pub struct SharedSessionState { - inner: Rc>, +#[derive(derive_more::Debug)] +pub struct SharedSessionState { + pub state: Rc>, + pub send: ChannelSenders, + #[debug("Store")] + pub store: Rc>, + pub tasks: Rc>>>, + pub reconcile_state: Rc>, + // pub notify_complete: Rc, +} +impl Clone for SharedSessionState { + fn clone(&self) -> Self { + Self { + state: Rc::clone(&self.state), + send: self.send.clone(), + store: Rc::clone(&self.store), + tasks: Rc::clone(&self.tasks), + reconcile_state: Rc::clone(&self.reconcile_state), + // notify_complete: Rc::clone(&self.notify_complete), + } + } } -impl SharedSessionState { - pub fn new(state: SessionState) -> Self { +impl SharedSessionState { + pub fn new( + state: SessionState, + send: ChannelSenders, + store: Rc>, + reconcile_state: ReconcileState, + ) -> Self { Self { - inner: Rc::new(RefCell::new(state)), + state: Rc::new(RefCell::new(state)), + send, + store, + tasks: Default::default(), + reconcile_state: Rc::new(RefCell::new(reconcile_state)), + // notify_complete: Default::default(), } } - pub async fn get_resource_eventually( + + pub fn spawn(&self, span: Span, f: F) -> TaskKey + where + F: FnOnce(SharedSessionState) -> Fut, + Fut: std::future::Future> + 'static, + { + let state = self.clone(); + let fut = f(state); + let fut = fut.instrument(span); + let key = self.tasks.borrow_mut().spawn_local(fut); + 
key + } + + pub async fn join_next_task(&self) -> Option<(TaskKey, Result<(), Error>)> { + std::future::poll_fn(|cx| { + let mut tasks = self.tasks.borrow_mut(); + let res = std::task::ready!(Pin::new(&mut tasks).poll_next(cx)); + let res = match res { + None => None, + Some((key, Ok(r))) => Some((key, r)), + Some((key, Err(r))) => Some((key, Err(r.into()))), + }; + Poll::Ready(res) + }) + .await + } + + pub async fn send(&self, message: impl Into) -> Result<(), WriteError> { + self.send.send(message).await + } + + pub async fn get_their_resource_eventually( &self, selector: F, handle: H, @@ -46,7 +114,7 @@ impl SharedSessionState { where F: for<'a> Fn(&'a mut ScopedResources) -> &'a mut ResourceMap, { - let inner = self.inner.clone(); + let inner = self.state.clone(); poll_fn(move |cx| { let mut inner = inner.borrow_mut(); let res = selector(&mut std::ops::DerefMut::deref_mut(&mut inner).their_resources); @@ -56,11 +124,32 @@ impl SharedSessionState { .await } - pub fn borrow_mut(&self) -> RefMut { - self.inner.borrow_mut() + pub async fn get_our_resource_eventually( + &self, + selector: F, + handle: H, + ) -> R + where + F: for<'a> Fn(&'a mut ScopedResources) -> &'a mut ResourceMap, + { + let inner = self.state.clone(); + poll_fn(move |cx| { + let mut inner = inner.borrow_mut(); + let res = selector(&mut std::ops::DerefMut::deref_mut(&mut inner).our_resources); + let r = std::task::ready!(res.poll_get_eventually(handle, cx)); + Poll::Ready(r.clone()) + }) + .await + } + + pub fn state_mut(&self) -> RefMut { + self.state.borrow_mut() + } + + pub fn store(&mut self) -> RefMut { + self.store.borrow_mut() } } -// impl SharedSessio #[derive(Debug)] pub struct SessionState { @@ -71,6 +160,7 @@ pub struct SessionState { pub pending_ranges: HashSet<(AreaOfInterestHandle, ThreeDRange)>, pub pending_entries: Option, pub challenge: ChallengeState, + // pub reconcile_state: ReconcileState } impl SessionState { @@ -97,6 +187,12 @@ impl SessionState { } } pub fn 
reconciliation_is_complete(&self) -> bool { + // tracing::debug!( + // "reconciliation_is_complete started {} pending_ranges {}, pending_entries {}", + // self.reconciliation_started, + // self.pending_ranges.len(), + // self.pending_entries.is_some() + // ); self.reconciliation_started && self.pending_ranges.is_empty() && self.pending_entries.is_none() @@ -122,10 +218,10 @@ impl SessionState { Ok((our_handle, maybe_message)) } - pub fn commitment_reveal(&mut self) -> Result { + pub fn commitment_reveal(&mut self) -> Result { match self.challenge { ChallengeState::Committed { our_nonce, .. } => { - Ok(CommitmentReveal { nonce: our_nonce }.into()) + Ok(CommitmentReveal { nonce: our_nonce }) } _ => Err(Error::InvalidMessageInCurrentState), } @@ -151,29 +247,24 @@ impl SessionState { self.their_resources.static_tokens.bind(msg.static_token); } - pub fn on_setup_bind_area_of_interest( - &mut self, - msg: SetupBindAreaOfInterest, - ) -> Result, Error> { - let capability = self - .their_resources - .capabilities - .try_get(&msg.authorisation)?; - capability.try_granted_area(&msg.area_of_interest.area)?; - let their_handle = self.their_resources.areas_of_interest.bind(msg); + // pub fn on_setup_bind_area_of_interest( + // &mut self, + // msg: SetupBindAreaOfInterest, + // ) -> Result, Error> { + // let capability = self + // .their_resources + // .capabilities + // .try_get(&msg.authorisation)?; + // capability.try_granted_area(&msg.area_of_interest.area)?; + // let their_handle = self.their_resources.areas_of_interest.bind(msg); + // + // let maybe_shared_aoi_handles = self + // .find_shared_aoi(&their_handle)? + // .map(|our_handle| (our_handle, their_handle)); + // Ok(maybe_shared_aoi_handles) + // } - // only initiate reconciliation if we are alfie, and if we have a shared aoi - // TODO: abort if no shared aoi? - let start = if self.our_role == Role::Alfie { - self.find_shared_aoi(&their_handle)? 
- .map(|our_handle| (our_handle, their_handle)) - } else { - None - }; - Ok(start) - } - - pub fn find_shared_aoi( + pub fn find_shared_aoi_from_theirs( &self, their_handle: &AreaOfInterestHandle, ) -> Result, Error> { @@ -190,6 +281,20 @@ impl SessionState { Ok(maybe_our_handle) } + pub fn find_shared_aoi_from_ours( + &self, + our_handle: &AreaOfInterestHandle, + ) -> Result, Error> { + let our_aoi = self.our_resources.areas_of_interest.try_get(our_handle)?; + let maybe_their_handle = self + .their_resources + .areas_of_interest + .iter() + .find(|(_handle, aoi)| aoi.area().intersection(our_aoi.area()).is_some()) + .map(|(handle, _aoi)| *handle); + Ok(maybe_their_handle) + } + pub fn on_send_entry(&mut self) -> Result<(), Error> { let remaining = self .pending_entries @@ -274,3 +379,8 @@ impl SessionState { self.resources(scope).areas_of_interest.try_get(handle) } } + +// struct AoiFinder { +// ours: HashSet u64 { SystemTime::now() diff --git a/iroh-willow/src/util/channel.rs b/iroh-willow/src/util/channel.rs index 5a7e4c408a..dd59f0bb71 100644 --- a/iroh-willow/src/util/channel.rs +++ b/iroh-willow/src/util/channel.rs @@ -8,6 +8,7 @@ use std::{ }; use bytes::{Buf, Bytes, BytesMut}; +use futures_lite::Stream; use tokio::io::AsyncWrite; use super::{DecodeOutcome, Decoder, Encoder}; @@ -116,9 +117,6 @@ impl Shared { fn set_cap(&mut self, cap: usize) -> bool { if cap >= self.buf.len() { - // if cap > self.max_buffer_size { - // self.wake_writable(); - // } self.max_buffer_size = cap; self.wake_writable(); true @@ -128,7 +126,11 @@ impl Shared { } fn add_guarantees(&mut self, amount: u64) { + let previous = self.remaining_write_capacity(); self.guarantees.add(amount); + if self.remaining_write_capacity() > previous { + self.wake_writable(); + } } fn close(&mut self) { @@ -366,11 +368,18 @@ impl Receiver { self.shared.lock().unwrap().set_cap(cap) } - pub async fn recv_message(&self) -> Option> { + pub async fn recv(&self) -> Option> { poll_fn(|cx| 
self.shared.lock().unwrap().poll_recv_message(cx)).await } } +impl Stream for Receiver { + type Item = Result; + fn poll_next(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { + self.shared.lock().unwrap().poll_recv_message(cx) + } +} + impl Clone for Receiver { fn clone(&self) -> Self { Self { From b7d1ac98de93b480405636d909bceb4b10af04ab Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Wed, 15 May 2024 00:19:04 +0200 Subject: [PATCH 036/198] refactor: use current-thread executor, and other changes --- iroh-willow/cuts.rs | 39 +++ iroh-willow/src/actor.rs | 90 ++++--- iroh-willow/src/net.rs | 4 +- iroh-willow/src/proto/challenge.rs | 9 +- iroh-willow/src/proto/keys.rs | 10 + iroh-willow/src/proto/meadowcap.rs | 6 + iroh-willow/src/session.rs | 11 +- iroh-willow/src/session/aoi_finder.rs | 16 ++ iroh-willow/src/session/channels.rs | 28 +-- iroh-willow/src/session/coroutine.rs | 251 +++++++------------ iroh-willow/src/session/error.rs | 5 +- iroh-willow/src/session/state.rs | 333 ++++++++++++++++++-------- iroh-willow/src/store.rs | 73 ++++++ iroh-willow/src/util/task_set.rs | 64 +++++ 14 files changed, 610 insertions(+), 329 deletions(-) create mode 100644 iroh-willow/cuts.rs create mode 100644 iroh-willow/src/session/aoi_finder.rs create mode 100644 iroh-willow/src/util/task_set.rs diff --git a/iroh-willow/cuts.rs b/iroh-willow/cuts.rs new file mode 100644 index 0000000000..640ba9dfba --- /dev/null +++ b/iroh-willow/cuts.rs @@ -0,0 +1,39 @@ + +// async fn recv_bulk( +// &self, +// channel: LogicalChannel, +// ) -> Option>> { +// let receiver = self.channels.receiver(channel); +// let mut buf = SmallVec::<[Message; N]>::new(); +// loop { +// match receiver.read_message_or_set_notify() { +// Err(err) => return Some(Err(err)), +// Ok(outcome) => match outcome { +// ReadOutcome::Closed => { +// if buf.is_empty() { +// debug!("recv: closed"); +// return None; +// } else { +// return Some(Ok(buf)); +// } +// } +// ReadOutcome::ReadBufferEmpty 
=> { +// if buf.is_empty() { +// self.co +// .yield_(Yield::Pending(Readyness::Channel(channel, Interest::Recv))) +// .await; +// } else { +// return Some(Ok(buf)); +// } +// } +// ReadOutcome::Item(message) => { +// debug!(%message, "recv"); +// buf.push(message); +// if buf.len() == N { +// return Some(Ok(buf)); +// } +// } +// }, +// } +// } +// } diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/actor.rs index c81cdd240d..0a9bfca947 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -11,14 +11,11 @@ use crate::{ proto::{ grouping::ThreeDRange, keys::NamespaceId, - wgps::AreaOfInterestHandle, + meadowcap, willow::{AuthorisedEntry, Entry}, }, - session::{ - coroutine::ControlRoutine, Channels, Error, Role, SessionInit, SessionState, - SharedSessionState, - }, - store::Store, + session::{coroutine::ControlRoutine, Channels, Error, Role, SessionInit, SharedSessionState}, + store::{KeyStore, Store}, util::task_set::{TaskKey, TaskSet}, }; @@ -36,7 +33,7 @@ impl ActorHandle { pub fn spawn(store: S, me: NodeId) -> ActorHandle { let (tx, rx) = flume::bounded(INBOX_CAP); let join_handle = std::thread::Builder::new() - .name("sync-actor".to_string()) + .name("willow-actor".to_string()) .spawn(move || { let span = error_span!("willow_thread", me=%me.fmt_short()); let _guard = span.enter(); @@ -45,7 +42,7 @@ impl ActorHandle { store: Rc::new(RefCell::new(store)), sessions: Default::default(), inbox_rx: rx, - tasks: Default::default(), + session_tasks: Default::default(), }; if let Err(error) = actor.run() { error!(?error, "storage thread failed"); @@ -70,6 +67,17 @@ impl ActorHandle { Ok(()) } + pub async fn insert_secret( + &self, + secret: impl Into, + ) -> anyhow::Result<()> { + let secret = secret.into(); + let (reply, reply_rx) = oneshot::channel(); + self.send(ToActor::InsertSecret { secret, reply }).await?; + reply_rx.await??; + Ok(()) + } + pub async fn get_entries( &self, namespace: NamespaceId, @@ -93,12 +101,11 @@ impl ActorHandle { 
channels: Channels, init: SessionInit, ) -> anyhow::Result { - let state = SessionState::new(our_role, initial_transmission); - let (on_finish_tx, on_finish_rx) = oneshot::channel(); self.send(ToActor::InitSession { + our_role, + initial_transmission, peer, - state, channels, init, on_finish: on_finish_tx, @@ -148,9 +155,9 @@ impl SessionHandle { #[derive(derive_more::Debug, strum::Display)] pub enum ToActor { InitSession { + our_role: Role, peer: NodeId, - #[debug(skip)] - state: SessionState, + initial_transmission: InitialTransmission, #[debug(skip)] channels: Channels, init: SessionInit, @@ -166,6 +173,10 @@ pub enum ToActor { entry: AuthorisedEntry, reply: oneshot::Sender>, }, + InsertSecret { + secret: meadowcap::SecretKey, + reply: oneshot::Sender>, + }, Shutdown { #[debug(skip)] reply: Option>, @@ -174,7 +185,7 @@ pub enum ToActor { #[derive(Debug)] struct ActiveSession { - on_done: oneshot::Sender>, + on_finish: oneshot::Sender>, task_key: TaskKey, // state: SharedSessionState } @@ -184,7 +195,7 @@ pub struct StorageThread { inbox_rx: flume::Receiver, store: Rc>, sessions: HashMap, - tasks: TaskSet<(SessionId, Result<(), Error>)>, + session_tasks: TaskSet<(SessionId, Result<(), Error>)>, } impl StorageThread { @@ -208,7 +219,7 @@ impl StorageThread { } Ok(msg) => self.handle_message(msg)?, }, - Some((_key, res)) = self.tasks.next(), if !self.tasks.is_empty() => match res { + Some((_key, res)) = self.session_tasks.next(), if !self.session_tasks.is_empty() => match res { Ok((id, res)) => { self.complete_session(&id, res); } @@ -228,26 +239,33 @@ impl StorageThread { ToActor::Shutdown { .. 
} => unreachable!("handled in run"), ToActor::InitSession { peer, - state, channels, + our_role, + initial_transmission, init, - on_finish: on_done, + on_finish, } => { - // self.init_session(peer, state, channels, init, on_finish); - let span = error_span!("session", peer=%peer.fmt_short()); let session_id = peer; + let Channels { send, recv } = channels; + let session = SharedSessionState::new( + self.store.clone(), + send, + our_role, + initial_transmission, + ); - // let Channels { send, recv } = channels; - // let store = self.store.clone(); - // let state = SharedSessionState::new(state, send, store, reconcile_state); - - let fut = ControlRoutine::run(channels, state, self.store.clone(), init); - let fut = fut.instrument(span.clone()); - let task_key = self - .tasks - .spawn_local(async move { (session_id, fut.await) }); - let session = ActiveSession { on_done, task_key }; - self.sessions.insert(peer, session); + let task_key = self.session_tasks.spawn_local( + async move { + let res = ControlRoutine::run(session, recv, init).await; + (session_id, res) + } + .instrument(error_span!("session", peer = %peer.fmt_short())), + ); + let active_session = ActiveSession { + on_finish, + task_key, + }; + self.sessions.insert(session_id, active_session); } ToActor::GetEntries { namespace, @@ -264,6 +282,10 @@ impl StorageThread { let res = self.store.borrow_mut().ingest_entry(&entry); reply.send(res).ok(); } + ToActor::InsertSecret { secret, reply } => { + let res = self.store.borrow_mut().key_store().insert(secret); + reply.send(res.map_err(anyhow::Error::from)).ok(); + } } Ok(()) } @@ -271,12 +293,10 @@ impl StorageThread { fn complete_session(&mut self, peer: &NodeId, result: Result<(), Error>) { let session = self.sessions.remove(peer); if let Some(session) = session { - self.tasks.remove(session.task_key); - session.on_done.send(result).ok(); + self.session_tasks.remove(session.task_key); + session.on_finish.send(result).ok(); } else { warn!("remove_session called 
for unknown session"); } } } - -pub type AreaOfInterestHandlePair = (AreaOfInterestHandle, AreaOfInterestHandle); diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 348309ce5c..9c0b98a35d 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -3,7 +3,6 @@ use futures_concurrency::future::TryJoin; use futures_util::future::TryFutureExt; use iroh_base::{hash::Hash, key::NodeId}; use iroh_net::magic_endpoint::{Connection, RecvStream, SendStream}; -use strum::{EnumCount, VariantArray}; use tokio::{ io::{AsyncReadExt, AsyncWriteExt}, task::JoinSet, @@ -484,6 +483,7 @@ mod tests { path_fn: impl Fn(usize) -> Result, ) -> anyhow::Result { let user_secret = UserSecretKey::generate(rng); + store.insert_secret(user_secret.clone()).await?; let (read_cap, write_cap) = create_capabilities(namespace_secret, user_secret.public_key()); for i in 0..count { let path = path_fn(i).expect("invalid path"); @@ -498,7 +498,7 @@ mod tests { let entry = entry.attach_authorisation(write_cap.clone(), &user_secret)?; store.ingest_entry(entry).await?; } - let init = SessionInit::with_interest(user_secret, read_cap, AreaOfInterest::full()); + let init = SessionInit::with_interest(read_cap, AreaOfInterest::full()); Ok(init) } diff --git a/iroh-willow/src/proto/challenge.rs b/iroh-willow/src/proto/challenge.rs index 7b4aec6ac9..137afb509e 100644 --- a/iroh-willow/src/proto/challenge.rs +++ b/iroh-willow/src/proto/challenge.rs @@ -42,11 +42,16 @@ impl ChallengeState { } pub fn sign(&self, secret_key: &UserSecretKey) -> Result { - let challenge = self.get_ours()?; - let signature = secret_key.sign(challenge); + let signable = self.signable()?; + let signature = secret_key.sign(&signable); Ok(signature) } + pub fn signable(&self) -> Result<[u8; 32], Error> { + let challenge = self.get_ours()?; + Ok(*challenge) + } + pub fn verify(&self, user_key: &UserPublicKey, signature: &UserSignature) -> Result<(), Error> { let their_challenge = self.get_theirs()?; 
user_key.verify(their_challenge, &signature)?; diff --git a/iroh-willow/src/proto/keys.rs b/iroh-willow/src/proto/keys.rs index 9d43e75893..a036470aeb 100644 --- a/iroh-willow/src/proto/keys.rs +++ b/iroh-willow/src/proto/keys.rs @@ -155,6 +155,11 @@ impl NamespacePublicKey { pub fn from_bytes(bytes: &[u8; 32]) -> Result { Ok(NamespacePublicKey(VerifyingKey::from_bytes(bytes)?)) } + + /// Convert into a [`NamespaceId`]. + pub fn id(&self) -> NamespaceId { + self.into() + } } /// User secret key. @@ -232,6 +237,11 @@ impl UserPublicKey { pub fn from_bytes(bytes: &[u8; 32]) -> Result { Ok(UserPublicKey(VerifyingKey::from_bytes(bytes)?)) } + + /// Convert into a [`UserId`]. + pub fn id(&self) -> UserId { + self.into() + } } impl FromStr for UserSecretKey { diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index 3190158a0c..d23bd2806d 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -13,6 +13,12 @@ pub type UserPublicKey = keys::UserPublicKey; pub type NamespacePublicKey = keys::NamespacePublicKey; pub type NamespaceSignature = keys::NamespaceSignature; +#[derive(Debug, derive_more::From)] +pub enum SecretKey { + User(UserSecretKey), + Namespace(NamespaceSecretKey), +} + pub fn is_authorised_write(entry: &Entry, token: &MeadowcapAuthorisationToken) -> bool { let (capability, signature) = token.as_parts(); diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 2ea46e951b..314d1f6099 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -1,7 +1,8 @@ use std::collections::{HashMap, HashSet}; -use crate::proto::{grouping::AreaOfInterest, keys::UserSecretKey, wgps::ReadCapability}; +use crate::proto::{grouping::AreaOfInterest, wgps::ReadCapability}; +pub mod aoi_finder; pub mod channels; pub mod coroutine; mod error; @@ -45,18 +46,12 @@ pub enum Scope { #[derive(Debug)] pub struct SessionInit { - pub user_secret_key: UserSecretKey, pub interests: HashMap>, 
} impl SessionInit { - pub fn with_interest( - user_secret_key: UserSecretKey, - capability: ReadCapability, - area_of_interest: AreaOfInterest, - ) -> Self { + pub fn with_interest(capability: ReadCapability, area_of_interest: AreaOfInterest) -> Self { Self { - user_secret_key, interests: HashMap::from_iter([(capability, HashSet::from_iter([area_of_interest]))]), } } diff --git a/iroh-willow/src/session/aoi_finder.rs b/iroh-willow/src/session/aoi_finder.rs new file mode 100644 index 0000000000..49169a90be --- /dev/null +++ b/iroh-willow/src/session/aoi_finder.rs @@ -0,0 +1,16 @@ +// use std::{cell::RefCell, collections::VecDeque, rc::Rc, task::Waker}; +// +// use crate::actor::AreaOfInterestPair; + +// pub struct AoiFinder { +// inner: Rc> +// } +// +// impl AoiFinder { +// fn push +// } +// +// struct Inner { +// queue: VecDeque, +// wakers: VecDeque +// } diff --git a/iroh-willow/src/session/channels.rs b/iroh-willow/src/session/channels.rs index 2c96a6c958..017e292348 100644 --- a/iroh-willow/src/session/channels.rs +++ b/iroh-willow/src/session/channels.rs @@ -1,10 +1,11 @@ use std::{ + future::poll_fn, marker::PhantomData, pin::Pin, task::{self, ready, Poll}, }; -use futures_lite::{Stream, StreamExt}; +use futures_lite::Stream; use tracing::debug; use crate::{ @@ -12,21 +13,11 @@ use crate::{ Channel, LogicalChannel, Message, ReconciliationMessage, SetupBindAreaOfInterest, SetupBindReadCapability, SetupBindStaticToken, }, - util::channel::{ReadError, Receiver, Sender, WriteError}, + util::channel::{Receiver, Sender, WriteError}, }; use super::Error; -// pub struct MessageSender { -// inner: Sender, -// _phantom: PhantomData -// } -// impl> MessageSender { -// async fn send(&self, message: T) -> Result<(), WriteError> { -// self.inner.send_message(&message.into()).await -// } -// } - #[derive(Debug)] pub struct MessageReceiver { inner: Receiver, @@ -34,17 +25,10 @@ pub struct MessageReceiver { } impl> MessageReceiver { - pub async fn recv(&self) -> Option> { - 
let message = self.inner.recv().await?; - match message { - Err(err) => Some(Err(err.into())), - Ok(message) => { - debug!(%message, "recv"); - let message = message.try_into().map_err(|_| Error::WrongChannel); - Some(message) - } - } + pub async fn recv(&mut self) -> Option> { + poll_fn(|cx| self.poll_recv(cx)).await } + pub fn close(&self) { self.inner.close() } diff --git a/iroh-willow/src/session/coroutine.rs b/iroh-willow/src/session/coroutine.rs index b486806f9e..233f765382 100644 --- a/iroh-willow/src/session/coroutine.rs +++ b/iroh-willow/src/session/coroutine.rs @@ -1,14 +1,10 @@ -use std::{ - cell::{RefCell, RefMut}, - rc::Rc, -}; +use std::{cell::RefMut, rc::Rc}; use futures_lite::StreamExt; use strum::IntoEnumIterator; use tracing::{debug, error_span, trace}; use crate::{ - actor::AreaOfInterestHandlePair, proto::{ grouping::ThreeDRange, keys::NamespaceId, @@ -20,17 +16,17 @@ use crate::{ willow::AuthorisedEntry, }, session::{ - channels::LogicalChannelReceivers, Channels, Error, SessionInit, SessionState, + channels::LogicalChannelReceivers, Error, Scope, SessionInit, SessionState, SharedSessionState, }, store::{ReadonlyStore, SplitAction, Store, SyncConfig}, - util::{ - channel::{Receiver, WriteError}, - task_set::TaskKey, - }, + util::channel::{Receiver, WriteError}, }; -use super::channels::{ChannelReceivers, MessageReceiver}; +use super::{ + channels::{ChannelReceivers, MessageReceiver}, + state::AreaOfInterestIntersection, +}; const INITIAL_GUARANTEES: u64 = u64::MAX; @@ -41,29 +37,12 @@ pub struct ControlRoutine { init: Option, } -#[derive(Debug)] -pub enum ReconcileState { - Idle(Option>), - Running(TaskKey), -} - -impl ReconcileState { - fn take_receiver(&mut self) -> Option> { - match self { - Self::Idle(recv) => recv.take(), - _ => None, - } - } -} - impl ControlRoutine { pub async fn run( - channels: Channels, - state: SessionState, - store: Rc>, + session: SharedSessionState, + recv: ChannelReceivers, init: SessionInit, ) -> Result<(), 
Error> { - let Channels { send, recv } = channels; let ChannelReceivers { control_recv, logical_recv, @@ -75,36 +54,38 @@ impl ControlRoutine { mut aoi_recv, } = logical_recv; - let reconcile_state = ReconcileState::Idle(Some(reconciliation_recv)); - let state = SharedSessionState::new(state, send, store, reconcile_state); - // spawn a task to handle incoming static tokens. - state.spawn(error_span!("stt"), move |state| async move { + session.spawn(error_span!("stt"), move |session| async move { while let Some(message) = static_tokens_recv.try_next().await? { - state.state_mut().on_setup_bind_static_token(message); + session.state_mut().on_setup_bind_static_token(message); } Ok(()) }); // spawn a task to handle incoming capabilities. - state.spawn(error_span!("cap"), move |state| async move { + session.spawn(error_span!("cap"), move |session| async move { while let Some(message) = capability_recv.try_next().await? { - state.state_mut().on_setup_bind_read_capability(message)?; + session.state_mut().on_setup_bind_read_capability(message)?; } Ok(()) }); // spawn a task to handle incoming areas of interest. - state.spawn(error_span!("aoi"), move |state| async move { + session.spawn(error_span!("aoi"), move |session| async move { while let Some(message) = aoi_recv.try_next().await? { - Self::on_bind_area_of_interest(state.clone(), message).await?; + Self::on_bind_area_of_interest(session.clone(), message).await?; } Ok(()) }); + // spawn a task to handle reconciliation messages + session.spawn(error_span!("rec"), move |session| async move { + Reconciler::new(session, reconciliation_recv)?.run().await + }); + Self { control_recv, - state, + state: session, init: Some(init), } .run_inner() @@ -128,12 +109,7 @@ impl ControlRoutine { } let res = loop { - tracing::info!("WAIT"); tokio::select! 
{ - // _ = self.state.notify_complete.notified() => { - // tracing::info!("NOTIFIED!"); - // break Ok(()) - // }, message = self.control_recv.recv() => { match message { Some(message) => self.on_control_message(message?)?, @@ -198,60 +174,22 @@ impl ControlRoutine { session: SharedSessionState, message: SetupBindAreaOfInterest, ) -> Result<(), Error> { - let capability = session + let _capability = session .get_their_resource_eventually(|r| &mut r.capabilities, message.authorisation) .await; - capability.try_granted_area(&message.area_of_interest.area)?; - let mut state = session.state.borrow_mut(); - let their_handle = state.their_resources.areas_of_interest.bind(message); - match state.find_shared_aoi_from_theirs(&their_handle)? { - None => { - debug!("no shared aoi, skip"); - Ok(()) - } - Some(our_handle) => { - drop(state); - debug!("shared aoi found, start reconcile"); - Self::start_reconcile(session, (our_handle, their_handle)) - } - } - } - - fn start_reconcile( - mut session: SharedSessionState, - (our_handle, their_handle): AreaOfInterestHandlePair, - ) -> Result<(), Error> { - let recv = session - .reconcile_state - .borrow_mut() - .take_receiver() - .ok_or(Error::InvalidMessageInCurrentState)?; - let snapshot = Rc::new(session.store().snapshot()?); - let fut = ReconcileRoutine { - state: session.clone(), - snapshot, - recv, - } - .run((our_handle, their_handle)); - let task_key = session.spawn(error_span!("reconcile"), |_| fut); - *session.reconcile_state.borrow_mut() = ReconcileState::Running(task_key); + session + .state_mut() + .bind_area_of_interest(Scope::Theirs, message)?; Ok(()) } async fn setup(session: SharedSessionState, init: SessionInit) -> Result<(), Error> { debug!(interests = init.interests.len(), "start setup"); for (capability, aois) in init.interests.into_iter() { - if *capability.receiver() != init.user_secret_key.public_key() { - return Err(Error::WrongSecretKeyForCapability); - } - // TODO: implement private area intersection let 
intersection_handle = 0.into(); - let (our_capability_handle, message) = session.state_mut().bind_and_sign_capability( - &init.user_secret_key, - intersection_handle, - capability, - )?; + let (our_capability_handle, message) = + session.bind_and_sign_capability(intersection_handle, capability)?; if let Some(message) = message { session.send(message).await?; } @@ -261,24 +199,11 @@ impl ControlRoutine { area_of_interest, authorisation: our_capability_handle, }; - - let (our_handle, is_new) = session + // TODO: We could skip the clone if we re-enabled sending by reference. + session .state_mut() - .our_resources - .areas_of_interest - .bind_if_new(msg.clone()); - - if is_new { - session.send(msg).await?; - if let Some(their_handle) = - session.state_mut().find_shared_aoi_from_ours(&our_handle)? - { - debug!("sent aoi, shared, start reconcile"); - Self::start_reconcile(session.clone(), (our_handle, their_handle))?; - } else { - debug!("sent aoi, not yet shared"); - } - } + .bind_area_of_interest(Scope::Ours, msg.clone())?; + session.send(msg).await?; } } debug!("setup done"); @@ -291,72 +216,74 @@ impl ControlRoutine { } #[derive(derive_more::Debug)] -pub struct ReconcileRoutine { - snapshot: Rc, +pub struct Reconciler { + snapshot: Rc, recv: MessageReceiver, - state: SharedSessionState, + session: SharedSessionState, } // Note that all async methods yield to the owner of the coroutine. They are not running in a tokio // context. You may not perform regular async operations in them. -impl ReconcileRoutine { - pub async fn run(mut self, shared_aoi: AreaOfInterestHandlePair) -> Result<(), Error> { - let our_role = self.state().our_role; - tracing::warn!(init = our_role.is_alfie(), "start reconciliation"); - // optionally initiate reconciliation with a first fingerprint. only alfie may do this. 
- if our_role.is_alfie() { - self.initiate(shared_aoi.0, shared_aoi.1).await?; - } +impl Reconciler { + pub fn new( + session: SharedSessionState, + recv: MessageReceiver, + ) -> Result { + let snapshot = session.store().snapshot()?; + Ok(Self { + recv, + snapshot: Rc::new(snapshot), + session, + }) + } - while let Some(message) = self.recv.recv().await { - let message = message?; - debug!(?message, "recv"); - match message { - ReconciliationMessage::SendFingerprint(message) => { - self.on_send_fingerprint(message).await? + pub async fn run(mut self) -> Result<(), Error> { + let our_role = self.state().our_role; + loop { + tokio::select! { + message = self.recv.try_next() => { + match message? { + None => break, + Some(message) => self.on_message(message).await?, + } } - ReconciliationMessage::AnnounceEntries(message) => { - self.on_announce_entries(message).await? + Some(intersection) = self.session.next_aoi_intersection() => { + if our_role.is_alfie() { + self.initiate(intersection).await?; + } } - ReconciliationMessage::SendEntry(message) => self.on_send_entry(message).await?, - }; - + } if self.state().reconciliation_is_complete() { - tracing::info!("reconciliation complete, close session"); + debug!("reconciliation complete, close session"); break; } } + Ok(()) + } + async fn on_message(&mut self, message: ReconciliationMessage) -> Result<(), Error> { + match message { + ReconciliationMessage::SendFingerprint(message) => { + self.on_send_fingerprint(message).await? + } + ReconciliationMessage::AnnounceEntries(message) => { + self.on_announce_entries(message).await? 
+ } + ReconciliationMessage::SendEntry(message) => self.on_send_entry(message).await?, + }; Ok(()) } - async fn initiate( - &mut self, - our_handle: AreaOfInterestHandle, - their_handle: AreaOfInterestHandle, - ) -> Result<(), Error> { - let mut state = self.state(); - let our_aoi = state.our_resources.areas_of_interest.try_get(&our_handle)?; - let their_aoi = state - .their_resources - .areas_of_interest - .try_get(&their_handle)?; - - let our_capability = state - .our_resources - .capabilities - .try_get(&our_aoi.authorisation)?; - let namespace: NamespaceId = our_capability.granted_namespace().into(); - - let common_aoi = &our_aoi - .area() - .intersection(&their_aoi.area()) - .ok_or(Error::AreaOfInterestDoesNotOverlap)?; - - let range = common_aoi.into_range(); - state.reconciliation_started = true; - drop(state); + async fn initiate(&mut self, intersection: AreaOfInterestIntersection) -> Result<(), Error> { + let AreaOfInterestIntersection { + our_handle, + their_handle, + intersection, + namespace, + } = intersection; + let range = intersection.into_range(); let fingerprint = self.snapshot.fingerprint(namespace, &range)?; + self.session.state_mut().reconciliation_started = true; self.send_fingerprint(range, fingerprint, our_handle, their_handle, None) .await?; Ok(()) @@ -464,7 +391,7 @@ impl ReconcileRoutine { async fn on_send_entry(&mut self, message: ReconciliationSendEntry) -> Result<(), Error> { let static_token = self - .state + .session .get_their_resource_eventually(|r| &mut r.static_tokens, message.static_token_handle) .await; @@ -476,7 +403,7 @@ impl ReconcileRoutine { message.dynamic_token, )?; - self.state.store().ingest_entry(&authorised_entry)?; + self.session.store().ingest_entry(&authorised_entry)?; Ok(()) } @@ -540,8 +467,10 @@ impl ReconcileRoutine { let (static_token, dynamic_token) = token.into_parts(); // TODO: partial payloads let available = entry.payload_length; - let (static_token_handle, static_token_bind_msg) = - 
self.state.state_mut().bind_our_static_token(static_token)?; + let (static_token_handle, static_token_bind_msg) = self + .session + .state_mut() + .bind_our_static_token(static_token)?; if let Some(msg) = static_token_bind_msg { self.send(msg).await?; } @@ -602,10 +531,10 @@ impl ReconcileRoutine { } fn state(&mut self) -> RefMut { - self.state.state_mut() + self.session.state_mut() } async fn send(&self, message: impl Into) -> Result<(), WriteError> { - self.state.send(message).await + self.session.send(message).await } } diff --git a/iroh-willow/src/session/error.rs b/iroh-willow/src/session/error.rs index 943201720e..06dd3323f0 100644 --- a/iroh-willow/src/session/error.rs +++ b/iroh-willow/src/session/error.rs @@ -2,13 +2,16 @@ use ed25519_dalek::SignatureError; use crate::{ proto::{meadowcap::InvalidCapability, wgps::ResourceHandle, willow::Unauthorised}, + store::KeyStoreError, util::channel::{ReadError, WriteError}, }; #[derive(Debug, thiserror::Error)] pub enum Error { - #[error("local store failed")] + #[error("local store failed: {0}")] Store(#[from] anyhow::Error), + #[error("local store failed: {0}")] + KeyStore(#[from] KeyStoreError), #[error("failed to receive data: {0}")] Receive(#[from] ReadError), #[error("failed to send data: {0}")] diff --git a/iroh-willow/src/session/state.rs b/iroh-willow/src/session/state.rs index ed9e7f37ea..e69d6ee61f 100644 --- a/iroh-willow/src/session/state.rs +++ b/iroh-willow/src/session/state.rs @@ -1,28 +1,28 @@ use std::{ cell::{RefCell, RefMut}, - collections::HashSet, + collections::{HashSet, VecDeque}, + future::poll_fn, pin::Pin, rc::Rc, - task::Poll, + task::{Poll, Waker}, }; -use futures_lite::{future::poll_fn, StreamExt}; -use tokio::sync::Notify; +use futures_lite::{Stream, StreamExt}; use tracing::{warn, Instrument, Span}; use crate::{ net::InitialTransmission, proto::{ challenge::ChallengeState, - grouping::ThreeDRange, - keys::{NamespaceId, UserSecretKey}, + grouping::{Area, ThreeDRange}, + 
keys::NamespaceId, wgps::{ AreaOfInterestHandle, CapabilityHandle, CommitmentReveal, IntersectionHandle, IsHandle, Message, ReadCapability, SetupBindAreaOfInterest, SetupBindReadCapability, SetupBindStaticToken, StaticToken, StaticTokenHandle, }, }, - store::Store, + store::{KeyStore, Store}, util::{ channel::WriteError, task_set::{TaskKey, TaskSet}, @@ -31,7 +31,6 @@ use crate::{ use super::{ channels::ChannelSenders, - coroutine::ReconcileState, resource::{ResourceMap, ScopedResources}, Error, Role, Scope, }; @@ -43,8 +42,6 @@ pub struct SharedSessionState { #[debug("Store")] pub store: Rc>, pub tasks: Rc>>>, - pub reconcile_state: Rc>, - // pub notify_complete: Rc, } impl Clone for SharedSessionState { fn clone(&self) -> Self { @@ -53,26 +50,24 @@ impl Clone for SharedSessionState { send: self.send.clone(), store: Rc::clone(&self.store), tasks: Rc::clone(&self.tasks), - reconcile_state: Rc::clone(&self.reconcile_state), - // notify_complete: Rc::clone(&self.notify_complete), } } } impl SharedSessionState { pub fn new( - state: SessionState, - send: ChannelSenders, store: Rc>, - reconcile_state: ReconcileState, + send: ChannelSenders, + our_role: Role, + initial_transmission: InitialTransmission, ) -> Self { + let state = SessionState::new(our_role, initial_transmission); Self { state: Rc::new(RefCell::new(state)), send, store, tasks: Default::default(), - reconcile_state: Rc::new(RefCell::new(reconcile_state)), - // notify_complete: Default::default(), + // reconcile_state: Rc::new(RefCell::new(reconcile_state)), } } @@ -89,7 +84,7 @@ impl SharedSessionState { } pub async fn join_next_task(&self) -> Option<(TaskKey, Result<(), Error>)> { - std::future::poll_fn(|cx| { + poll_fn(|cx| { let mut tasks = self.tasks.borrow_mut(); let res = std::task::ready!(Pin::new(&mut tasks).poll_next(cx)); let res = match res { @@ -106,6 +101,14 @@ impl SharedSessionState { self.send.send(message).await } + pub async fn next_aoi_intersection(&self) -> Option { + poll_fn(|cx| { + 
let mut aoi_queue = &mut self.state.borrow_mut().aoi_queue; + Pin::new(&mut aoi_queue).poll_next(cx) + }) + .await + } + pub async fn get_their_resource_eventually( &self, selector: F, @@ -124,29 +127,54 @@ impl SharedSessionState { .await } - pub async fn get_our_resource_eventually( + pub fn bind_and_sign_capability( &self, - selector: F, - handle: H, - ) -> R - where - F: for<'a> Fn(&'a mut ScopedResources) -> &'a mut ResourceMap, - { - let inner = self.state.clone(); - poll_fn(move |cx| { - let mut inner = inner.borrow_mut(); - let res = selector(&mut std::ops::DerefMut::deref_mut(&mut inner).our_resources); - let r = std::task::ready!(res.poll_get_eventually(handle, cx)); - Poll::Ready(r.clone()) - }) - .await + our_intersection_handle: IntersectionHandle, + capability: ReadCapability, + ) -> Result<(CapabilityHandle, Option), Error> { + let mut inner = self.state.borrow_mut(); + let signable = inner.challenge.signable()?; + let signature = self + .store + .borrow_mut() + .key_store() + .sign_user(&capability.receiver().id(), &signable)?; + + let (our_handle, is_new) = inner + .our_resources + .capabilities + .bind_if_new(capability.clone()); + let maybe_message = is_new.then(|| SetupBindReadCapability { + capability, + handle: our_intersection_handle, + signature, + }); + Ok((our_handle, maybe_message)) } + // pub async fn get_our_resource_eventually( + // &self, + // selector: F, + // handle: H, + // ) -> R + // where + // F: for<'a> Fn(&'a mut ScopedResources) -> &'a mut ResourceMap, + // { + // let inner = self.state.clone(); + // poll_fn(move |cx| { + // let mut inner = inner.borrow_mut(); + // let res = selector(&mut std::ops::DerefMut::deref_mut(&mut inner).our_resources); + // let r = std::task::ready!(res.poll_get_eventually(handle, cx)); + // Poll::Ready(r.clone()) + // }) + // .await + // } + pub fn state_mut(&self) -> RefMut { self.state.borrow_mut() } - pub fn store(&mut self) -> RefMut { + pub fn store(&self) -> RefMut { self.store.borrow_mut() } 
} @@ -160,7 +188,7 @@ pub struct SessionState { pub pending_ranges: HashSet<(AreaOfInterestHandle, ThreeDRange)>, pub pending_entries: Option, pub challenge: ChallengeState, - // pub reconcile_state: ReconcileState + pub aoi_queue: AoiQueue, } impl SessionState { @@ -178,6 +206,7 @@ impl SessionState { their_resources: Default::default(), pending_ranges: Default::default(), pending_entries: Default::default(), + aoi_queue: Default::default(), } } fn resources(&self, scope: Scope) -> &ScopedResources { @@ -186,6 +215,12 @@ impl SessionState { Scope::Theirs => &self.their_resources, } } + // fn resources_mut(&mut self, scope: Scope) -> &ScopedResources { + // match scope { + // Scope::Ours => &mut self.our_resources, + // Scope::Theirs => &mut self.their_resources, + // } + // } pub fn reconciliation_is_complete(&self) -> bool { // tracing::debug!( // "reconciliation_is_complete started {} pending_ranges {}, pending_entries {}", @@ -198,26 +233,6 @@ impl SessionState { && self.pending_entries.is_none() } - pub fn bind_and_sign_capability( - &mut self, - user_secret_key: &UserSecretKey, - our_intersection_handle: IntersectionHandle, - capability: ReadCapability, - ) -> Result<(CapabilityHandle, Option), Error> { - let signature = self.challenge.sign(user_secret_key)?; - - let (our_handle, is_new) = self - .our_resources - .capabilities - .bind_if_new(capability.clone()); - let maybe_message = is_new.then(|| SetupBindReadCapability { - capability, - handle: our_intersection_handle, - signature, - }); - Ok((our_handle, maybe_message)) - } - pub fn commitment_reveal(&mut self) -> Result { match self.challenge { ChallengeState::Committed { our_nonce, .. 
} => { @@ -251,50 +266,121 @@ impl SessionState { // &mut self, // msg: SetupBindAreaOfInterest, // ) -> Result, Error> { - // let capability = self - // .their_resources - // .capabilities - // .try_get(&msg.authorisation)?; - // capability.try_granted_area(&msg.area_of_interest.area)?; - // let their_handle = self.their_resources.areas_of_interest.bind(msg); - // - // let maybe_shared_aoi_handles = self - // .find_shared_aoi(&their_handle)? - // .map(|our_handle| (our_handle, their_handle)); - // Ok(maybe_shared_aoi_handles) + // // let capability = self + // // .their_resources + // // .capabilities + // // .try_get(&msg.authorisation)?; + // // capability.try_granted_area(&msg.area_of_interest.area)?; + // // let their_handle = self.their_resources.areas_of_interest.bind(msg); + // // + // // let maybe_shared_aoi_handles = self + // // .find_shared_aoi(&their_handle)? + // // .map(|our_handle| (our_handle, their_handle)); + // // Ok(maybe_shared_aoi_handles) + // todo!() // } - pub fn find_shared_aoi_from_theirs( - &self, - their_handle: &AreaOfInterestHandle, - ) -> Result, Error> { - let their_aoi = self - .their_resources - .areas_of_interest - .try_get(their_handle)?; - let maybe_our_handle = self - .our_resources - .areas_of_interest - .iter() - .find(|(_handle, aoi)| aoi.area().intersection(their_aoi.area()).is_some()) - .map(|(handle, _aoi)| *handle); - Ok(maybe_our_handle) - } + /// Bind a area of interest, and start reconciliation if this area of interest has an + /// intersection with a remote area of interest. + /// + /// Will fail if the capability is missing. Await [`Self::get_our_resource_eventually`] or + /// [`Self::get_their_resource_eventually`] before calling this. + /// + /// Returns `true` if the capability was newly bound, and `false` if not. 
+ pub fn bind_area_of_interest( + &mut self, + scope: Scope, + msg: SetupBindAreaOfInterest, + ) -> Result<(), Error> { + // let resources = match scope { + // Scope::Ours => &mut self.our_resources, + // Scope::Theirs => &mut self.their_resources + // }; - pub fn find_shared_aoi_from_ours( - &self, - our_handle: &AreaOfInterestHandle, - ) -> Result, Error> { - let our_aoi = self.our_resources.areas_of_interest.try_get(our_handle)?; - let maybe_their_handle = self - .their_resources - .areas_of_interest - .iter() - .find(|(_handle, aoi)| aoi.area().intersection(our_aoi.area()).is_some()) - .map(|(handle, _aoi)| *handle); - Ok(maybe_their_handle) + let capability = match scope { + Scope::Ours => self + .our_resources + .capabilities + .try_get(&msg.authorisation)?, + Scope::Theirs => self + .their_resources + .capabilities + .try_get(&msg.authorisation)?, + }; + capability.try_granted_area(&msg.area_of_interest.area)?; + + let namespace = *capability.granted_namespace(); + let area = msg.area_of_interest.area.clone(); + let handle = match scope { + Scope::Ours => self.our_resources.areas_of_interest.bind(msg), + Scope::Theirs => self.their_resources.areas_of_interest.bind(msg), + }; + + let haystack = match scope { + Scope::Ours => &self.their_resources, + Scope::Theirs => &self.our_resources, + }; + + for (candidate_handle, candidate) in haystack.areas_of_interest.iter() { + let candidate_handle = *candidate_handle; + // Ignore areas without a capability. + let Some(cap) = haystack.capabilities.get(&candidate.authorisation) else { + continue; + }; + // Ignore areas for a different namespace. + if *cap.granted_namespace() != namespace { + continue; + } + // Check if we have an intersection. + if let Some(intersection) = candidate.area().intersection(&area) { + // We found an intersection! 
+ let (our_handle, their_handle) = match scope { + Scope::Ours => (handle, candidate_handle), + Scope::Theirs => (candidate_handle, handle), + }; + let shared = AreaOfInterestIntersection { + our_handle, + their_handle, + intersection, + namespace: namespace.into(), + }; + self.aoi_queue.push(shared); + } + } + Ok(()) } + // pub fn find_shared_aoi_from_theirs( + // &self, + // their_handle: &AreaOfInterestHandle, + // ) -> Result, Error> { + // let their_aoi = self + // .their_resources + // .areas_of_interest + // .try_get(their_handle)?; + // let maybe_our_handle = self + // .our_resources + // .areas_of_interest + // .iter() + // .find(|(_handle, aoi)| aoi.area().intersection(their_aoi.area()).is_some()) + // .map(|(handle, _aoi)| *handle); + // Ok(maybe_our_handle) + // } + // + // pub fn find_shared_aoi_from_ours( + // &self, + // our_handle: &AreaOfInterestHandle, + // ) -> Result, Error> { + // let our_aoi = self.our_resources.areas_of_interest.try_get(our_handle)?; + // let maybe_their_handle = self + // .their_resources + // .areas_of_interest + // .iter() + // .find(|(_handle, aoi)| aoi.area().intersection(our_aoi.area()).is_some()) + // .map(|(handle, _aoi)| *handle); + // Ok(maybe_their_handle) + // } + pub fn on_send_entry(&mut self) -> Result<(), Error> { let remaining = self .pending_entries @@ -380,7 +466,58 @@ impl SessionState { } } -// struct AoiFinder { -// ours: HashSet, + closed: bool, + wakers: VecDeque, +} + +impl AoiQueue { + pub fn push(&mut self, pair: AreaOfInterestIntersection) { + self.found.push_back(pair); + self.wake(); + } + pub fn close(&mut self) { + self.closed = true; + self.wake(); + } + fn wake(&mut self) { + for waker in self.wakers.drain(..) 
{ + waker.wake(); + } + } + + pub fn poll_next( + &mut self, + cx: &mut std::task::Context<'_>, + ) -> Poll> { + if self.closed { + return Poll::Ready(None); + } + if let Some(item) = self.found.pop_front() { + Poll::Ready(Some(item)) + } else { + self.wakers.push_back(cx.waker().to_owned()); + Poll::Pending + } + } +} + +impl Stream for AoiQueue { + type Item = AreaOfInterestIntersection; + fn poll_next( + self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> Poll> { + Self::poll_next(self.get_mut(), cx) + } +} diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index 97d355a7c0..8926091785 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -4,6 +4,8 @@ use anyhow::Result; use crate::proto::{ grouping::{Range, RangeEnd, ThreeDRange}, + keys::{NamespaceSecretKey, NamespaceSignature, UserId, UserSecretKey, UserSignature}, + meadowcap, wgps::Fingerprint, willow::{AuthorisedEntry, Entry, NamespaceId}, }; @@ -25,6 +27,30 @@ impl Default for SyncConfig { } } +#[derive(Debug, thiserror::Error)] +pub enum KeyStoreError { + #[error("store failed: {0}")] + Store(#[from] anyhow::Error), + #[error("missing secret key")] + MissingKey, +} + +#[derive(Debug, Copy, Clone)] +pub enum KeyScope { + Namespace, + User, +} + +pub trait KeyStore: Send + 'static { + fn insert(&mut self, secret: meadowcap::SecretKey) -> Result<(), KeyStoreError>; + fn sign_user(&self, id: &UserId, message: &[u8]) -> Result; + fn sign_namespace( + &self, + id: &NamespaceId, + message: &[u8], + ) -> Result; +} + pub trait ReadonlyStore: Send + 'static { fn fingerprint(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result; @@ -55,15 +81,57 @@ pub trait ReadonlyStore: Send + 'static { pub trait Store: ReadonlyStore + 'static { type Snapshot: ReadonlyStore + Send; + type KeyStore: KeyStore; fn snapshot(&mut self) -> Result; fn ingest_entry(&mut self, entry: &AuthorisedEntry) -> Result; + fn key_store(&mut self) -> &mut Self::KeyStore; } /// A very inefficient 
in-memory store, for testing purposes only #[derive(Debug, Default)] pub struct MemoryStore { entries: HashMap>, + keys: MemoryKeyStore, +} + +#[derive(Debug, Default)] +pub struct MemoryKeyStore { + user: HashMap, + namespace: HashMap, +} + +impl KeyStore for MemoryKeyStore { + fn insert(&mut self, secret: meadowcap::SecretKey) -> Result<(), KeyStoreError> { + Ok(match secret { + meadowcap::SecretKey::User(secret) => { + self.user.insert(secret.id(), secret); + } + meadowcap::SecretKey::Namespace(secret) => { + self.namespace.insert(secret.id(), secret); + } + }) + } + + fn sign_user(&self, id: &UserId, message: &[u8]) -> Result { + Ok(self + .user + .get(id) + .ok_or(KeyStoreError::MissingKey)? + .sign(message)) + } + + fn sign_namespace( + &self, + id: &NamespaceId, + message: &[u8], + ) -> Result { + Ok(self + .namespace + .get(id) + .ok_or(KeyStoreError::MissingKey)? + .sign(message)) + } } impl ReadonlyStore for MemoryStore { @@ -196,11 +264,16 @@ impl ReadonlyStore for Arc { impl Store for MemoryStore { type Snapshot = Arc; + type KeyStore = MemoryKeyStore; fn snapshot(&mut self) -> Result { Ok(Arc::new(Self { entries: self.entries.clone(), + keys: Default::default(), })) } + fn key_store(&mut self) -> &mut Self::KeyStore { + &mut self.keys + } fn ingest_entry(&mut self, entry: &AuthorisedEntry) -> Result { let entries = self.entries.entry(entry.namespace_id()).or_default(); let new = entry.entry(); diff --git a/iroh-willow/src/util/task_set.rs b/iroh-willow/src/util/task_set.rs new file mode 100644 index 0000000000..03d62f36d5 --- /dev/null +++ b/iroh-willow/src/util/task_set.rs @@ -0,0 +1,64 @@ +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; + +use futures_concurrency::future::{future_group, FutureGroup}; +use futures_lite::Stream; + +#[derive(derive_more::Debug, Eq, PartialEq)] +#[debug("{:?}", _0)] +pub struct TaskKey(future_group::Key); + +/// A set of tasks. 
+/// +/// Similar to [`tokio::task::JoinSet`] but can also contain local tasks. +#[derive(Debug, derive_more::Deref)] +pub struct TaskSet { + tasks: future_group::Keyed>, +} + +impl Default for TaskSet { + fn default() -> Self { + Self { + tasks: FutureGroup::new().keyed(), + } + } +} + +impl TaskSet { + pub fn new() -> Self { + Self::default() + } + pub fn spawn_local + 'static>(&mut self, future: F) -> TaskKey { + let handle = tokio::task::spawn_local(future); + let key = self.tasks.insert(handle); + TaskKey(key) + } +} + +impl Stream for TaskSet { + type Item = (TaskKey, Result); + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let Some((key, item)) = std::task::ready!(Pin::new(&mut self.tasks).poll_next(cx)) else { + return Poll::Ready(None); + }; + Poll::Ready(Some((TaskKey(key), item))) + } +} + +impl TaskSet { + pub fn spawn + 'static + Send>( + &mut self, + future: F, + ) -> future_group::Key { + let handle = tokio::task::spawn(future); + let key = self.tasks.insert(handle); + key + } + pub fn remove(&mut self, key: TaskKey) -> bool { + self.tasks.remove(key.0) + } +} From 8327d949b14cae5306d2654b8fdb7af7d3e2d825 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Wed, 15 May 2024 11:12:34 +0200 Subject: [PATCH 037/198] cleanups --- iroh-willow/src/actor.rs | 43 +++++----- iroh-willow/src/session.rs | 2 +- iroh-willow/src/session/coroutine.rs | 115 ++++++++++++++------------- iroh-willow/src/session/state.rs | 99 +++-------------------- iroh-willow/src/util/task_set.rs | 62 +++++++++------ 5 files changed, 129 insertions(+), 192 deletions(-) diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/actor.rs index 0a9bfca947..13e13f0c19 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -14,9 +14,9 @@ use crate::{ meadowcap, willow::{AuthorisedEntry, Entry}, }, - session::{coroutine::ControlRoutine, Channels, Error, Role, SessionInit, SharedSessionState}, + session::{coroutine::ControlRoutine, 
Channels, Error, Role, SessionInit, Session}, store::{KeyStore, Store}, - util::task_set::{TaskKey, TaskSet}, + util::task_set::{TaskKey, TaskMap}, }; pub const INBOX_CAP: usize = 1024; @@ -186,7 +186,7 @@ pub enum ToActor { #[derive(Debug)] struct ActiveSession { on_finish: oneshot::Sender>, - task_key: TaskKey, + task_key: TaskKey // state: SharedSessionState } @@ -195,7 +195,7 @@ pub struct StorageThread { inbox_rx: flume::Receiver, store: Rc>, sessions: HashMap, - session_tasks: TaskSet<(SessionId, Result<(), Error>)>, + session_tasks: TaskMap>, } impl StorageThread { @@ -219,14 +219,12 @@ impl StorageThread { } Ok(msg) => self.handle_message(msg)?, }, - Some((_key, res)) = self.session_tasks.next(), if !self.session_tasks.is_empty() => match res { - Ok((id, res)) => { - self.complete_session(&id, res); - } - Err(err) => { - warn!("task failed to join: {err}"); - return Err(err.into()); - } + Some((id, res)) = self.session_tasks.next(), if !self.session_tasks.is_empty() => { + let res = match res { + Ok(res) => res, + Err(err) => Err(err.into()) + }; + self.complete_session(&id, res); } }; } @@ -247,7 +245,7 @@ impl StorageThread { } => { let session_id = peer; let Channels { send, recv } = channels; - let session = SharedSessionState::new( + let session = Session::new( self.store.clone(), send, our_role, @@ -255,16 +253,11 @@ impl StorageThread { ); let task_key = self.session_tasks.spawn_local( - async move { - let res = ControlRoutine::run(session, recv, init).await; - (session_id, res) - } - .instrument(error_span!("session", peer = %peer.fmt_short())), + session_id, + ControlRoutine::run(session, recv, init) + .instrument(error_span!("session", peer = %peer.fmt_short())), ); - let active_session = ActiveSession { - on_finish, - task_key, - }; + let active_session = ActiveSession { on_finish, task_key }; self.sessions.insert(session_id, active_session); } ToActor::GetEntries { @@ -290,11 +283,11 @@ impl StorageThread { Ok(()) } - fn complete_session(&mut 
self, peer: &NodeId, result: Result<(), Error>) { - let session = self.sessions.remove(peer); + fn complete_session(&mut self, session_id: &NodeId, result: Result<(), Error>) { + let session = self.sessions.remove(session_id); if let Some(session) = session { - self.session_tasks.remove(session.task_key); session.on_finish.send(result).ok(); + self.session_tasks.remove(&session.task_key); } else { warn!("remove_session called for unknown session"); } diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 314d1f6099..1c0ce37b11 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -12,7 +12,7 @@ mod util; pub use self::channels::Channels; pub use self::error::Error; -pub use self::state::{SessionState, SharedSessionState}; +pub use self::state::{SessionState, Session}; /// To break symmetry, we refer to the peer that initiated the synchronisation session as Alfie, /// and the other peer as Betty. diff --git a/iroh-willow/src/session/coroutine.rs b/iroh-willow/src/session/coroutine.rs index 233f765382..7d575a718d 100644 --- a/iroh-willow/src/session/coroutine.rs +++ b/iroh-willow/src/session/coroutine.rs @@ -17,7 +17,7 @@ use crate::{ }, session::{ channels::LogicalChannelReceivers, Error, Scope, SessionInit, SessionState, - SharedSessionState, + Session, }, store::{ReadonlyStore, SplitAction, Store, SyncConfig}, util::channel::{Receiver, WriteError}, @@ -33,13 +33,13 @@ const INITIAL_GUARANTEES: u64 = u64::MAX; #[derive(derive_more::Debug)] pub struct ControlRoutine { control_recv: Receiver, - state: SharedSessionState, + state: Session, init: Option, } impl ControlRoutine { pub async fn run( - session: SharedSessionState, + session: Session, recv: ChannelReceivers, init: SessionInit, ) -> Result<(), Error> { @@ -54,7 +54,7 @@ impl ControlRoutine { mut aoi_recv, } = logical_recv; - // spawn a task to handle incoming static tokens. + // Spawn a task to handle incoming static tokens. 
session.spawn(error_span!("stt"), move |session| async move { while let Some(message) = static_tokens_recv.try_next().await? { session.state_mut().on_setup_bind_static_token(message); @@ -62,7 +62,7 @@ impl ControlRoutine { Ok(()) }); - // spawn a task to handle incoming capabilities. + // Spawn a task to handle incoming capabilities. session.spawn(error_span!("cap"), move |session| async move { while let Some(message) = capability_recv.try_next().await? { session.state_mut().on_setup_bind_read_capability(message)?; @@ -70,7 +70,7 @@ impl ControlRoutine { Ok(()) }); - // spawn a task to handle incoming areas of interest. + // Spawn a task to handle incoming areas of interest. session.spawn(error_span!("aoi"), move |session| async move { while let Some(message) = aoi_recv.try_next().await? { Self::on_bind_area_of_interest(session.clone(), message).await?; @@ -78,28 +78,66 @@ impl ControlRoutine { Ok(()) }); - // spawn a task to handle reconciliation messages + // Spawn a task to handle reconciliation messages session.spawn(error_span!("rec"), move |session| async move { Reconciler::new(session, reconciliation_recv)?.run().await }); + // Spawn a task to handle control messages + session.spawn(tracing::Span::current(), move |session| async move { + ControlRoutine::new(session, control_recv, init) + .run_inner() + .await + }); + + // Loop over task completions, break on failure or if reconciliation completed + while let Some((span, result)) = session.join_next_task().await { + let guard = span.enter(); + debug!(?result, "task completed"); + result?; + // Is this the right place for this check? It would run after each task + // completion, so necessarily including the completion of the reconciliation + // task, which is the only condition in which reconciliation can complete at + // the moment. + // + // TODO: We'll want to emit the completion event back to the application and + // let it decide what to do (stop, keep open) - or pass relevant config in + // SessionInit. 
+ if session.state_mut().reconciliation_is_complete() { + tracing::debug!("stop session: reconciliation is complete"); + drop(guard); + break; + } + } + + // Close all our send streams. + // + // This makes the networking send loops stop. + session.send.close_all(); + + Ok(()) + } + + pub fn new( + session: Session, + control_recv: Receiver, + init: SessionInit, + ) -> Self { Self { control_recv, state: session, init: Some(init), } - .run_inner() - .await } async fn run_inner(mut self) -> Result<(), Error> { debug!(role = ?self.state().our_role, "start session"); - // reveal our nonce. + // Reveal our nonce. let reveal_message = self.state().commitment_reveal()?; self.state.send(reveal_message).await?; - // issue guarantees for all logical channels. + // Issue guarantees for all logical channels. for channel in LogicalChannel::iter() { let msg = ControlIssueGuarantee { amount: INITIAL_GUARANTEES, @@ -108,41 +146,13 @@ impl ControlRoutine { self.state.send(msg).await?; } - let res = loop { - tokio::select! { - message = self.control_recv.recv() => { - match message { - Some(message) => self.on_control_message(message?)?, - // If the remote closed their control stream, we abort the session. - None => break Ok(()), - } - }, - Some((key, result)) = self.state.join_next_task(), if !self.state.tasks.borrow().is_empty() => { - debug!(?key, ?result, "task completed"); - result?; - // Is this the right place for this check? It would run after each task - // completion, so necessarily including the completion of the reconciliation - // task, which is the only condition in which reconciliation can complete at - // the moment. - // - // TODO: We'll want to emit the completion event back to the application and - // let it decide what to do (stop, keep open) - or pass relevant config in - // SessionInit. 
- if self.state.state_mut().reconciliation_is_complete() { - tracing::debug!("stop session: reconciliation is complete"); - break Ok(()); - } - } - } - }; - - // Close all our send streams. - // - // This makes the networking send loops stop. - self.state.send.close_all(); + while let Some(message) = self.control_recv.try_next().await? { + self.on_control_message(message)?; + } - res + Ok(()) } + fn on_control_message(&mut self, message: Message) -> Result<(), Error> { debug!(%message, "recv"); match message { @@ -161,9 +171,6 @@ impl ControlRoutine { debug!(?channel, %amount, "add guarantees"); sender.add_guarantees(amount); } - // Message::ControlFreeHandle(_msg) => { - // TODO: Free handles - // } _ => return Err(Error::UnsupportedMessage), } @@ -171,10 +178,10 @@ impl ControlRoutine { } async fn on_bind_area_of_interest( - session: SharedSessionState, + session: Session, message: SetupBindAreaOfInterest, ) -> Result<(), Error> { - let _capability = session + session .get_their_resource_eventually(|r| &mut r.capabilities, message.authorisation) .await; session @@ -183,7 +190,7 @@ impl ControlRoutine { Ok(()) } - async fn setup(session: SharedSessionState, init: SessionInit) -> Result<(), Error> { + async fn setup(session: Session, init: SessionInit) -> Result<(), Error> { debug!(interests = init.interests.len(), "start setup"); for (capability, aois) in init.interests.into_iter() { // TODO: implement private area intersection @@ -217,16 +224,14 @@ impl ControlRoutine { #[derive(derive_more::Debug)] pub struct Reconciler { - snapshot: Rc, + session: Session, recv: MessageReceiver, - session: SharedSessionState, + snapshot: Rc, } -// Note that all async methods yield to the owner of the coroutine. They are not running in a tokio -// context. You may not perform regular async operations in them. 
impl Reconciler { pub fn new( - session: SharedSessionState, + session: Session, recv: MessageReceiver, ) -> Result { let snapshot = session.store().snapshot()?; diff --git a/iroh-willow/src/session/state.rs b/iroh-willow/src/session/state.rs index e69d6ee61f..3e082ab993 100644 --- a/iroh-willow/src/session/state.rs +++ b/iroh-willow/src/session/state.rs @@ -7,7 +7,7 @@ use std::{ task::{Poll, Waker}, }; -use futures_lite::{Stream, StreamExt}; +use futures_lite::Stream; use tracing::{warn, Instrument, Span}; use crate::{ @@ -23,10 +23,7 @@ use crate::{ }, }, store::{KeyStore, Store}, - util::{ - channel::WriteError, - task_set::{TaskKey, TaskSet}, - }, + util::{channel::WriteError, task_set::TaskMap}, }; use super::{ @@ -36,14 +33,14 @@ use super::{ }; #[derive(derive_more::Debug)] -pub struct SharedSessionState { +pub struct Session { pub state: Rc>, pub send: ChannelSenders, #[debug("Store")] pub store: Rc>, - pub tasks: Rc>>>, + pub tasks: Rc>>>, } -impl Clone for SharedSessionState { +impl Clone for Session { fn clone(&self) -> Self { Self { state: Rc::clone(&self.state), @@ -54,7 +51,7 @@ impl Clone for SharedSessionState { } } -impl SharedSessionState { +impl Session { pub fn new( store: Rc>, send: ChannelSenders, @@ -67,23 +64,21 @@ impl SharedSessionState { send, store, tasks: Default::default(), - // reconcile_state: Rc::new(RefCell::new(reconcile_state)), } } - pub fn spawn(&self, span: Span, f: F) -> TaskKey + pub fn spawn(&self, span: Span, f: F) where - F: FnOnce(SharedSessionState) -> Fut, + F: FnOnce(Session) -> Fut, Fut: std::future::Future> + 'static, { let state = self.clone(); let fut = f(state); - let fut = fut.instrument(span); - let key = self.tasks.borrow_mut().spawn_local(fut); - key + let fut = fut.instrument(span.clone()); + self.tasks.borrow_mut().spawn_local(span, fut); } - pub async fn join_next_task(&self) -> Option<(TaskKey, Result<(), Error>)> { + pub async fn join_next_task(&self) -> Option<(Span, Result<(), Error>)> { poll_fn(|cx| 
{ let mut tasks = self.tasks.borrow_mut(); let res = std::task::ready!(Pin::new(&mut tasks).poll_next(cx)); @@ -152,24 +147,6 @@ impl SharedSessionState { Ok((our_handle, maybe_message)) } - // pub async fn get_our_resource_eventually( - // &self, - // selector: F, - // handle: H, - // ) -> R - // where - // F: for<'a> Fn(&'a mut ScopedResources) -> &'a mut ResourceMap, - // { - // let inner = self.state.clone(); - // poll_fn(move |cx| { - // let mut inner = inner.borrow_mut(); - // let res = selector(&mut std::ops::DerefMut::deref_mut(&mut inner).our_resources); - // let r = std::task::ready!(res.poll_get_eventually(handle, cx)); - // Poll::Ready(r.clone()) - // }) - // .await - // } - pub fn state_mut(&self) -> RefMut { self.state.borrow_mut() } @@ -262,24 +239,6 @@ impl SessionState { self.their_resources.static_tokens.bind(msg.static_token); } - // pub fn on_setup_bind_area_of_interest( - // &mut self, - // msg: SetupBindAreaOfInterest, - // ) -> Result, Error> { - // // let capability = self - // // .their_resources - // // .capabilities - // // .try_get(&msg.authorisation)?; - // // capability.try_granted_area(&msg.area_of_interest.area)?; - // // let their_handle = self.their_resources.areas_of_interest.bind(msg); - // // - // // let maybe_shared_aoi_handles = self - // // .find_shared_aoi(&their_handle)? - // // .map(|our_handle| (our_handle, their_handle)); - // // Ok(maybe_shared_aoi_handles) - // todo!() - // } - /// Bind a area of interest, and start reconciliation if this area of interest has an /// intersection with a remote area of interest. 
/// @@ -292,11 +251,6 @@ impl SessionState { scope: Scope, msg: SetupBindAreaOfInterest, ) -> Result<(), Error> { - // let resources = match scope { - // Scope::Ours => &mut self.our_resources, - // Scope::Theirs => &mut self.their_resources - // }; - let capability = match scope { Scope::Ours => self .our_resources @@ -350,37 +304,6 @@ impl SessionState { Ok(()) } - // pub fn find_shared_aoi_from_theirs( - // &self, - // their_handle: &AreaOfInterestHandle, - // ) -> Result, Error> { - // let their_aoi = self - // .their_resources - // .areas_of_interest - // .try_get(their_handle)?; - // let maybe_our_handle = self - // .our_resources - // .areas_of_interest - // .iter() - // .find(|(_handle, aoi)| aoi.area().intersection(their_aoi.area()).is_some()) - // .map(|(handle, _aoi)| *handle); - // Ok(maybe_our_handle) - // } - // - // pub fn find_shared_aoi_from_ours( - // &self, - // our_handle: &AreaOfInterestHandle, - // ) -> Result, Error> { - // let our_aoi = self.our_resources.areas_of_interest.try_get(our_handle)?; - // let maybe_their_handle = self - // .their_resources - // .areas_of_interest - // .iter() - // .find(|(_handle, aoi)| aoi.area().intersection(our_aoi.area()).is_some()) - // .map(|(handle, _aoi)| *handle); - // Ok(maybe_their_handle) - // } - pub fn on_send_entry(&mut self) -> Result<(), Error> { let remaining = self .pending_entries diff --git a/iroh-willow/src/util/task_set.rs b/iroh-willow/src/util/task_set.rs index 03d62f36d5..1e4ef29b8d 100644 --- a/iroh-willow/src/util/task_set.rs +++ b/iroh-willow/src/util/task_set.rs @@ -1,4 +1,5 @@ use std::{ + collections::HashMap, future::Future, pin::Pin, task::{Context, Poll}, @@ -13,52 +14,67 @@ pub struct TaskKey(future_group::Key); /// A set of tasks. /// -/// Similar to [`tokio::task::JoinSet`] but can also contain local tasks. 
-#[derive(Debug, derive_more::Deref)] -pub struct TaskSet { +/// Similar to [`tokio::task::JoinSet`] but can also contain local tasks, and each task is +/// identified by a key which is returned upon completion of the task. +#[derive(Debug)] +pub struct TaskMap { tasks: future_group::Keyed>, + keys: HashMap, } -impl Default for TaskSet { +impl Default for TaskMap { fn default() -> Self { Self { tasks: FutureGroup::new().keyed(), + keys: HashMap::new(), } } } -impl TaskSet { +impl TaskMap { pub fn new() -> Self { Self::default() } - pub fn spawn_local + 'static>(&mut self, future: F) -> TaskKey { + pub fn spawn_local + 'static>(&mut self, key: K, future: F) -> TaskKey { let handle = tokio::task::spawn_local(future); - let key = self.tasks.insert(handle); - TaskKey(key) + let k = self.tasks.insert(handle); + self.keys.insert(k, key); + TaskKey(k) } -} - -impl Stream for TaskSet { - type Item = (TaskKey, Result); - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + pub fn poll_next( + &mut self, + cx: &mut Context<'_>, + ) -> Poll)>> { let Some((key, item)) = std::task::ready!(Pin::new(&mut self.tasks).poll_next(cx)) else { return Poll::Ready(None); }; - Poll::Ready(Some((TaskKey(key), item))) + let key = self.keys.remove(&key).expect("key to exist"); + Poll::Ready(Some((key, item))) + } + + pub fn remove(&mut self, task_key: &TaskKey) -> bool { + self.keys.remove(&task_key.0); + self.tasks.remove(task_key.0) + } + + pub fn is_empty(&self) -> bool { + self.tasks.is_empty() } } -impl TaskSet { - pub fn spawn + 'static + Send>( - &mut self, - future: F, - ) -> future_group::Key { +impl Stream for TaskMap { + type Item = (K, Result); + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Self::poll_next(self.get_mut(), cx) + } +} + +impl TaskMap { + pub fn spawn + 'static + Send>(&mut self, future: F) -> TaskKey { let handle = tokio::task::spawn(future); let key = self.tasks.insert(handle); - key - } - pub fn remove(&mut self, 
key: TaskKey) -> bool { - self.tasks.remove(key.0) + TaskKey(key) } } From 95725be159a13f5d6a363db0c4655963565ee577 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Wed, 15 May 2024 11:47:46 +0200 Subject: [PATCH 038/198] hide refcells from public apis --- iroh-willow/src/actor.rs | 18 +- iroh-willow/src/session.rs | 2 +- iroh-willow/src/session/coroutine.rs | 114 +++------- iroh-willow/src/session/state.rs | 320 +++++++++++++++++---------- 4 files changed, 245 insertions(+), 209 deletions(-) diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/actor.rs index 13e13f0c19..071bb4d628 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -14,7 +14,7 @@ use crate::{ meadowcap, willow::{AuthorisedEntry, Entry}, }, - session::{coroutine::ControlRoutine, Channels, Error, Role, SessionInit, Session}, + session::{coroutine::ControlRoutine, Channels, Error, Role, Session, SessionInit}, store::{KeyStore, Store}, util::task_set::{TaskKey, TaskMap}, }; @@ -186,8 +186,7 @@ pub enum ToActor { #[derive(Debug)] struct ActiveSession { on_finish: oneshot::Sender>, - task_key: TaskKey - // state: SharedSessionState + task_key: TaskKey, // state: SharedSessionState } #[derive(Debug)] @@ -245,19 +244,18 @@ impl StorageThread { } => { let session_id = peer; let Channels { send, recv } = channels; - let session = Session::new( - self.store.clone(), - send, - our_role, - initial_transmission, - ); + let session = + Session::new(self.store.clone(), send, our_role, initial_transmission); let task_key = self.session_tasks.spawn_local( session_id, ControlRoutine::run(session, recv, init) .instrument(error_span!("session", peer = %peer.fmt_short())), ); - let active_session = ActiveSession { on_finish, task_key }; + let active_session = ActiveSession { + on_finish, + task_key, + }; self.sessions.insert(session_id, active_session); } ToActor::GetEntries { diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 1c0ce37b11..5ed69deea5 100644 
--- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -12,7 +12,7 @@ mod util; pub use self::channels::Channels; pub use self::error::Error; -pub use self::state::{SessionState, Session}; +pub use self::state::Session; /// To break symmetry, we refer to the peer that initiated the synchronisation session as Alfie, /// and the other peer as Betty. diff --git a/iroh-willow/src/session/coroutine.rs b/iroh-willow/src/session/coroutine.rs index 7d575a718d..a6ddfa2f3a 100644 --- a/iroh-willow/src/session/coroutine.rs +++ b/iroh-willow/src/session/coroutine.rs @@ -1,4 +1,4 @@ -use std::{cell::RefMut, rc::Rc}; +use std::rc::Rc; use futures_lite::StreamExt; use strum::IntoEnumIterator; @@ -15,10 +15,7 @@ use crate::{ }, willow::AuthorisedEntry, }, - session::{ - channels::LogicalChannelReceivers, Error, Scope, SessionInit, SessionState, - Session, - }, + session::{channels::LogicalChannelReceivers, Error, Scope, Session, SessionInit}, store::{ReadonlyStore, SplitAction, Store, SyncConfig}, util::channel::{Receiver, WriteError}, }; @@ -33,7 +30,7 @@ const INITIAL_GUARANTEES: u64 = u64::MAX; #[derive(derive_more::Debug)] pub struct ControlRoutine { control_recv: Receiver, - state: Session, + session: Session, init: Option, } @@ -57,7 +54,7 @@ impl ControlRoutine { // Spawn a task to handle incoming static tokens. session.spawn(error_span!("stt"), move |session| async move { while let Some(message) = static_tokens_recv.try_next().await? { - session.state_mut().on_setup_bind_static_token(message); + session.on_setup_bind_static_token(message); } Ok(()) }); @@ -65,7 +62,7 @@ impl ControlRoutine { // Spawn a task to handle incoming capabilities. session.spawn(error_span!("cap"), move |session| async move { while let Some(message) = capability_recv.try_next().await? 
{ - session.state_mut().on_setup_bind_read_capability(message)?; + session.on_setup_bind_read_capability(message)?; } Ok(()) }); @@ -73,7 +70,7 @@ impl ControlRoutine { // Spawn a task to handle incoming areas of interest. session.spawn(error_span!("aoi"), move |session| async move { while let Some(message) = aoi_recv.try_next().await? { - Self::on_bind_area_of_interest(session.clone(), message).await?; + session.on_bind_area_of_interest(message).await?; } Ok(()) }); @@ -103,7 +100,7 @@ impl ControlRoutine { // TODO: We'll want to emit the completion event back to the application and // let it decide what to do (stop, keep open) - or pass relevant config in // SessionInit. - if session.state_mut().reconciliation_is_complete() { + if session.reconciliation_is_complete() { tracing::debug!("stop session: reconciliation is complete"); drop(guard); break; @@ -113,29 +110,25 @@ impl ControlRoutine { // Close all our send streams. // // This makes the networking send loops stop. - session.send.close_all(); + session.close_senders(); Ok(()) } - pub fn new( - session: Session, - control_recv: Receiver, - init: SessionInit, - ) -> Self { + pub fn new(session: Session, control_recv: Receiver, init: SessionInit) -> Self { Self { control_recv, - state: session, + session, init: Some(init), } } async fn run_inner(mut self) -> Result<(), Error> { - debug!(role = ?self.state().our_role, "start session"); + debug!(role = ?self.session.our_role(), "start session"); // Reveal our nonce. - let reveal_message = self.state().commitment_reveal()?; - self.state.send(reveal_message).await?; + let reveal_message = self.session.reveal_commitment()?; + self.session.send(reveal_message).await?; // Issue guarantees for all logical channels. for channel in LogicalChannel::iter() { @@ -143,7 +136,7 @@ impl ControlRoutine { amount: INITIAL_GUARANTEES, channel, }; - self.state.send(msg).await?; + self.session.send(msg).await?; } while let Some(message) = self.control_recv.try_next().await? 
{ @@ -157,19 +150,18 @@ impl ControlRoutine { debug!(%message, "recv"); match message { Message::CommitmentReveal(msg) => { - self.state().on_commitment_reveal(msg)?; + self.session.on_commitment_reveal(msg)?; let init = self .init .take() .ok_or_else(|| Error::InvalidMessageInCurrentState)?; - self.state + self.session .spawn(error_span!("setup"), |state| Self::setup(state, init)); } Message::ControlIssueGuarantee(msg) => { let ControlIssueGuarantee { amount, channel } = msg; - let sender = self.state.send.get_logical(channel); debug!(?channel, %amount, "add guarantees"); - sender.add_guarantees(amount); + self.session.add_guarantees(channel, amount); } _ => return Err(Error::UnsupportedMessage), } @@ -177,19 +169,6 @@ impl ControlRoutine { Ok(()) } - async fn on_bind_area_of_interest( - session: Session, - message: SetupBindAreaOfInterest, - ) -> Result<(), Error> { - session - .get_their_resource_eventually(|r| &mut r.capabilities, message.authorisation) - .await; - session - .state_mut() - .bind_area_of_interest(Scope::Theirs, message)?; - Ok(()) - } - async fn setup(session: Session, init: SessionInit) -> Result<(), Error> { debug!(interests = init.interests.len(), "start setup"); for (capability, aois) in init.interests.into_iter() { @@ -207,19 +186,13 @@ impl ControlRoutine { authorisation: our_capability_handle, }; // TODO: We could skip the clone if we re-enabled sending by reference. - session - .state_mut() - .bind_area_of_interest(Scope::Ours, msg.clone())?; + session.bind_area_of_interest(Scope::Ours, msg.clone())?; session.send(msg).await?; } } debug!("setup done"); Ok(()) } - - fn state(&mut self) -> RefMut { - self.state.state_mut() - } } #[derive(derive_more::Debug)] @@ -243,7 +216,7 @@ impl Reconciler { } pub async fn run(mut self) -> Result<(), Error> { - let our_role = self.state().our_role; + let our_role = self.session.our_role(); loop { tokio::select! 
{ message = self.recv.try_next() => { @@ -258,7 +231,7 @@ impl Reconciler { } } } - if self.state().reconciliation_is_complete() { + if self.session.reconciliation_is_complete() { debug!("reconciliation complete, close session"); break; } @@ -288,7 +261,6 @@ impl Reconciler { } = intersection; let range = intersection.into_range(); let fingerprint = self.snapshot.fingerprint(namespace, &range)?; - self.session.state_mut().reconciliation_started = true; self.send_fingerprint(range, fingerprint, our_handle, their_handle, None) .await?; Ok(()) @@ -298,22 +270,16 @@ impl Reconciler { &mut self, message: ReconciliationSendFingerprint, ) -> Result<(), Error> { + let namespace = self.session.on_send_fingerprint(&message)?; trace!("on_send_fingerprint start"); let ReconciliationSendFingerprint { range, fingerprint: their_fingerprint, sender_handle: their_handle, receiver_handle: our_handle, - is_final_reply_for_range, + is_final_reply_for_range: _, } = message; - let namespace = { - let mut state = self.state(); - state.reconciliation_started = true; - state.clear_pending_range_if_some(our_handle, is_final_reply_for_range)?; - state.range_is_authorised(&range, &our_handle, &their_handle)? - }; - let our_fingerprint = self.snapshot.fingerprint(namespace, &range)?; // case 1: fingerprint match. 
@@ -356,28 +322,17 @@ impl Reconciler { message: ReconciliationAnnounceEntries, ) -> Result<(), Error> { trace!("on_announce_entries start"); + let namespace = self.session.on_announce_entries(&message)?; let ReconciliationAnnounceEntries { range, - count, + count: _, want_response, will_sort: _, sender_handle: their_handle, receiver_handle: our_handle, - is_final_reply_for_range, + is_final_reply_for_range: _, } = message; - let namespace = { - let mut state = self.state(); - state.clear_pending_range_if_some(our_handle, is_final_reply_for_range)?; - if state.pending_entries.is_some() { - return Err(Error::InvalidMessageInCurrentState); - } - let namespace = state.range_is_authorised(&range, &our_handle, &their_handle)?; - if count != 0 { - state.pending_entries = Some(count); - } - namespace - }; if want_response { self.announce_and_send_entries( namespace, @@ -400,7 +355,7 @@ impl Reconciler { .get_their_resource_eventually(|r| &mut r.static_tokens, message.static_token_handle) .await; - self.state().on_send_entry()?; + self.session.on_send_entry()?; let authorised_entry = AuthorisedEntry::try_from_parts( message.entry.entry, @@ -421,9 +376,7 @@ impl Reconciler { their_handle: AreaOfInterestHandle, is_final_reply_for_range: Option, ) -> anyhow::Result<()> { - self.state() - .pending_ranges - .insert((our_handle, range.clone())); + self.session.insert_pending_range(our_handle, range.clone()); let msg = ReconciliationSendFingerprint { range, fingerprint, @@ -446,8 +399,7 @@ impl Reconciler { our_count: Option, ) -> Result<(), Error> { if want_response { - let mut state = self.state(); - state.pending_ranges.insert((our_handle, range.clone())); + self.session.insert_pending_range(our_handle, range.clone()); } let our_count = match our_count { Some(count) => count, @@ -472,10 +424,8 @@ impl Reconciler { let (static_token, dynamic_token) = token.into_parts(); // TODO: partial payloads let available = entry.payload_length; - let (static_token_handle, 
static_token_bind_msg) = self - .session - .state_mut() - .bind_our_static_token(static_token)?; + let (static_token_handle, static_token_bind_msg) = + self.session.bind_our_static_token(static_token); if let Some(msg) = static_token_bind_msg { self.send(msg).await?; } @@ -535,10 +485,6 @@ impl Reconciler { Ok(()) } - fn state(&mut self) -> RefMut { - self.session.state_mut() - } - async fn send(&self, message: impl Into) -> Result<(), WriteError> { self.session.send(message).await } diff --git a/iroh-willow/src/session/state.rs b/iroh-willow/src/session/state.rs index 3e082ab993..1cdac5fe7e 100644 --- a/iroh-willow/src/session/state.rs +++ b/iroh-willow/src/session/state.rs @@ -17,8 +17,9 @@ use crate::{ grouping::{Area, ThreeDRange}, keys::NamespaceId, wgps::{ - AreaOfInterestHandle, CapabilityHandle, CommitmentReveal, IntersectionHandle, IsHandle, - Message, ReadCapability, SetupBindAreaOfInterest, SetupBindReadCapability, + AreaOfInterestHandle, CapabilityHandle, Channel, CommitmentReveal, IntersectionHandle, + IsHandle, LogicalChannel, Message, ReadCapability, ReconciliationAnnounceEntries, + ReconciliationSendFingerprint, SetupBindAreaOfInterest, SetupBindReadCapability, SetupBindStaticToken, StaticToken, StaticTokenHandle, }, }, @@ -32,25 +33,24 @@ use super::{ Error, Role, Scope, }; -#[derive(derive_more::Debug)] -pub struct Session { - pub state: Rc>, - pub send: ChannelSenders, - #[debug("Store")] - pub store: Rc>, - pub tasks: Rc>>>, -} +#[derive(Debug, derive_more::Deref)] +pub struct Session(Rc>); + impl Clone for Session { fn clone(&self) -> Self { - Self { - state: Rc::clone(&self.state), - send: self.send.clone(), - store: Rc::clone(&self.store), - tasks: Rc::clone(&self.tasks), - } + Self(Rc::clone(&self.0)) } } +#[derive(derive_more::Debug)] +pub struct SessionInner { + state: RefCell, + send: ChannelSenders, + #[debug("Store")] + store: Rc>, + tasks: RefCell>>, +} + impl Session { pub fn new( store: Rc>, @@ -59,12 +59,12 @@ impl Session { 
initial_transmission: InitialTransmission, ) -> Self { let state = SessionState::new(our_role, initial_transmission); - Self { - state: Rc::new(RefCell::new(state)), + Self(Rc::new(SessionInner { + state: RefCell::new(state), send, store, tasks: Default::default(), - } + })) } pub fn spawn(&self, span: Span, f: F) @@ -93,7 +93,22 @@ impl Session { } pub async fn send(&self, message: impl Into) -> Result<(), WriteError> { - self.send.send(message).await + self.0.send.send(message).await + } + + pub fn close_senders(&self) { + self.0.send.close_all(); + } + + pub fn add_guarantees(&self, channel: LogicalChannel, amount: u64) { + self.0 + .send + .get(Channel::Logical(channel)) + .add_guarantees(amount); + } + + pub fn our_role(&self) -> Role { + self.state.borrow().our_role } pub async fn next_aoi_intersection(&self) -> Option { @@ -112,9 +127,9 @@ impl Session { where F: for<'a> Fn(&'a mut ScopedResources) -> &'a mut ResourceMap, { - let inner = self.state.clone(); + let inner = Rc::clone(&self); poll_fn(move |cx| { - let mut inner = inner.borrow_mut(); + let mut inner = inner.state.borrow_mut(); let res = selector(&mut std::ops::DerefMut::deref_mut(&mut inner).their_resources); let r = std::task::ready!(res.poll_get_eventually(handle, cx)); Poll::Ready(r.clone()) @@ -147,57 +162,66 @@ impl Session { Ok((our_handle, maybe_message)) } - pub fn state_mut(&self) -> RefMut { - self.state.borrow_mut() + pub fn on_announce_entries( + &self, + message: &ReconciliationAnnounceEntries, + ) -> Result { + let mut state = self.state.borrow_mut(); + state.clear_pending_range_if_some( + message.receiver_handle, + message.is_final_reply_for_range.as_ref(), + )?; + if state.pending_entries.is_some() { + return Err(Error::InvalidMessageInCurrentState); + } + let namespace = state.range_is_authorised( + &message.range, + &message.receiver_handle, + &message.sender_handle, + )?; + if message.count != 0 { + state.pending_entries = Some(message.count); + } + Ok(namespace) } - pub fn 
store(&self) -> RefMut { - self.store.borrow_mut() + pub fn on_send_fingerprint( + &self, + message: &ReconciliationSendFingerprint, + ) -> Result { + let mut state = self.state.borrow_mut(); + state.reconciliation_started = true; + state.clear_pending_range_if_some( + message.receiver_handle, + message.is_final_reply_for_range.as_ref(), + )?; + let namespace = state.range_is_authorised( + &message.range, + &message.receiver_handle, + &message.sender_handle, + )?; + Ok(namespace) } -} -#[derive(Debug)] -pub struct SessionState { - pub our_role: Role, - pub our_resources: ScopedResources, - pub their_resources: ScopedResources, - pub reconciliation_started: bool, - pub pending_ranges: HashSet<(AreaOfInterestHandle, ThreeDRange)>, - pub pending_entries: Option, - pub challenge: ChallengeState, - pub aoi_queue: AoiQueue, -} - -impl SessionState { - pub fn new(our_role: Role, initial_transmission: InitialTransmission) -> Self { - let challenge_state = ChallengeState::Committed { - our_nonce: initial_transmission.our_nonce, - received_commitment: initial_transmission.received_commitment, - }; - // TODO: make use of initial_transmission.their_max_payload_size. 
- Self { - our_role, - challenge: challenge_state, - reconciliation_started: false, - our_resources: Default::default(), - their_resources: Default::default(), - pending_ranges: Default::default(), - pending_entries: Default::default(), - aoi_queue: Default::default(), - } + pub fn on_setup_bind_static_token(&self, msg: SetupBindStaticToken) { + self.state + .borrow_mut() + .their_resources + .static_tokens + .bind(msg.static_token); } - fn resources(&self, scope: Scope) -> &ScopedResources { - match scope { - Scope::Ours => &self.our_resources, - Scope::Theirs => &self.their_resources, - } + + pub fn on_setup_bind_read_capability(&self, msg: SetupBindReadCapability) -> Result<(), Error> { + // TODO: verify intersection handle + msg.capability.validate()?; + let mut state = self.state.borrow_mut(); + state + .challenge + .verify(msg.capability.receiver(), &msg.signature)?; + state.their_resources.capabilities.bind(msg.capability); + Ok(()) } - // fn resources_mut(&mut self, scope: Scope) -> &ScopedResources { - // match scope { - // Scope::Ours => &mut self.our_resources, - // Scope::Theirs => &mut self.their_resources, - // } - // } + pub fn reconciliation_is_complete(&self) -> bool { // tracing::debug!( // "reconciliation_is_complete started {} pending_ranges {}, pending_entries {}", @@ -205,13 +229,15 @@ impl SessionState { // self.pending_ranges.len(), // self.pending_entries.is_some() // ); - self.reconciliation_started - && self.pending_ranges.is_empty() - && self.pending_entries.is_none() + let state = self.state.borrow(); + state.reconciliation_started + && state.pending_ranges.is_empty() + && state.pending_entries.is_none() } - pub fn commitment_reveal(&mut self) -> Result { - match self.challenge { + pub fn reveal_commitment(&self) -> Result { + let state = self.state.borrow(); + match state.challenge { ChallengeState::Committed { our_nonce, .. 
} => { Ok(CommitmentReveal { nonce: our_nonce }) } @@ -219,24 +245,83 @@ impl SessionState { } } - pub fn on_commitment_reveal(&mut self, msg: CommitmentReveal) -> Result<(), Error> { - self.challenge.reveal(self.our_role, msg.nonce) + pub fn on_commitment_reveal(&self, msg: CommitmentReveal) -> Result<(), Error> { + let mut state = self.state.borrow_mut(); + let our_role = state.our_role; + state.challenge.reveal(our_role, msg.nonce) } - pub fn on_setup_bind_read_capability( - &mut self, - msg: SetupBindReadCapability, + pub fn bind_area_of_interest( + &self, + scope: Scope, + message: SetupBindAreaOfInterest, ) -> Result<(), Error> { - // TODO: verify intersection handle - msg.capability.validate()?; - self.challenge - .verify(msg.capability.receiver(), &msg.signature)?; - self.their_resources.capabilities.bind(msg.capability); + self.state + .borrow_mut() + .bind_area_of_interest(scope, message) + } + + pub async fn on_bind_area_of_interest( + &self, + message: SetupBindAreaOfInterest, + ) -> Result<(), Error> { + self.get_their_resource_eventually(|r| &mut r.capabilities, message.authorisation) + .await; + self.bind_area_of_interest(Scope::Theirs, message)?; Ok(()) } - pub fn on_setup_bind_static_token(&mut self, msg: SetupBindStaticToken) { - self.their_resources.static_tokens.bind(msg.static_token); + pub fn on_send_entry(&self) -> Result<(), Error> { + self.state.borrow_mut().on_send_entry() + } + + pub fn bind_our_static_token( + &self, + token: StaticToken, + ) -> (StaticTokenHandle, Option) { + self.state.borrow_mut().bind_our_static_token(token) + } + + pub fn insert_pending_range(&self, our_handle: AreaOfInterestHandle, range: ThreeDRange) { + let mut state = self.state.borrow_mut(); + state.reconciliation_started = true; + state.pending_ranges.insert((our_handle, range)); + } + + pub fn store(&self) -> RefMut { + self.store.borrow_mut() + } +} + +#[derive(Debug)] +struct SessionState { + our_role: Role, + our_resources: ScopedResources, + 
their_resources: ScopedResources, + reconciliation_started: bool, + pending_ranges: HashSet<(AreaOfInterestHandle, ThreeDRange)>, + pending_entries: Option, + challenge: ChallengeState, + aoi_queue: AoiQueue, +} + +impl SessionState { + fn new(our_role: Role, initial_transmission: InitialTransmission) -> Self { + let challenge_state = ChallengeState::Committed { + our_nonce: initial_transmission.our_nonce, + received_commitment: initial_transmission.received_commitment, + }; + // TODO: make use of initial_transmission.their_max_payload_size. + Self { + our_role, + challenge: challenge_state, + reconciliation_started: false, + our_resources: Default::default(), + their_resources: Default::default(), + pending_ranges: Default::default(), + pending_entries: Default::default(), + aoi_queue: Default::default(), + } } /// Bind a area of interest, and start reconciliation if this area of interest has an @@ -246,7 +331,7 @@ impl SessionState { /// [`Self::get_their_resource_eventually`] before calling this. /// /// Returns `true` if the capability was newly bound, and `false` if not. 
- pub fn bind_area_of_interest( + fn bind_area_of_interest( &mut self, scope: Scope, msg: SetupBindAreaOfInterest, @@ -304,7 +389,7 @@ impl SessionState { Ok(()) } - pub fn on_send_entry(&mut self) -> Result<(), Error> { + fn on_send_entry(&mut self) -> Result<(), Error> { let remaining = self .pending_entries .as_mut() @@ -316,10 +401,10 @@ impl SessionState { Ok(()) } - pub fn clear_pending_range_if_some( + fn clear_pending_range_if_some( &mut self, our_handle: AreaOfInterestHandle, - pending_range: Option, + pending_range: Option<&ThreeDRange>, ) -> Result<(), Error> { if let Some(range) = pending_range { // TODO: avoid clone @@ -334,33 +419,19 @@ impl SessionState { } } - pub fn bind_our_static_token( + fn bind_our_static_token( &mut self, static_token: StaticToken, - ) -> anyhow::Result<(StaticTokenHandle, Option)> { + ) -> (StaticTokenHandle, Option) { let (handle, is_new) = self .our_resources .static_tokens .bind_if_new(static_token.clone()); let msg = is_new.then(|| SetupBindStaticToken { static_token }); - Ok((handle, msg)) + (handle, msg) } - pub fn handle_to_namespace_id( - &self, - scope: Scope, - handle: &AreaOfInterestHandle, - ) -> Result { - let aoi = self.resources(scope).areas_of_interest.try_get(handle)?; - let capability = self - .resources(scope) - .capabilities - .try_get(&aoi.authorisation)?; - let namespace_id = capability.granted_namespace().into(); - Ok(namespace_id) - } - - pub fn range_is_authorised( + fn range_is_authorised( &self, range: &ThreeDRange, receiver_handle: &AreaOfInterestHandle, @@ -387,6 +458,27 @@ impl SessionState { ) -> Result<&SetupBindAreaOfInterest, Error> { self.resources(scope).areas_of_interest.try_get(handle) } + + fn handle_to_namespace_id( + &self, + scope: Scope, + handle: &AreaOfInterestHandle, + ) -> Result { + let aoi = self.resources(scope).areas_of_interest.try_get(handle)?; + let capability = self + .resources(scope) + .capabilities + .try_get(&aoi.authorisation)?; + let namespace_id = 
capability.granted_namespace().into(); + Ok(namespace_id) + } + + fn resources(&self, scope: Scope) -> &ScopedResources { + match scope { + Scope::Ours => &self.our_resources, + Scope::Theirs => &self.their_resources, + } + } } #[derive(Debug, Clone)] @@ -400,7 +492,7 @@ pub struct AreaOfInterestIntersection { #[derive(Default, Debug)] pub struct AoiQueue { found: VecDeque, - closed: bool, + // closed: bool, wakers: VecDeque, } @@ -409,10 +501,10 @@ impl AoiQueue { self.found.push_back(pair); self.wake(); } - pub fn close(&mut self) { - self.closed = true; - self.wake(); - } + // pub fn close(&mut self) { + // self.closed = true; + // self.wake(); + // } fn wake(&mut self) { for waker in self.wakers.drain(..) { waker.wake(); @@ -423,9 +515,9 @@ impl AoiQueue { &mut self, cx: &mut std::task::Context<'_>, ) -> Poll> { - if self.closed { - return Poll::Ready(None); - } + // if self.closed { + // return Poll::Ready(None); + // } if let Some(item) = self.found.pop_front() { Poll::Ready(Some(item)) } else { From 22a9236b50e7639f002d11091c6e42e3c5446a71 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Wed, 15 May 2024 11:58:00 +0200 Subject: [PATCH 039/198] cleanup module structure --- iroh-willow/src/actor.rs | 4 +- iroh-willow/src/session.rs | 7 +- iroh-willow/src/session/aoi_finder.rs | 16 -- .../session/{coroutine.rs => reconciler.rs} | 188 +----------------- iroh-willow/src/session/run.rs | 159 +++++++++++++++ 5 files changed, 171 insertions(+), 203 deletions(-) delete mode 100644 iroh-willow/src/session/aoi_finder.rs rename iroh-willow/src/session/{coroutine.rs => reconciler.rs} (60%) create mode 100644 iroh-willow/src/session/run.rs diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/actor.rs index 071bb4d628..fc395cd47d 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -14,7 +14,7 @@ use crate::{ meadowcap, willow::{AuthorisedEntry, Entry}, }, - session::{coroutine::ControlRoutine, Channels, Error, Role, Session, 
SessionInit}, + session::{self, Channels, Error, Role, Session, SessionInit}, store::{KeyStore, Store}, util::task_set::{TaskKey, TaskMap}, }; @@ -249,7 +249,7 @@ impl StorageThread { let task_key = self.session_tasks.spawn_local( session_id, - ControlRoutine::run(session, recv, init) + session::run(session, recv, init) .instrument(error_span!("session", peer = %peer.fmt_short())), ); let active_session = ActiveSession { diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 5ed69deea5..ee27ec4dc3 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -2,16 +2,17 @@ use std::collections::{HashMap, HashSet}; use crate::proto::{grouping::AreaOfInterest, wgps::ReadCapability}; -pub mod aoi_finder; pub mod channels; -pub mod coroutine; mod error; -pub mod resource; +mod reconciler; +mod resource; +mod run; mod state; mod util; pub use self::channels::Channels; pub use self::error::Error; +pub use self::run::run; pub use self::state::Session; /// To break symmetry, we refer to the peer that initiated the synchronisation session as Alfie, diff --git a/iroh-willow/src/session/aoi_finder.rs b/iroh-willow/src/session/aoi_finder.rs deleted file mode 100644 index 49169a90be..0000000000 --- a/iroh-willow/src/session/aoi_finder.rs +++ /dev/null @@ -1,16 +0,0 @@ -// use std::{cell::RefCell, collections::VecDeque, rc::Rc, task::Waker}; -// -// use crate::actor::AreaOfInterestPair; - -// pub struct AoiFinder { -// inner: Rc> -// } -// -// impl AoiFinder { -// fn push -// } -// -// struct Inner { -// queue: VecDeque, -// wakers: VecDeque -// } diff --git a/iroh-willow/src/session/coroutine.rs b/iroh-willow/src/session/reconciler.rs similarity index 60% rename from iroh-willow/src/session/coroutine.rs rename to iroh-willow/src/session/reconciler.rs index a6ddfa2f3a..0e8cb29569 100644 --- a/iroh-willow/src/session/coroutine.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -1,200 +1,24 @@ use std::rc::Rc; use futures_lite::StreamExt; -use 
strum::IntoEnumIterator; -use tracing::{debug, error_span, trace}; +use tracing::{debug, trace}; use crate::{ proto::{ grouping::ThreeDRange, keys::NamespaceId, wgps::{ - AreaOfInterestHandle, ControlIssueGuarantee, Fingerprint, LengthyEntry, LogicalChannel, - Message, ReconciliationAnnounceEntries, ReconciliationMessage, ReconciliationSendEntry, - ReconciliationSendFingerprint, SetupBindAreaOfInterest, + AreaOfInterestHandle, Fingerprint, LengthyEntry, Message, + ReconciliationAnnounceEntries, ReconciliationMessage, ReconciliationSendEntry, + ReconciliationSendFingerprint, }, willow::AuthorisedEntry, }, - session::{channels::LogicalChannelReceivers, Error, Scope, Session, SessionInit}, + session::{channels::MessageReceiver, state::AreaOfInterestIntersection, Error, Session}, store::{ReadonlyStore, SplitAction, Store, SyncConfig}, - util::channel::{Receiver, WriteError}, + util::channel::WriteError, }; -use super::{ - channels::{ChannelReceivers, MessageReceiver}, - state::AreaOfInterestIntersection, -}; - -const INITIAL_GUARANTEES: u64 = u64::MAX; - -#[derive(derive_more::Debug)] -pub struct ControlRoutine { - control_recv: Receiver, - session: Session, - init: Option, -} - -impl ControlRoutine { - pub async fn run( - session: Session, - recv: ChannelReceivers, - init: SessionInit, - ) -> Result<(), Error> { - let ChannelReceivers { - control_recv, - logical_recv, - } = recv; - let LogicalChannelReceivers { - reconciliation_recv, - mut static_tokens_recv, - mut capability_recv, - mut aoi_recv, - } = logical_recv; - - // Spawn a task to handle incoming static tokens. - session.spawn(error_span!("stt"), move |session| async move { - while let Some(message) = static_tokens_recv.try_next().await? { - session.on_setup_bind_static_token(message); - } - Ok(()) - }); - - // Spawn a task to handle incoming capabilities. - session.spawn(error_span!("cap"), move |session| async move { - while let Some(message) = capability_recv.try_next().await? 
{ - session.on_setup_bind_read_capability(message)?; - } - Ok(()) - }); - - // Spawn a task to handle incoming areas of interest. - session.spawn(error_span!("aoi"), move |session| async move { - while let Some(message) = aoi_recv.try_next().await? { - session.on_bind_area_of_interest(message).await?; - } - Ok(()) - }); - - // Spawn a task to handle reconciliation messages - session.spawn(error_span!("rec"), move |session| async move { - Reconciler::new(session, reconciliation_recv)?.run().await - }); - - // Spawn a task to handle control messages - session.spawn(tracing::Span::current(), move |session| async move { - ControlRoutine::new(session, control_recv, init) - .run_inner() - .await - }); - - // Loop over task completions, break on failure or if reconciliation completed - while let Some((span, result)) = session.join_next_task().await { - let guard = span.enter(); - debug!(?result, "task completed"); - result?; - // Is this the right place for this check? It would run after each task - // completion, so necessarily including the completion of the reconciliation - // task, which is the only condition in which reconciliation can complete at - // the moment. - // - // TODO: We'll want to emit the completion event back to the application and - // let it decide what to do (stop, keep open) - or pass relevant config in - // SessionInit. - if session.reconciliation_is_complete() { - tracing::debug!("stop session: reconciliation is complete"); - drop(guard); - break; - } - } - - // Close all our send streams. - // - // This makes the networking send loops stop. - session.close_senders(); - - Ok(()) - } - - pub fn new(session: Session, control_recv: Receiver, init: SessionInit) -> Self { - Self { - control_recv, - session, - init: Some(init), - } - } - - async fn run_inner(mut self) -> Result<(), Error> { - debug!(role = ?self.session.our_role(), "start session"); - - // Reveal our nonce. 
- let reveal_message = self.session.reveal_commitment()?; - self.session.send(reveal_message).await?; - - // Issue guarantees for all logical channels. - for channel in LogicalChannel::iter() { - let msg = ControlIssueGuarantee { - amount: INITIAL_GUARANTEES, - channel, - }; - self.session.send(msg).await?; - } - - while let Some(message) = self.control_recv.try_next().await? { - self.on_control_message(message)?; - } - - Ok(()) - } - - fn on_control_message(&mut self, message: Message) -> Result<(), Error> { - debug!(%message, "recv"); - match message { - Message::CommitmentReveal(msg) => { - self.session.on_commitment_reveal(msg)?; - let init = self - .init - .take() - .ok_or_else(|| Error::InvalidMessageInCurrentState)?; - self.session - .spawn(error_span!("setup"), |state| Self::setup(state, init)); - } - Message::ControlIssueGuarantee(msg) => { - let ControlIssueGuarantee { amount, channel } = msg; - debug!(?channel, %amount, "add guarantees"); - self.session.add_guarantees(channel, amount); - } - _ => return Err(Error::UnsupportedMessage), - } - - Ok(()) - } - - async fn setup(session: Session, init: SessionInit) -> Result<(), Error> { - debug!(interests = init.interests.len(), "start setup"); - for (capability, aois) in init.interests.into_iter() { - // TODO: implement private area intersection - let intersection_handle = 0.into(); - let (our_capability_handle, message) = - session.bind_and_sign_capability(intersection_handle, capability)?; - if let Some(message) = message { - session.send(message).await?; - } - - for area_of_interest in aois { - let msg = SetupBindAreaOfInterest { - area_of_interest, - authorisation: our_capability_handle, - }; - // TODO: We could skip the clone if we re-enabled sending by reference. 
- session.bind_area_of_interest(Scope::Ours, msg.clone())?; - session.send(msg).await?; - } - } - debug!("setup done"); - Ok(()) - } -} - #[derive(derive_more::Debug)] pub struct Reconciler { session: Session, diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs new file mode 100644 index 0000000000..579fb05e7d --- /dev/null +++ b/iroh-willow/src/session/run.rs @@ -0,0 +1,159 @@ +use futures_lite::StreamExt; +use strum::IntoEnumIterator; +use tracing::{debug, error_span}; + +use crate::{ + proto::wgps::{ControlIssueGuarantee, LogicalChannel, Message, SetupBindAreaOfInterest}, + session::{channels::LogicalChannelReceivers, Error, Scope, Session, SessionInit}, + store::Store, + util::channel::Receiver, +}; + +use super::{channels::ChannelReceivers, reconciler::Reconciler}; + +const INITIAL_GUARANTEES: u64 = u64::MAX; + +pub async fn run( + session: Session, + recv: ChannelReceivers, + init: SessionInit, +) -> Result<(), Error> { + let ChannelReceivers { + control_recv, + logical_recv, + } = recv; + let LogicalChannelReceivers { + reconciliation_recv, + mut static_tokens_recv, + mut capability_recv, + mut aoi_recv, + } = logical_recv; + + // Spawn a task to handle incoming static tokens. + session.spawn(error_span!("stt"), move |session| async move { + while let Some(message) = static_tokens_recv.try_next().await? { + session.on_setup_bind_static_token(message); + } + Ok(()) + }); + + // Spawn a task to handle incoming capabilities. + session.spawn(error_span!("cap"), move |session| async move { + while let Some(message) = capability_recv.try_next().await? { + session.on_setup_bind_read_capability(message)?; + } + Ok(()) + }); + + // Spawn a task to handle incoming areas of interest. + session.spawn(error_span!("aoi"), move |session| async move { + while let Some(message) = aoi_recv.try_next().await? 
{ + session.on_bind_area_of_interest(message).await?; + } + Ok(()) + }); + + // Spawn a task to handle reconciliation messages + session.spawn(error_span!("rec"), move |session| async move { + Reconciler::new(session, reconciliation_recv)?.run().await + }); + + // Spawn a task to handle control messages + session.spawn(tracing::Span::current(), move |session| async move { + control_loop(session, control_recv, init).await + }); + + // Loop over task completions, break on failure or if reconciliation completed + while let Some((span, result)) = session.join_next_task().await { + let guard = span.enter(); + debug!(?result, "task completed"); + result?; + // Is this the right place for this check? It would run after each task + // completion, so necessarily including the completion of the reconciliation + // task, which is the only condition in which reconciliation can complete at + // the moment. + // + // TODO: We'll want to emit the completion event back to the application and + // let it decide what to do (stop, keep open) - or pass relevant config in + // SessionInit. + if session.reconciliation_is_complete() { + tracing::debug!("stop session: reconciliation is complete"); + drop(guard); + break; + } + } + + // Close all our send streams. + // + // This makes the networking send loops stop. + session.close_senders(); + + Ok(()) +} + +async fn control_loop( + session: Session, + mut control_recv: Receiver, + init: SessionInit, +) -> Result<(), Error> { + debug!(role = ?session.our_role(), "start session"); + let mut init = Some(init); + + // Reveal our nonce. + let reveal_message = session.reveal_commitment()?; + session.send(reveal_message).await?; + + // Issue guarantees for all logical channels. + for channel in LogicalChannel::iter() { + let msg = ControlIssueGuarantee { + amount: INITIAL_GUARANTEES, + channel, + }; + session.send(msg).await?; + } + + while let Some(message) = control_recv.try_next().await? 
{ + debug!(%message, "recv"); + match message { + Message::CommitmentReveal(msg) => { + session.on_commitment_reveal(msg)?; + let init = init.take().ok_or(Error::InvalidMessageInCurrentState)?; + // send setup messages, but in a separate task to not block incoming guarantees + session.spawn(error_span!("setup"), |session| setup(session, init)); + } + Message::ControlIssueGuarantee(msg) => { + let ControlIssueGuarantee { amount, channel } = msg; + debug!(?channel, %amount, "add guarantees"); + session.add_guarantees(channel, amount); + } + _ => return Err(Error::UnsupportedMessage), + } + } + + Ok(()) +} + +async fn setup(session: Session, init: SessionInit) -> Result<(), Error> { + debug!(interests = init.interests.len(), "start setup"); + for (capability, aois) in init.interests.into_iter() { + // TODO: implement private area intersection + let intersection_handle = 0.into(); + let (our_capability_handle, message) = + session.bind_and_sign_capability(intersection_handle, capability)?; + if let Some(message) = message { + session.send(message).await?; + } + + for area_of_interest in aois { + let msg = SetupBindAreaOfInterest { + area_of_interest, + authorisation: our_capability_handle, + }; + // TODO: We could skip the clone if we re-enabled sending by reference. 
+ session.bind_area_of_interest(Scope::Ours, msg.clone())?; + session.send(msg).await?; + } + } + debug!("setup done"); + Ok(()) +} From 23a70b1825a61825a3c658b533326707b5e701b6 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Wed, 15 May 2024 19:52:12 +0200 Subject: [PATCH 040/198] wip: more stores --- iroh-willow/src/actor.rs | 44 +++++---- iroh-willow/src/net.rs | 8 +- iroh-willow/src/proto/meadowcap.rs | 50 +++++++++- iroh-willow/src/proto/wgps.rs | 21 +++-- iroh-willow/src/session/reconciler.rs | 25 +++-- iroh-willow/src/session/run.rs | 24 +++-- iroh-willow/src/session/state.rs | 80 ++++++++-------- iroh-willow/src/store.rs | 127 +++++++++++++++++++++----- 8 files changed, 260 insertions(+), 119 deletions(-) diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/actor.rs index fc395cd47d..185da3e67f 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -1,7 +1,7 @@ -use std::{cell::RefCell, collections::HashMap, rc::Rc, sync::Arc, thread::JoinHandle}; +use std::{collections::HashMap, sync::Arc, thread::JoinHandle}; use futures_lite::{future::Boxed as BoxFuture, stream::Stream, StreamExt}; -use futures_util::future::{FutureExt, Shared}; +use futures_util::future::{self, FutureExt}; use iroh_base::key::NodeId; use tokio::sync::oneshot; use tracing::{debug, error, error_span, trace, warn, Instrument}; @@ -15,7 +15,7 @@ use crate::{ willow::{AuthorisedEntry, Entry}, }, session::{self, Channels, Error, Role, Session, SessionInit}, - store::{KeyStore, Store}, + store::{KeyStore, ReadonlyStore, Shared, Store}, util::task_set::{TaskKey, TaskMap}, }; @@ -30,7 +30,7 @@ pub struct ActorHandle { } impl ActorHandle { - pub fn spawn(store: S, me: NodeId) -> ActorHandle { + pub fn spawn(store: S, key_store: K, me: NodeId) -> ActorHandle { let (tx, rx) = flume::bounded(INBOX_CAP); let join_handle = std::thread::Builder::new() .name("willow-actor".to_string()) @@ -39,7 +39,8 @@ impl ActorHandle { let _guard = span.enter(); let actor = 
StorageThread { - store: Rc::new(RefCell::new(store)), + store: Shared::new(store), + key_store: Shared::new(key_store), sessions: Default::default(), inbox_rx: rx, session_tasks: Default::default(), @@ -140,7 +141,7 @@ impl Drop for ActorHandle { #[derive(Debug)] pub struct SessionHandle { - on_finish: Shared>>>, + on_finish: future::Shared>>>, } impl SessionHandle { @@ -190,14 +191,15 @@ struct ActiveSession { } #[derive(Debug)] -pub struct StorageThread { +pub struct StorageThread { inbox_rx: flume::Receiver, - store: Rc>, + store: Shared, + key_store: Shared, sessions: HashMap, session_tasks: TaskMap>, } -impl StorageThread { +impl StorageThread { pub fn run(self) -> anyhow::Result<()> { let rt = tokio::runtime::Builder::new_current_thread() .build() @@ -244,13 +246,18 @@ impl StorageThread { } => { let session_id = peer; let Channels { send, recv } = channels; - let session = - Session::new(self.store.clone(), send, our_role, initial_transmission); + let session = Session::new(send, our_role, initial_transmission); let task_key = self.session_tasks.spawn_local( session_id, - session::run(session, recv, init) - .instrument(error_span!("session", peer = %peer.fmt_short())), + session::run( + self.store.clone(), + self.key_store.clone(), + session, + recv, + init, + ) + .instrument(error_span!("session", peer = %peer.fmt_short())), ); let active_session = ActiveSession { on_finish, @@ -263,18 +270,21 @@ impl StorageThread { range, reply, } => { - let store = self.store.borrow(); - let entries = store.get_entries(namespace, &range).filter_map(|r| r.ok()); + // TODO: We don't want to use a snapshot here. 
+ let snapshot = self.store.snapshot()?; + let entries = snapshot + .get_entries(namespace, &range) + .filter_map(|r| r.ok()); for entry in entries { reply.send(entry).ok(); } } ToActor::IngestEntry { entry, reply } => { - let res = self.store.borrow_mut().ingest_entry(&entry); + let res = self.store.ingest_entry(&entry); reply.send(res).ok(); } ToActor::InsertSecret { secret, reply } => { - let res = self.store.borrow_mut().key_store().insert(secret); + let res = self.key_store.insert(secret); reply.send(res.map_err(anyhow::Error::from)).ok(); } } diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 9c0b98a35d..13998b5fbd 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -291,7 +291,7 @@ mod tests { willow::{Entry, InvalidPath, Path, WriteCapability}, }, session::{Role, SessionInit}, - store::MemoryStore, + store::{MemoryKeyStore, MemoryStore}, }; const ALPN: &[u8] = b"iroh-willow/0"; @@ -345,10 +345,12 @@ mod tests { let mut expected_entries = BTreeSet::new(); let store_alfie = MemoryStore::default(); - let handle_alfie = ActorHandle::spawn(store_alfie, node_id_alfie); + let keys_alfie = MemoryKeyStore::default(); + let handle_alfie = ActorHandle::spawn(store_alfie, keys_alfie, node_id_alfie); let store_betty = MemoryStore::default(); - let handle_betty = ActorHandle::spawn(store_betty, node_id_betty); + let keys_betty = MemoryKeyStore::default(); + let handle_betty = ActorHandle::spawn(store_betty, keys_betty, node_id_betty); let init_alfie = setup_and_insert( &mut rng, diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index d23bd2806d..b525730dbf 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -8,9 +8,11 @@ use super::{ willow::{AuthorisedEntry, Entry, Unauthorised}, }; -pub type UserSignature = keys::UserSignature; pub type UserPublicKey = keys::UserPublicKey; pub type NamespacePublicKey = keys::NamespacePublicKey; +pub type UserId = keys::UserId; 
+pub type NamespaceId = keys::NamespaceId; +pub type UserSignature = keys::UserSignature; pub type NamespaceSignature = keys::NamespaceSignature; #[derive(Debug, derive_more::From)] @@ -19,6 +21,18 @@ pub enum SecretKey { Namespace(NamespaceSecretKey), } +// #[derive(Debug, derive_more::From)] +// pub enum PublicKey { +// User(UserPublicKey), +// Namespace(NamespacePublicKey), +// } +// +// #[derive(Debug, derive_more::From)] +// pub enum PublicKeyId { +// User(UserId), +// Namespace(NamespaceId), +// } + pub fn is_authorised_write(entry: &Entry, token: &MeadowcapAuthorisationToken) -> bool { let (capability, signature) = token.as_parts(); @@ -108,6 +122,27 @@ impl From<(McCapability, UserSignature)> for MeadowcapAuthorisationToken { } } +#[derive(Debug, Clone, derive_more::Deref, derive_more::Into)] +pub struct ValidatedCapability(McCapability); + +impl ValidatedCapability { + pub fn new(cap: McCapability) -> Result { + if cap.is_valid() { + Ok(Self(cap)) + } else { + Err(InvalidCapability) + } + } + + pub fn is_valid(&self) -> bool { + true + } + + pub fn new_unchecked(cap: McCapability) -> Self { + Self(cap) + } +} + #[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash, derive_more::From)] pub enum McCapability { Communal(CommunalCapability), @@ -164,6 +199,19 @@ impl McCapability { } } +impl Encoder for McCapability { + // TODO: Use spec-compliant encoding instead of postcard. + fn encoded_len(&self) -> usize { + postcard::experimental::serialized_size(&self).unwrap() + } + + // TODO: Use spec-compliant encoding instead of postcard. 
+ fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { + postcard::to_io(&self, out)?; + Ok(()) + } +} + #[derive(Debug, Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Hash)] pub enum AccessMode { Read, diff --git a/iroh-willow/src/proto/wgps.rs b/iroh-willow/src/proto/wgps.rs index b9208f1c88..f3acc0a797 100644 --- a/iroh-willow/src/proto/wgps.rs +++ b/iroh-willow/src/proto/wgps.rs @@ -35,6 +35,7 @@ pub type AccessChallenge = [u8; CHALLENGE_LENGTH]; // which together yield a MeadowcapAuthorisationToken. pub type StaticToken = meadowcap::McCapability; +pub type ValidatedStaticToken = meadowcap::ValidatedCapability; pub type DynamicToken = meadowcap::UserSignature; /// Whereas write access control is baked into the Willow data model, @@ -569,16 +570,16 @@ pub struct ReconciliationSendEntry { pub dynamic_token: DynamicToken, } -impl ReconciliationSendEntry { - pub fn into_authorised_entry( - self, - static_token: StaticToken, - ) -> Result { - let authorisation_token = AuthorisationToken::from_parts(static_token, self.dynamic_token); - let entry = PossiblyAuthorisedEntry::new(self.entry.entry, authorisation_token); - entry.authorise() - } -} +// impl ReconciliationSendEntry { +// pub fn into_authorised_entry( +// self, +// static_token: StaticToken, +// ) -> Result { +// let authorisation_token = AuthorisationToken::from_parts(static_token, self.dynamic_token); +// let entry = PossiblyAuthorisedEntry::new(self.entry.entry, authorisation_token); +// entry.authorise() +// } +// } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct LengthyEntry { diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index 0e8cb29569..5907b3b771 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -1,5 +1,3 @@ -use std::rc::Rc; - use futures_lite::StreamExt; use tracing::{debug, trace}; @@ -15,26 +13,29 @@ use crate::{ willow::AuthorisedEntry, }, session::{channels::MessageReceiver, 
state::AreaOfInterestIntersection, Error, Session}, - store::{ReadonlyStore, SplitAction, Store, SyncConfig}, + store::{ReadonlyStore, Shared, SplitAction, Store, SyncConfig}, util::channel::WriteError, }; #[derive(derive_more::Debug)] pub struct Reconciler { - session: Session, + session: Session, + store: Shared, recv: MessageReceiver, - snapshot: Rc, + snapshot: S::Snapshot, } impl Reconciler { pub fn new( - session: Session, + session: Session, + store: Shared, recv: MessageReceiver, ) -> Result { - let snapshot = session.store().snapshot()?; + let snapshot = store.snapshot()?; Ok(Self { recv, - snapshot: Rc::new(snapshot), + store, + snapshot, session, }) } @@ -187,7 +188,7 @@ impl Reconciler { message.dynamic_token, )?; - self.session.store().ingest_entry(&authorised_entry)?; + self.store.ingest_entry(&authorised_entry)?; Ok(()) } @@ -273,10 +274,8 @@ impl Reconciler { // TODO: expose this config let config = SyncConfig::default(); // clone to avoid borrow checker trouble - let store_snapshot = Rc::clone(&self.snapshot); - let mut iter = store_snapshot - .split_range(namespace, &range, &config)? 
- .peekable(); + let snapshot = self.snapshot.clone(); + let mut iter = snapshot.split_range(namespace, &range, &config)?.peekable(); while let Some(res) = iter.next() { let (subrange, action) = res?; let is_last = iter.peek().is_none(); diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 579fb05e7d..3d02a2a67e 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -5,7 +5,7 @@ use tracing::{debug, error_span}; use crate::{ proto::wgps::{ControlIssueGuarantee, LogicalChannel, Message, SetupBindAreaOfInterest}, session::{channels::LogicalChannelReceivers, Error, Scope, Session, SessionInit}, - store::Store, + store::{KeyStore, Shared, Store}, util::channel::Receiver, }; @@ -13,8 +13,10 @@ use super::{channels::ChannelReceivers, reconciler::Reconciler}; const INITIAL_GUARANTEES: u64 = u64::MAX; -pub async fn run( - session: Session, +pub async fn run( + store: Shared, + key_store: Shared, + session: Session, recv: ChannelReceivers, init: SessionInit, ) -> Result<(), Error> { @@ -55,12 +57,12 @@ pub async fn run( // Spawn a task to handle reconciliation messages session.spawn(error_span!("rec"), move |session| async move { - Reconciler::new(session, reconciliation_recv)?.run().await + Reconciler::new(session, store, reconciliation_recv)?.run().await }); // Spawn a task to handle control messages session.spawn(tracing::Span::current(), move |session| async move { - control_loop(session, control_recv, init).await + control_loop(session, key_store, control_recv, init).await }); // Loop over task completions, break on failure or if reconciliation completed @@ -91,8 +93,9 @@ pub async fn run( Ok(()) } -async fn control_loop( - session: Session, +async fn control_loop( + session: Session, + key_store: Shared, mut control_recv: Receiver, init: SessionInit, ) -> Result<(), Error> { @@ -119,7 +122,8 @@ async fn control_loop( session.on_commitment_reveal(msg)?; let init = 
init.take().ok_or(Error::InvalidMessageInCurrentState)?; // send setup messages, but in a separate task to not block incoming guarantees - session.spawn(error_span!("setup"), |session| setup(session, init)); + let key_store = key_store.clone(); + session.spawn(error_span!("setup"), |session| setup(key_store, session, init)); } Message::ControlIssueGuarantee(msg) => { let ControlIssueGuarantee { amount, channel } = msg; @@ -133,13 +137,13 @@ async fn control_loop( Ok(()) } -async fn setup(session: Session, init: SessionInit) -> Result<(), Error> { +async fn setup(key_store: Shared, session: Session, init: SessionInit) -> Result<(), Error> { debug!(interests = init.interests.len(), "start setup"); for (capability, aois) in init.interests.into_iter() { // TODO: implement private area intersection let intersection_handle = 0.into(); let (our_capability_handle, message) = - session.bind_and_sign_capability(intersection_handle, capability)?; + session.bind_and_sign_capability(&key_store, intersection_handle, capability)?; if let Some(message) = message { session.send(message).await?; } diff --git a/iroh-willow/src/session/state.rs b/iroh-willow/src/session/state.rs index 1cdac5fe7e..65cb50cd68 100644 --- a/iroh-willow/src/session/state.rs +++ b/iroh-willow/src/session/state.rs @@ -1,5 +1,5 @@ use std::{ - cell::{RefCell, RefMut}, + cell::{Ref, RefCell, RefMut}, collections::{HashSet, VecDeque}, future::poll_fn, pin::Pin, @@ -23,7 +23,7 @@ use crate::{ SetupBindStaticToken, StaticToken, StaticTokenHandle, }, }, - store::{KeyStore, Store}, + store::{KeyStore, Shared}, util::{channel::WriteError, task_set::TaskMap}, }; @@ -33,27 +33,26 @@ use super::{ Error, Role, Scope, }; -#[derive(Debug, derive_more::Deref)] -pub struct Session(Rc>); +#[derive(Debug, Clone)] +pub struct Session(Rc); -impl Clone for Session { - fn clone(&self) -> Self { - Self(Rc::clone(&self.0)) - } -} +// impl std::ops::Deref for Session { +// type Target = SessionInner; +// fn deref(&self) -> 
&Self::Target { +// &self.0 +// } +// } #[derive(derive_more::Debug)] -pub struct SessionInner { +struct SessionInner { state: RefCell, send: ChannelSenders, #[debug("Store")] - store: Rc>, tasks: RefCell>>, } -impl Session { +impl Session { pub fn new( - store: Rc>, send: ChannelSenders, our_role: Role, initial_transmission: InitialTransmission, @@ -62,25 +61,24 @@ impl Session { Self(Rc::new(SessionInner { state: RefCell::new(state), send, - store, tasks: Default::default(), })) } pub fn spawn(&self, span: Span, f: F) where - F: FnOnce(Session) -> Fut, + F: FnOnce(Session) -> Fut, Fut: std::future::Future> + 'static, { let state = self.clone(); let fut = f(state); let fut = fut.instrument(span.clone()); - self.tasks.borrow_mut().spawn_local(span, fut); + self.0.tasks.borrow_mut().spawn_local(span, fut); } pub async fn join_next_task(&self) -> Option<(Span, Result<(), Error>)> { poll_fn(|cx| { - let mut tasks = self.tasks.borrow_mut(); + let mut tasks = self.0.tasks.borrow_mut(); let res = std::task::ready!(Pin::new(&mut tasks).poll_next(cx)); let res = match res { None => None, @@ -108,12 +106,12 @@ impl Session { } pub fn our_role(&self) -> Role { - self.state.borrow().our_role + self.0.state.borrow().our_role } pub async fn next_aoi_intersection(&self) -> Option { poll_fn(|cx| { - let mut aoi_queue = &mut self.state.borrow_mut().aoi_queue; + let mut aoi_queue = &mut self.0.state.borrow_mut().aoi_queue; Pin::new(&mut aoi_queue).poll_next(cx) }) .await @@ -127,7 +125,7 @@ impl Session { where F: for<'a> Fn(&'a mut ScopedResources) -> &'a mut ResourceMap, { - let inner = Rc::clone(&self); + let inner = &self.clone().0; poll_fn(move |cx| { let mut inner = inner.state.borrow_mut(); let res = selector(&mut std::ops::DerefMut::deref_mut(&mut inner).their_resources); @@ -137,18 +135,15 @@ impl Session { .await } - pub fn bind_and_sign_capability( + pub fn bind_and_sign_capability( &self, + key_store: &Shared, our_intersection_handle: IntersectionHandle, capability: 
ReadCapability, ) -> Result<(CapabilityHandle, Option), Error> { - let mut inner = self.state.borrow_mut(); + let mut inner = self.0.state.borrow_mut(); let signable = inner.challenge.signable()?; - let signature = self - .store - .borrow_mut() - .key_store() - .sign_user(&capability.receiver().id(), &signable)?; + let signature = key_store.sign_user(&capability.receiver().id(), &signable)?; let (our_handle, is_new) = inner .our_resources @@ -166,7 +161,7 @@ impl Session { &self, message: &ReconciliationAnnounceEntries, ) -> Result { - let mut state = self.state.borrow_mut(); + let mut state = self.state_mut(); state.clear_pending_range_if_some( message.receiver_handle, message.is_final_reply_for_range.as_ref(), @@ -189,7 +184,7 @@ impl Session { &self, message: &ReconciliationSendFingerprint, ) -> Result { - let mut state = self.state.borrow_mut(); + let mut state = self.state_mut(); state.reconciliation_started = true; state.clear_pending_range_if_some( message.receiver_handle, @@ -204,8 +199,7 @@ impl Session { } pub fn on_setup_bind_static_token(&self, msg: SetupBindStaticToken) { - self.state - .borrow_mut() + self.state_mut() .their_resources .static_tokens .bind(msg.static_token); @@ -214,7 +208,7 @@ impl Session { pub fn on_setup_bind_read_capability(&self, msg: SetupBindReadCapability) -> Result<(), Error> { // TODO: verify intersection handle msg.capability.validate()?; - let mut state = self.state.borrow_mut(); + let mut state = self.state_mut(); state .challenge .verify(msg.capability.receiver(), &msg.signature)?; @@ -229,14 +223,14 @@ impl Session { // self.pending_ranges.len(), // self.pending_entries.is_some() // ); - let state = self.state.borrow(); + let state = self.state(); state.reconciliation_started && state.pending_ranges.is_empty() && state.pending_entries.is_none() } pub fn reveal_commitment(&self) -> Result { - let state = self.state.borrow(); + let state = self.state(); match state.challenge { ChallengeState::Committed { our_nonce, .. 
} => { Ok(CommitmentReveal { nonce: our_nonce }) @@ -246,7 +240,7 @@ impl Session { } pub fn on_commitment_reveal(&self, msg: CommitmentReveal) -> Result<(), Error> { - let mut state = self.state.borrow_mut(); + let mut state = self.state_mut(); let our_role = state.our_role; state.challenge.reveal(our_role, msg.nonce) } @@ -256,8 +250,7 @@ impl Session { scope: Scope, message: SetupBindAreaOfInterest, ) -> Result<(), Error> { - self.state - .borrow_mut() + self.state_mut() .bind_area_of_interest(scope, message) } @@ -272,24 +265,27 @@ impl Session { } pub fn on_send_entry(&self) -> Result<(), Error> { - self.state.borrow_mut().on_send_entry() + self.state_mut().on_send_entry() } pub fn bind_our_static_token( &self, token: StaticToken, ) -> (StaticTokenHandle, Option) { - self.state.borrow_mut().bind_our_static_token(token) + self.state_mut().bind_our_static_token(token) } pub fn insert_pending_range(&self, our_handle: AreaOfInterestHandle, range: ThreeDRange) { - let mut state = self.state.borrow_mut(); + let mut state = self.state_mut(); state.reconciliation_started = true; state.pending_ranges.insert((our_handle, range)); } - pub fn store(&self) -> RefMut { - self.store.borrow_mut() + fn state(&self) -> Ref { + self.0.state.borrow() + } + fn state_mut(&self) -> RefMut { + self.0.state.borrow_mut() } } diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index 8926091785..788a7a566d 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -1,13 +1,22 @@ -use std::{collections::HashMap, sync::Arc}; +use std::{ + cell::RefCell, + collections::{hash_map, HashMap}, + io::Cursor, + rc::Rc, + sync::Arc, +}; use anyhow::Result; -use crate::proto::{ - grouping::{Range, RangeEnd, ThreeDRange}, - keys::{NamespaceSecretKey, NamespaceSignature, UserId, UserSecretKey, UserSignature}, - meadowcap, - wgps::Fingerprint, - willow::{AuthorisedEntry, Entry, NamespaceId}, +use crate::{ + proto::{ + grouping::{Range, RangeEnd, ThreeDRange}, + 
keys::{NamespaceSecretKey, NamespaceSignature, UserId, UserSecretKey, UserSignature}, + meadowcap::{self, InvalidCapability, }, + wgps::{Fingerprint, StaticToken, ValidatedStaticToken}, + willow::{ AuthorisedEntry, Entry, NamespaceId}, + }, + util::Encoder, }; #[derive(Debug, Clone, Copy)] @@ -51,6 +60,13 @@ pub trait KeyStore: Send + 'static { ) -> Result; } +pub trait Store: ReadonlyStore + 'static { + type Snapshot: ReadonlyStore + Clone + Send; + + fn snapshot(&mut self) -> Result; + fn ingest_entry(&mut self, entry: &AuthorisedEntry) -> Result; +} + pub trait ReadonlyStore: Send + 'static { fn fingerprint(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result; @@ -79,20 +95,81 @@ pub trait ReadonlyStore: Send + 'static { } } -pub trait Store: ReadonlyStore + 'static { - type Snapshot: ReadonlyStore + Send; - type KeyStore: KeyStore; +#[derive(Debug)] +pub struct Shared(Rc>); - fn snapshot(&mut self) -> Result; - fn ingest_entry(&mut self, entry: &AuthorisedEntry) -> Result; - fn key_store(&mut self) -> &mut Self::KeyStore; +impl Clone for Shared { + fn clone(&self) -> Self { + Self(Rc::clone(&self.0)) + } } -/// A very inefficient in-memory store, for testing purposes only -#[derive(Debug, Default)] -pub struct MemoryStore { - entries: HashMap>, - keys: MemoryKeyStore, +impl Shared { + pub fn new(inner: S) -> Self { + Self(Rc::new(RefCell::new(inner))) + } +} + +impl Shared { + pub fn snapshot(&self) -> Result { + Ok(self.0.borrow_mut().snapshot()?) 
+ } + + pub fn ingest_entry(&self, entry: &AuthorisedEntry) -> Result { + self.0.borrow_mut().ingest_entry(entry) + } + pub fn fingerprint(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result { + self.0.borrow().fingerprint(namespace, range) + } + + // pub fn split_range( + // &self, + // namespace: NamespaceId, + // range: &ThreeDRange, + // config: &SyncConfig, + // ) -> Result>> { + // let this = self.clone(); + // this.0.borrow().split_range(namespace, range, config) + // } + // + // pub fn count(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result { + // self.0.borrow().count(namespace, range) + // } + // + // pub fn get_entries_with_authorisation<'a>( + // &'a self, + // namespace: NamespaceId, + // range: &ThreeDRange, + // ) -> impl Iterator> + 'a { + // self.0.borrow().count(namespace, range) + // } + // + // fn get_entries<'a>( + // &'a self, + // namespace: NamespaceId, + // range: &ThreeDRange, + // ) -> impl Iterator> + 'a { + // self.get_entries_with_authorisation(namespace, range) + // .map(|e| e.map(|e| e.into_entry())) + // } +} + +impl Shared { + pub fn insert(&mut self, secret: meadowcap::SecretKey) -> Result<(), KeyStoreError> { + self.0.borrow_mut().insert(secret) + } + + pub fn sign_user(&self, id: &UserId, message: &[u8]) -> Result { + self.0.borrow().sign_user(id, message) + } + + pub fn sign_namespace( + &self, + id: &NamespaceId, + message: &[u8], + ) -> Result { + self.0.borrow().sign_namespace(id, message) + } } #[derive(Debug, Default)] @@ -134,6 +211,11 @@ impl KeyStore for MemoryKeyStore { } } +#[derive(Debug, Default)] +pub struct MemoryStore { + entries: HashMap>, +} + impl ReadonlyStore for MemoryStore { fn fingerprint(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result { let mut fingerprint = Fingerprint::default(); @@ -264,16 +346,15 @@ impl ReadonlyStore for Arc { impl Store for MemoryStore { type Snapshot = Arc; - type KeyStore = MemoryKeyStore; + // type KeyStore = MemoryKeyStore; fn snapshot(&mut 
self) -> Result { Ok(Arc::new(Self { entries: self.entries.clone(), - keys: Default::default(), })) } - fn key_store(&mut self) -> &mut Self::KeyStore { - &mut self.keys - } + // fn key_store(&mut self) -> &mut Self::KeyStore { + // &mut self.keys + // } fn ingest_entry(&mut self, entry: &AuthorisedEntry) -> Result { let entries = self.entries.entry(entry.namespace_id()).or_default(); let new = entry.entry(); From 94e6326a03127f97cdc2fa3faf08497a01d86f55 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 16 May 2024 23:47:33 +0200 Subject: [PATCH 041/198] finish keystore impl, cleanups --- iroh-willow/src/proto/wgps.rs | 16 +--------------- iroh-willow/src/session/run.rs | 14 +++++++++++--- iroh-willow/src/session/state.rs | 11 +---------- iroh-willow/src/store.rs | 23 +++++++---------------- 4 files changed, 20 insertions(+), 44 deletions(-) diff --git a/iroh-willow/src/proto/wgps.rs b/iroh-willow/src/proto/wgps.rs index f3acc0a797..bd64818da9 100644 --- a/iroh-willow/src/proto/wgps.rs +++ b/iroh-willow/src/proto/wgps.rs @@ -10,10 +10,7 @@ use crate::util::{DecodeOutcome, Decoder, Encoder}; use super::{ grouping::{Area, AreaOfInterest, ThreeDRange}, meadowcap, - willow::{ - AuthorisationToken, AuthorisedEntry, Entry, PossiblyAuthorisedEntry, Unauthorised, - DIGEST_LENGTH, - }, + willow::{Entry, DIGEST_LENGTH}, }; pub const MAX_PAYLOAD_SIZE_POWER: u8 = 12; @@ -570,17 +567,6 @@ pub struct ReconciliationSendEntry { pub dynamic_token: DynamicToken, } -// impl ReconciliationSendEntry { -// pub fn into_authorised_entry( -// self, -// static_token: StaticToken, -// ) -> Result { -// let authorisation_token = AuthorisationToken::from_parts(static_token, self.dynamic_token); -// let entry = PossiblyAuthorisedEntry::new(self.entry.entry, authorisation_token); -// entry.authorise() -// } -// } - #[derive(Debug, Clone, Serialize, Deserialize)] pub struct LengthyEntry { /// The Entry in question. 
diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 3d02a2a67e..c908c7f76f 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -57,7 +57,9 @@ pub async fn run( // Spawn a task to handle reconciliation messages session.spawn(error_span!("rec"), move |session| async move { - Reconciler::new(session, store, reconciliation_recv)?.run().await + Reconciler::new(session, store, reconciliation_recv)? + .run() + .await }); // Spawn a task to handle control messages @@ -123,7 +125,9 @@ async fn control_loop( let init = init.take().ok_or(Error::InvalidMessageInCurrentState)?; // send setup messages, but in a separate task to not block incoming guarantees let key_store = key_store.clone(); - session.spawn(error_span!("setup"), |session| setup(key_store, session, init)); + session.spawn(error_span!("setup"), |session| { + setup(key_store, session, init) + }); } Message::ControlIssueGuarantee(msg) => { let ControlIssueGuarantee { amount, channel } = msg; @@ -137,7 +141,11 @@ async fn control_loop( Ok(()) } -async fn setup(key_store: Shared, session: Session, init: SessionInit) -> Result<(), Error> { +async fn setup( + key_store: Shared, + session: Session, + init: SessionInit, +) -> Result<(), Error> { debug!(interests = init.interests.len(), "start setup"); for (capability, aois) in init.interests.into_iter() { // TODO: implement private area intersection diff --git a/iroh-willow/src/session/state.rs b/iroh-willow/src/session/state.rs index 65cb50cd68..37f79d1de8 100644 --- a/iroh-willow/src/session/state.rs +++ b/iroh-willow/src/session/state.rs @@ -36,18 +36,10 @@ use super::{ #[derive(Debug, Clone)] pub struct Session(Rc); -// impl std::ops::Deref for Session { -// type Target = SessionInner; -// fn deref(&self) -> &Self::Target { -// &self.0 -// } -// } - #[derive(derive_more::Debug)] struct SessionInner { state: RefCell, send: ChannelSenders, - #[debug("Store")] tasks: RefCell>>, } @@ -250,8 +242,7 @@ impl 
Session { scope: Scope, message: SetupBindAreaOfInterest, ) -> Result<(), Error> { - self.state_mut() - .bind_area_of_interest(scope, message) + self.state_mut().bind_area_of_interest(scope, message) } pub async fn on_bind_area_of_interest( diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index 788a7a566d..fbe8cf3a7d 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -1,22 +1,13 @@ -use std::{ - cell::RefCell, - collections::{hash_map, HashMap}, - io::Cursor, - rc::Rc, - sync::Arc, -}; +use std::{cell::RefCell, collections::HashMap, rc::Rc, sync::Arc}; use anyhow::Result; -use crate::{ - proto::{ - grouping::{Range, RangeEnd, ThreeDRange}, - keys::{NamespaceSecretKey, NamespaceSignature, UserId, UserSecretKey, UserSignature}, - meadowcap::{self, InvalidCapability, }, - wgps::{Fingerprint, StaticToken, ValidatedStaticToken}, - willow::{ AuthorisedEntry, Entry, NamespaceId}, - }, - util::Encoder, +use crate::proto::{ + grouping::{Range, RangeEnd, ThreeDRange}, + keys::{NamespaceSecretKey, NamespaceSignature, UserId, UserSecretKey, UserSignature}, + meadowcap::{self}, + wgps::Fingerprint, + willow::{AuthorisedEntry, Entry, NamespaceId}, }; #[derive(Debug, Clone, Copy)] From fbc00172cd31d84ba34e758b205129e9f8d73328 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 16 May 2024 23:55:15 +0200 Subject: [PATCH 042/198] add ReconciliationSendPayload and ReconciliationTerminatePayload message structs --- iroh-willow/src/proto/wgps.rs | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/iroh-willow/src/proto/wgps.rs b/iroh-willow/src/proto/wgps.rs index bd64818da9..23a3d489e7 100644 --- a/iroh-willow/src/proto/wgps.rs +++ b/iroh-willow/src/proto/wgps.rs @@ -272,6 +272,10 @@ pub enum Message { ReconciliationAnnounceEntries(ReconciliationAnnounceEntries), #[debug("{:?}", _0)] ReconciliationSendEntry(ReconciliationSendEntry), + #[debug("{:?}", _0)] + 
ReconciliationSendPayload(ReconciliationSendPayload), + #[debug("{:?}", _0)] + ReconciliationTerminatePayload(ReconciliationTerminatePayload), // DataSendEntry // DataSendPayload // DataSetMetadata @@ -340,14 +344,16 @@ impl Decoder for Message { impl Message { pub fn channel(&self) -> Channel { match self { + Message::SetupBindReadCapability(_) => Channel::Logical(LogicalChannel::Capability), + Message::SetupBindAreaOfInterest(_) => Channel::Logical(LogicalChannel::AreaOfInterest), + Message::SetupBindStaticToken(_) => Channel::Logical(LogicalChannel::StaticToken), Message::ReconciliationSendFingerprint(_) | Message::ReconciliationAnnounceEntries(_) - | Message::ReconciliationSendEntry(_) => { + | Message::ReconciliationSendEntry(_) + | Message::ReconciliationSendPayload(_) + | Message::ReconciliationTerminatePayload(_) => { Channel::Logical(LogicalChannel::Reconciliation) } - Message::SetupBindStaticToken(_) => Channel::Logical(LogicalChannel::StaticToken), - Message::SetupBindReadCapability(_) => Channel::Logical(LogicalChannel::Capability), - Message::SetupBindAreaOfInterest(_) => Channel::Logical(LogicalChannel::AreaOfInterest), Message::CommitmentReveal(_) | Message::ControlIssueGuarantee(_) | Message::ControlAbsolve(_) @@ -567,6 +573,18 @@ pub struct ReconciliationSendEntry { pub dynamic_token: DynamicToken, } +/// Transmit some transformed Payload bytes. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ReconciliationSendPayload { + // A substring of the bytes obtained by applying transform_payload to the Payload to be transmitted. + bytes: bytes::Bytes, +} + +/// Indicate that no more bytes will be transmitted for the currently transmitted Payload as part of set reconciliation. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ReconciliationTerminatePayload; + +/// An Entry together with information about how much of its Payload a peer holds. 
#[derive(Debug, Clone, Serialize, Deserialize)] pub struct LengthyEntry { /// The Entry in question. From 5947b749ff1ab59d27ab9a010d742036dabc0c88 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Sat, 18 May 2024 00:10:01 +0200 Subject: [PATCH 043/198] another round of improvements: * spec compliant range covers * fix test * cleanup --- iroh-willow/src/actor.rs | 29 ++--- iroh-willow/src/net.rs | 43 +++---- iroh-willow/src/proto/wgps.rs | 18 ++- iroh-willow/src/session.rs | 43 +++++-- iroh-willow/src/session/reconciler.rs | 64 ++++++----- iroh-willow/src/session/run.rs | 156 +++++++++++++------------- iroh-willow/src/session/state.rs | 120 ++++++++++++++------ iroh-willow/src/util/task_set.rs | 4 + 8 files changed, 273 insertions(+), 204 deletions(-) diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/actor.rs index 185da3e67f..574144a3c3 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -14,14 +14,14 @@ use crate::{ meadowcap, willow::{AuthorisedEntry, Entry}, }, - session::{self, Channels, Error, Role, Session, SessionInit}, + session::{Channels, Error, Role, Session, SessionInit}, store::{KeyStore, ReadonlyStore, Shared, Store}, util::task_set::{TaskKey, TaskMap}, }; pub const INBOX_CAP: usize = 1024; -pub type SessionId = NodeId; +pub type SessionId = u64; #[derive(Debug, Clone)] pub struct ActorHandle { @@ -43,6 +43,7 @@ impl ActorHandle { key_store: Shared::new(key_store), sessions: Default::default(), inbox_rx: rx, + next_session_id: 0, session_tasks: Default::default(), }; if let Err(error) = actor.run() { @@ -186,6 +187,8 @@ pub enum ToActor { #[derive(Debug)] struct ActiveSession { + #[allow(unused)] + peer: NodeId, on_finish: oneshot::Sender>, task_key: TaskKey, // state: SharedSessionState } @@ -195,6 +198,7 @@ pub struct StorageThread { inbox_rx: flume::Receiver, store: Shared, key_store: Shared, + next_session_id: u64, sessions: HashMap, session_tasks: TaskMap>, } @@ -244,26 +248,23 @@ impl StorageThread { 
init, on_finish, } => { - let session_id = peer; + let id = self.next_session_id; + self.next_session_id += 1; let Channels { send, recv } = channels; let session = Session::new(send, our_role, initial_transmission); let task_key = self.session_tasks.spawn_local( - session_id, - session::run( - self.store.clone(), - self.key_store.clone(), - session, - recv, - init, - ) - .instrument(error_span!("session", peer = %peer.fmt_short())), + id, + session + .run(self.store.clone(), self.key_store.clone(), recv, init) + .instrument(error_span!("session", peer = %peer.fmt_short())), ); let active_session = ActiveSession { on_finish, task_key, + peer, }; - self.sessions.insert(session_id, active_session); + self.sessions.insert(id, active_session); } ToActor::GetEntries { namespace, @@ -291,7 +292,7 @@ impl StorageThread { Ok(()) } - fn complete_session(&mut self, session_id: &NodeId, result: Result<(), Error>) { + fn complete_session(&mut self, session_id: &SessionId, result: Result<(), Error>) { let session = self.sessions.remove(session_id); if let Some(session) = session { session.on_finish.send(result).ok(); diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 13998b5fbd..2c839742ca 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -7,7 +7,7 @@ use tokio::{ io::{AsyncReadExt, AsyncWriteExt}, task::JoinSet, }; -use tracing::{debug, error_span, instrument, trace, warn, Instrument}; +use tracing::{debug, error_span, field::Empty, instrument, trace, warn, Instrument, Span}; use crate::{ actor::ActorHandle, @@ -29,16 +29,17 @@ use crate::{ pub const CHANNEL_CAP: usize = 1024 * 64; -#[instrument(skip_all, name = "willow_net", fields(me=%me.fmt_short(), peer=%peer.fmt_short()))] +#[instrument(skip_all, name = "willow_net", fields(me=%me.fmt_short(), peer=Empty))] pub async fn run( me: NodeId, actor: ActorHandle, conn: Connection, - peer: NodeId, our_role: Role, init: SessionInit, ) -> anyhow::Result<()> { debug!(?our_role, "connected"); + let 
peer = iroh_net::magic_endpoint::get_remote_node_id(&conn)?; + Span::current().record("peer", peer.fmt_short()); let mut join_set = JoinSet::new(); let (mut control_send_stream, mut control_recv_stream) = match our_role { @@ -80,6 +81,7 @@ pub async fn run( join_set.spawn(async move { handle.on_finish().await?; + tracing::info!("session finished"); Ok(()) }); @@ -207,6 +209,7 @@ async fn recv_loop(mut recv_stream: RecvStream, mut channel_writer: Writer) -> a trace!(len = buf.bytes.len(), "recv"); } channel_writer.close(); + debug!("closed"); Ok(()) } @@ -216,7 +219,9 @@ async fn send_loop(mut send_stream: SendStream, channel_reader: Reader) -> anyho send_stream.write_chunk(data).await?; trace!(len, "sent"); } + debug!("close"); send_stream.finish().await?; + debug!("closed"); Ok(()) } @@ -253,7 +258,10 @@ pub struct InitialTransmission { async fn join_all(mut join_set: JoinSet>) -> anyhow::Result<()> { let mut final_result = Ok(()); + let mut joined = 0; while let Some(res) = join_set.join_next().await { + joined += 1; + tracing::info!("joined {joined} remaining {}", join_set.len()); let res = match res { Ok(Ok(())) => Ok(()), Ok(Err(err)) => Err(err), @@ -408,29 +416,20 @@ mod tests { node_id_alfie, handle_alfie.clone(), conn_alfie, - node_id_betty, Role::Alfie, init_alfie ) - .map(|res| { - info!("alfie done: {res:?}"); - res - }), + .inspect(|res| info!("alfie done: {res:?}")), run( node_id_betty, handle_betty.clone(), conn_betty, - node_id_alfie, Role::Betty, init_betty ) - .map(|res| { - info!("betty done: {res:?}"); - res - }), + .inspect(|res| info!("betty done: {res:?}")), ); - info!(time=?start.elapsed(), "reconciliation finished!"); - println!("reconciliation took {:?}", start.elapsed()); + info!(time=?start.elapsed(), "reconciliation finished"); info!("alfie res {:?}", res_alfie); info!("betty res {:?}", res_betty); @@ -442,23 +441,13 @@ mod tests { // "betty store {:?}", // get_entries_debug(&handle_betty, namespace_id).await? 
// ); - assert!(res_alfie.is_ok()); assert!(res_betty.is_ok()); - // assert_eq!( - // get_entries(&handle_alfie, namespace_id).await?, - // expected_entries, - // "alfie expected entries" - // ); - // assert_eq!( - // get_entries(&handle_betty, namespace_id).await?, - // expected_entries, - // "bettyexpected entries" - // ); let alfie_entries = get_entries(&handle_alfie, namespace_id).await?; - let betty_entries = get_entries(&handle_alfie, namespace_id).await?; + let betty_entries = get_entries(&handle_betty, namespace_id).await?; info!("alfie has now {} entries", alfie_entries.len()); info!("betty has now {} entries", betty_entries.len()); + // not using assert_eq because it would print a lot in case of failure assert!(alfie_entries == expected_entries, "alfie expected entries"); assert!(betty_entries == expected_entries, "betty expected entries"); diff --git a/iroh-willow/src/proto/wgps.rs b/iroh-willow/src/proto/wgps.rs index 23a3d489e7..0b09a1dc6d 100644 --- a/iroh-willow/src/proto/wgps.rs +++ b/iroh-willow/src/proto/wgps.rs @@ -533,11 +533,10 @@ pub struct ReconciliationSendFingerprint { pub sender_handle: AreaOfInterestHandle, /// An AreaOfInterestHandle, bound by the receiver of this message, that fully contains the range. pub receiver_handle: AreaOfInterestHandle, - /// If this is this the last reply to range received via [`ReconciliationSendFingerprint`] or [`ReconciliationAnnounceEntries`] - /// from the other peer, set to that range to indicate to the other peer that no further replies for that range will be sent - /// - /// TODO: This is a spec deviation, discuss further and remove or upstream - pub is_final_reply_for_range: Option, + /// If this message is the last of a set of messages that together cover the range of some prior + /// [`ReconciliationSendFingerprint`] message, then this field contains the range_count of that + /// [`ReconciliationSendFingerprint`] message. Otherwise, none. 
+ pub covers: Option, } /// Prepare transmission of the LengthyEntries a peer has in a 3dRange as part of 3d range-based set reconciliation. @@ -555,11 +554,10 @@ pub struct ReconciliationAnnounceEntries { pub sender_handle: AreaOfInterestHandle, /// An AreaOfInterestHandle, bound by the receiver of this message, that fully contains the range. pub receiver_handle: AreaOfInterestHandle, - /// If this is this the last reply to range received via [`ReconciliationSendFingerprint`] or [`ReconciliationAnnounceEntries`] - /// from the other peer, set to that range to indicate to the other peer that no further replies for that range will be sent - /// - /// TODO: This is a spec deviation, discuss further and remove or upstream - pub is_final_reply_for_range: Option, + /// If this message is the last of a set of messages that together cover the range of some prior + /// [`ReconciliationSendFingerprint`] message, then this field contains the range_count of that + /// [`ReconciliationSendFingerprint`] message. Otherwise, none. + pub covers: Option, } /// Transmit a LengthyEntry as part of 3d range-based set reconciliation. diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index ee27ec4dc3..907907c1b6 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -1,5 +1,6 @@ use std::collections::{HashMap, HashSet}; +use crate::proto::wgps::{AccessChallenge, ChallengeHash}; use crate::proto::{grouping::AreaOfInterest, wgps::ReadCapability}; pub mod channels; @@ -12,9 +13,21 @@ mod util; pub use self::channels::Channels; pub use self::error::Error; -pub use self::run::run; pub use self::state::Session; +/// Data from the initial transmission +/// +/// This happens before the session is initialized. +#[derive(Debug)] +pub struct InitialTransmission { + /// The [`AccessChallenge`] nonce, whose hash we sent to the remote. + pub our_nonce: AccessChallenge, + /// The [`ChallengeHash`] we received from the remote. 
+ pub received_commitment: ChallengeHash, + /// The maximum payload size we received from the remote. + pub their_max_payload_size: u64, +} + /// To break symmetry, we refer to the peer that initiated the synchronisation session as Alfie, /// and the other peer as Betty. #[derive(Debug, Clone, Copy, Eq, PartialEq)] @@ -26,34 +39,40 @@ pub enum Role { } impl Role { + /// Returns `true` if we initiated the session. pub fn is_alfie(&self) -> bool { matches!(self, Role::Alfie) } + /// Returns `true` if we accepted the session. pub fn is_betty(&self) -> bool { matches!(self, Role::Betty) } } -/// The bind scope for resources. -/// -/// Resources are bound by either peer -#[derive(Copy, Clone, Debug)] -pub enum Scope { - /// Resources bound by ourselves. - Ours, - /// Resources bound by the other peer. - Theirs, -} - +/// Options to initialize a session with. #[derive(Debug)] pub struct SessionInit { + /// List of interests we wish to synchronize, together with our capabilities to read them. pub interests: HashMap>, } impl SessionInit { + /// Returns a [`SessionInit`] with a single interest. pub fn with_interest(capability: ReadCapability, area_of_interest: AreaOfInterest) -> Self { Self { interests: HashMap::from_iter([(capability, HashSet::from_iter([area_of_interest]))]), } } } + +/// The bind scope for resources. +/// +/// Resources are bound by either peer +#[derive(Copy, Clone, Debug)] +pub enum Scope { + /// Resources bound by ourselves. + Ours, + /// Resources bound by the other peer. 
+ Theirs, +} + diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index 5907b3b771..85b7a37411 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -95,14 +95,14 @@ impl Reconciler { &mut self, message: ReconciliationSendFingerprint, ) -> Result<(), Error> { - let namespace = self.session.on_send_fingerprint(&message)?; + let (namespace, range_count) = self.session.on_send_fingerprint(&message)?; trace!("on_send_fingerprint start"); let ReconciliationSendFingerprint { range, fingerprint: their_fingerprint, sender_handle: their_handle, receiver_handle: our_handle, - is_final_reply_for_range: _, + covers: _, // handled by Session::on_send_finerprint } = message; let our_fingerprint = self.snapshot.fingerprint(namespace, &range)?; @@ -116,7 +116,7 @@ impl Reconciler { will_sort: false, sender_handle: our_handle, receiver_handle: their_handle, - is_final_reply_for_range: Some(range), + covers: Some(range_count), }; self.send(msg).await?; } @@ -128,7 +128,7 @@ impl Reconciler { our_handle, their_handle, true, - Some(range.clone()), + Some(range_count), None, ) .await?; @@ -136,9 +136,16 @@ impl Reconciler { // case 3: fingerprint doesn't match and is non-empty else { // reply by splitting the range into parts unless it is very short - self.split_range_and_send_parts(namespace, &range, our_handle, their_handle) - .await?; + self.split_range_and_send_parts( + namespace, + &range, + our_handle, + their_handle, + range_count, + ) + .await?; } + self.session.their_range_covered(their_handle, range_count); trace!("on_send_fingerprint done"); Ok(()) } @@ -147,7 +154,7 @@ impl Reconciler { message: ReconciliationAnnounceEntries, ) -> Result<(), Error> { trace!("on_announce_entries start"); - let namespace = self.session.on_announce_entries(&message)?; + let (namespace, range_count) = self.session.on_announce_entries(&message)?; let ReconciliationAnnounceEntries { range, count: _, @@ -155,7 
+162,7 @@ impl Reconciler { will_sort: _, sender_handle: their_handle, receiver_handle: our_handle, - is_final_reply_for_range: _, + covers: _, } = message; if want_response { @@ -165,7 +172,9 @@ impl Reconciler { our_handle, their_handle, false, - Some(range.clone()), + range_count, + // None, + // Some(range.clone()), None, ) .await?; @@ -199,15 +208,15 @@ impl Reconciler { fingerprint: Fingerprint, our_handle: AreaOfInterestHandle, their_handle: AreaOfInterestHandle, - is_final_reply_for_range: Option, + covers: Option, ) -> anyhow::Result<()> { - self.session.insert_pending_range(our_handle, range.clone()); + self.session.mark_range_uncovered(our_handle); let msg = ReconciliationSendFingerprint { range, fingerprint, sender_handle: our_handle, receiver_handle: their_handle, - is_final_reply_for_range, + covers, }; self.send(msg).await?; Ok(()) @@ -220,25 +229,25 @@ impl Reconciler { our_handle: AreaOfInterestHandle, their_handle: AreaOfInterestHandle, want_response: bool, - is_final_reply_for_range: Option, - our_count: Option, + covers: Option, + our_entry_count: Option, ) -> Result<(), Error> { - if want_response { - self.session.insert_pending_range(our_handle, range.clone()); - } - let our_count = match our_count { + let our_entry_count = match our_entry_count { Some(count) => count, None => self.snapshot.count(namespace, &range)?, }; let msg = ReconciliationAnnounceEntries { range: range.clone(), - count: our_count, + count: our_entry_count, want_response, will_sort: false, // todo: sorted? 
sender_handle: our_handle, receiver_handle: their_handle, - is_final_reply_for_range, + covers, }; + if want_response { + self.session.mark_range_uncovered(our_handle); + } self.send(msg).await?; for authorised_entry in self .snapshot @@ -270,6 +279,7 @@ impl Reconciler { range: &ThreeDRange, our_handle: AreaOfInterestHandle, their_handle: AreaOfInterestHandle, + range_count: u64, ) -> Result<(), Error> { // TODO: expose this config let config = SyncConfig::default(); @@ -279,7 +289,7 @@ impl Reconciler { while let Some(res) = iter.next() { let (subrange, action) = res?; let is_last = iter.peek().is_none(); - let is_final_reply = is_last.then(|| range.clone()); + let covers = is_last.then(|| range_count); match action { SplitAction::SendEntries(count) => { self.announce_and_send_entries( @@ -288,20 +298,14 @@ impl Reconciler { our_handle, their_handle, true, - is_final_reply, + covers, Some(count), ) .await?; } SplitAction::SendFingerprint(fingerprint) => { - self.send_fingerprint( - subrange, - fingerprint, - our_handle, - their_handle, - is_final_reply, - ) - .await?; + self.send_fingerprint(subrange, fingerprint, our_handle, their_handle, covers) + .await?; } } } diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index c908c7f76f..20f6121025 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -13,86 +13,92 @@ use super::{channels::ChannelReceivers, reconciler::Reconciler}; const INITIAL_GUARANTEES: u64 = u64::MAX; -pub async fn run( - store: Shared, - key_store: Shared, - session: Session, - recv: ChannelReceivers, - init: SessionInit, -) -> Result<(), Error> { - let ChannelReceivers { - control_recv, - logical_recv, - } = recv; - let LogicalChannelReceivers { - reconciliation_recv, - mut static_tokens_recv, - mut capability_recv, - mut aoi_recv, - } = logical_recv; - - // Spawn a task to handle incoming static tokens. 
- session.spawn(error_span!("stt"), move |session| async move { - while let Some(message) = static_tokens_recv.try_next().await? { - session.on_setup_bind_static_token(message); - } - Ok(()) - }); +impl Session { + pub async fn run( + self, + store: Shared, + key_store: Shared, + recv: ChannelReceivers, + init: SessionInit, + ) -> Result<(), Error> { + let ChannelReceivers { + control_recv, + logical_recv, + } = recv; + let LogicalChannelReceivers { + reconciliation_recv, + mut static_tokens_recv, + mut capability_recv, + mut aoi_recv, + } = logical_recv; + + // Spawn a task to handle incoming static tokens. + self.spawn(error_span!("stt"), move |session| async move { + while let Some(message) = static_tokens_recv.try_next().await? { + session.on_setup_bind_static_token(message); + } + Ok(()) + }); - // Spawn a task to handle incoming capabilities. - session.spawn(error_span!("cap"), move |session| async move { - while let Some(message) = capability_recv.try_next().await? { - session.on_setup_bind_read_capability(message)?; - } - Ok(()) - }); + // Spawn a task to handle incoming capabilities. + self.spawn(error_span!("cap"), move |session| async move { + while let Some(message) = capability_recv.try_next().await? { + session.on_setup_bind_read_capability(message)?; + } + Ok(()) + }); - // Spawn a task to handle incoming areas of interest. - session.spawn(error_span!("aoi"), move |session| async move { - while let Some(message) = aoi_recv.try_next().await? { - session.on_bind_area_of_interest(message).await?; + // Spawn a task to handle incoming areas of interest. + self.spawn(error_span!("aoi"), move |session| async move { + while let Some(message) = aoi_recv.try_next().await? { + session.on_bind_area_of_interest(message).await?; + } + Ok(()) + }); + + // Spawn a task to handle reconciliation messages + self.spawn(error_span!("rec"), move |session| async move { + Reconciler::new(session, store, reconciliation_recv)? 
+ .run() + .await + }); + + // Spawn a task to handle control messages + self.spawn(tracing::Span::current(), move |session| async move { + control_loop(session, key_store, control_recv, init).await + }); + + // Loop over task completions, break on failure or if reconciliation completed + while let Some((span, result)) = self.join_next_task().await { + let guard = span.enter(); + debug!( + ?result, + remaining = self.remaining_tasks(), + "task completed" + ); + result?; + // Is this the right place for this check? It would run after each task + // completion, so necessarily including the completion of the reconciliation + // task, which is the only condition in which reconciliation can complete at + // the moment. + // + // TODO: We'll want to emit the completion event back to the application and + // let it decide what to do (stop, keep open) - or pass relevant config in + // SessionInit. + if self.reconciliation_is_complete() { + tracing::debug!("stop self: reconciliation is complete"); + drop(guard); + // break; + + // Close all our send streams. + // + // This makes the networking send loops stop. + self.close_senders(); + } } + Ok(()) - }); - - // Spawn a task to handle reconciliation messages - session.spawn(error_span!("rec"), move |session| async move { - Reconciler::new(session, store, reconciliation_recv)? - .run() - .await - }); - - // Spawn a task to handle control messages - session.spawn(tracing::Span::current(), move |session| async move { - control_loop(session, key_store, control_recv, init).await - }); - - // Loop over task completions, break on failure or if reconciliation completed - while let Some((span, result)) = session.join_next_task().await { - let guard = span.enter(); - debug!(?result, "task completed"); - result?; - // Is this the right place for this check? 
It would run after each task - // completion, so necessarily including the completion of the reconciliation - // task, which is the only condition in which reconciliation can complete at - // the moment. - // - // TODO: We'll want to emit the completion event back to the application and - // let it decide what to do (stop, keep open) - or pass relevant config in - // SessionInit. - if session.reconciliation_is_complete() { - tracing::debug!("stop session: reconciliation is complete"); - drop(guard); - break; - } } - - // Close all our send streams. - // - // This makes the networking send loops stop. - session.close_senders(); - - Ok(()) } async fn control_loop( diff --git a/iroh-willow/src/session/state.rs b/iroh-willow/src/session/state.rs index 37f79d1de8..bac02d09a2 100644 --- a/iroh-willow/src/session/state.rs +++ b/iroh-willow/src/session/state.rs @@ -82,6 +82,11 @@ impl Session { .await } + pub fn remaining_tasks(&self) -> usize { + let tasks = self.0.tasks.borrow(); + tasks.len() + } + pub async fn send(&self, message: impl Into) -> Result<(), WriteError> { self.0.send.send(message).await } @@ -149,15 +154,34 @@ impl Session { Ok((our_handle, maybe_message)) } + pub fn mark_range_uncovered(&self, our_handle: AreaOfInterestHandle) { + let mut state = self.state_mut(); + state.reconciliation_started = true; + let range_count = state.our_range_counter; + state.our_uncovered_ranges.insert((our_handle, range_count)); + warn!(?range_count, len=state.our_uncovered_ranges.len(), "SEND FP - INSERT UNCOVERED"); + state.our_range_counter += 1; + } + + pub fn their_range_covered( + &mut self, + their_handle: AreaOfInterestHandle, + their_range_counter: u64, + ) { + let mut state = self.state_mut(); + state + .their_uncovered_ranges + .remove(&(their_handle, their_range_counter)); + } + pub fn on_announce_entries( &self, message: &ReconciliationAnnounceEntries, - ) -> Result { + ) -> Result<(NamespaceId, Option), Error> { let mut state = self.state_mut(); - 
state.clear_pending_range_if_some( - message.receiver_handle, - message.is_final_reply_for_range.as_ref(), - )?; + if let Some(range_count) = message.covers { + state.our_range_covered(message.receiver_handle, range_count)?; + } if state.pending_entries.is_some() { return Err(Error::InvalidMessageInCurrentState); } @@ -169,25 +193,39 @@ impl Session { if message.count != 0 { state.pending_entries = Some(message.count); } - Ok(namespace) + let range_count = if message.want_response { + let range_count = state.their_range_counter; + state.their_range_counter += 1; + Some(range_count) + } else { + None + }; + Ok((namespace, range_count)) } pub fn on_send_fingerprint( &self, message: &ReconciliationSendFingerprint, - ) -> Result { + ) -> Result<(NamespaceId, u64), Error> { let mut state = self.state_mut(); + if let Some(range_counter) = message.covers { + state.our_range_covered(message.receiver_handle, range_counter)?; + } + state.reconciliation_started = true; - state.clear_pending_range_if_some( - message.receiver_handle, - message.is_final_reply_for_range.as_ref(), - )?; + + let range_count = state.their_range_counter; + state.their_range_counter += 1; + // state + // .their_uncovered_ranges + // .insert((message.sender_handle, range_count)); + let namespace = state.range_is_authorised( &message.range, &message.receiver_handle, &message.sender_handle, )?; - Ok(namespace) + Ok((namespace, range_count)) } pub fn on_setup_bind_static_token(&self, msg: SetupBindStaticToken) { @@ -209,15 +247,16 @@ impl Session { } pub fn reconciliation_is_complete(&self) -> bool { - // tracing::debug!( - // "reconciliation_is_complete started {} pending_ranges {}, pending_entries {}", - // self.reconciliation_started, - // self.pending_ranges.len(), - // self.pending_entries.is_some() - // ); let state = self.state(); + tracing::debug!( + "reconciliation_is_complete started {} pending_ranges {}, pending_entries {:?}", + state.reconciliation_started, + state.our_uncovered_ranges.len(), 
+ state.pending_entries + ); state.reconciliation_started - && state.pending_ranges.is_empty() + && state.our_uncovered_ranges.is_empty() + && state.their_uncovered_ranges.is_empty() && state.pending_entries.is_none() } @@ -266,11 +305,11 @@ impl Session { self.state_mut().bind_our_static_token(token) } - pub fn insert_pending_range(&self, our_handle: AreaOfInterestHandle, range: ThreeDRange) { - let mut state = self.state_mut(); - state.reconciliation_started = true; - state.pending_ranges.insert((our_handle, range)); - } + // pub fn insert_pending_range(&self, our_handle: AreaOfInterestHandle, range: ThreeDRange) { + // let mut state = self.state_mut(); + // state.reconciliation_started = true; + // state.pending_ranges.insert((our_handle, range)); + // } fn state(&self) -> Ref { self.0.state.borrow() @@ -286,8 +325,12 @@ struct SessionState { our_resources: ScopedResources, their_resources: ScopedResources, reconciliation_started: bool, - pending_ranges: HashSet<(AreaOfInterestHandle, ThreeDRange)>, + our_uncovered_ranges: HashSet<(AreaOfInterestHandle, u64)>, + their_uncovered_ranges: HashSet<(AreaOfInterestHandle, u64)>, + our_range_counter: u64, + their_range_counter: u64, pending_entries: Option, + // pending_entries_to_send: Option, challenge: ChallengeState, aoi_queue: AoiQueue, } @@ -305,8 +348,12 @@ impl SessionState { reconciliation_started: false, our_resources: Default::default(), their_resources: Default::default(), - pending_ranges: Default::default(), + our_range_counter: 0, + their_range_counter: 0, + our_uncovered_ranges: Default::default(), + their_uncovered_ranges: Default::default(), pending_entries: Default::default(), + // pending_entries_to_send: Default::default(), aoi_queue: Default::default(), } } @@ -388,20 +435,21 @@ impl SessionState { Ok(()) } - fn clear_pending_range_if_some( + fn our_range_covered( &mut self, our_handle: AreaOfInterestHandle, - pending_range: Option<&ThreeDRange>, + range_count: u64, ) -> Result<(), Error> { - if 
let Some(range) = pending_range { - // TODO: avoid clone - if !self.pending_ranges.remove(&(our_handle, range.clone())) { - warn!("received duplicate final reply for range marker"); - Err(Error::InvalidMessageInCurrentState) - } else { - Ok(()) - } + // TODO: avoid clone + if !self.our_uncovered_ranges.remove(&(our_handle, range_count)) { + warn!("received duplicate cover for range"); + Err(Error::InvalidMessageInCurrentState) } else { + warn!( + ?range_count, + remaining = self.our_uncovered_ranges.len(), + "RECV COVER" + ); Ok(()) } } diff --git a/iroh-willow/src/util/task_set.rs b/iroh-willow/src/util/task_set.rs index 1e4ef29b8d..bf1fe91f49 100644 --- a/iroh-willow/src/util/task_set.rs +++ b/iroh-willow/src/util/task_set.rs @@ -42,6 +42,7 @@ impl TaskMap { TaskKey(k) } + pub fn poll_next( &mut self, cx: &mut Context<'_>, @@ -61,6 +62,9 @@ impl TaskMap { pub fn is_empty(&self) -> bool { self.tasks.is_empty() } + pub fn len(&self) -> usize { + self.tasks.len() + } } impl Stream for TaskMap { From 65b5494ff52c6705f754018394f5bbcc2af785cb Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Sat, 18 May 2024 00:50:53 +0200 Subject: [PATCH 044/198] wait for resources on reconcile start --- iroh-willow/src/session/reconciler.rs | 67 +++---- iroh-willow/src/session/state.rs | 241 +++++++++++++------------- 2 files changed, 138 insertions(+), 170 deletions(-) diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index 85b7a37411..c5cdf98b1a 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -95,38 +95,29 @@ impl Reconciler { &mut self, message: ReconciliationSendFingerprint, ) -> Result<(), Error> { - let (namespace, range_count) = self.session.on_send_fingerprint(&message)?; - trace!("on_send_fingerprint start"); - let ReconciliationSendFingerprint { - range, - fingerprint: their_fingerprint, - sender_handle: their_handle, - receiver_handle: our_handle, - covers: _, // 
handled by Session::on_send_finerprint - } = message; - - let our_fingerprint = self.snapshot.fingerprint(namespace, &range)?; + let (namespace, range_count) = self.session.on_send_fingerprint(&message).await?; + let our_fingerprint = self.snapshot.fingerprint(namespace, &message.range)?; // case 1: fingerprint match. - if our_fingerprint == their_fingerprint { - let msg = ReconciliationAnnounceEntries { - range: range.clone(), + if our_fingerprint == message.fingerprint { + let reply = ReconciliationAnnounceEntries { + range: message.range.clone(), count: 0, want_response: false, will_sort: false, - sender_handle: our_handle, - receiver_handle: their_handle, + sender_handle: message.receiver_handle, + receiver_handle: message.sender_handle, covers: Some(range_count), }; - self.send(msg).await?; + self.send(reply).await?; } // case 2: fingerprint is empty - else if their_fingerprint.is_empty() { + else if message.fingerprint.is_empty() { self.announce_and_send_entries( namespace, - &range, - our_handle, - their_handle, + &message.range, + message.receiver_handle, + message.sender_handle, true, Some(range_count), None, @@ -138,15 +129,13 @@ impl Reconciler { // reply by splitting the range into parts unless it is very short self.split_range_and_send_parts( namespace, - &range, - our_handle, - their_handle, + &message.range, + message.receiver_handle, + message.sender_handle, range_count, ) .await?; } - self.session.their_range_covered(their_handle, range_count); - trace!("on_send_fingerprint done"); Ok(()) } async fn on_announce_entries( @@ -154,27 +143,15 @@ impl Reconciler { message: ReconciliationAnnounceEntries, ) -> Result<(), Error> { trace!("on_announce_entries start"); - let (namespace, range_count) = self.session.on_announce_entries(&message)?; - let ReconciliationAnnounceEntries { - range, - count: _, - want_response, - will_sort: _, - sender_handle: their_handle, - receiver_handle: our_handle, - covers: _, - } = message; - - if want_response { + let 
(namespace, range_count) = self.session.on_announce_entries(&message).await?; + if message.want_response { self.announce_and_send_entries( namespace, - &range, - our_handle, - their_handle, + &message.range, + message.receiver_handle, + message.sender_handle, false, range_count, - // None, - // Some(range.clone()), None, ) .await?; @@ -210,7 +187,7 @@ impl Reconciler { their_handle: AreaOfInterestHandle, covers: Option, ) -> anyhow::Result<()> { - self.session.mark_range_uncovered(our_handle); + self.session.mark_range_pending(our_handle); let msg = ReconciliationSendFingerprint { range, fingerprint, @@ -246,7 +223,7 @@ impl Reconciler { covers, }; if want_response { - self.session.mark_range_uncovered(our_handle); + self.session.mark_range_pending(our_handle); } self.send(msg).await?; for authorised_entry in self diff --git a/iroh-willow/src/session/state.rs b/iroh-willow/src/session/state.rs index bac02d09a2..683a1dab01 100644 --- a/iroh-willow/src/session/state.rs +++ b/iroh-willow/src/session/state.rs @@ -8,7 +8,7 @@ use std::{ }; use futures_lite::Stream; -use tracing::{warn, Instrument, Span}; +use tracing::{Instrument, Span}; use crate::{ net::InitialTransmission, @@ -114,6 +114,19 @@ impl Session { .await } + pub fn get_our_resource( + &self, + selector: F, + handle: H, + ) -> Result + where + F: for<'a> Fn(&'a mut ScopedResources) -> &'a mut ResourceMap, + { + let mut state = self.0.state.borrow_mut(); + let res = selector(&mut std::ops::DerefMut::deref_mut(&mut state).our_resources); + res.try_get(&handle).cloned() + } + pub async fn get_their_resource_eventually( &self, selector: F, @@ -124,8 +137,8 @@ impl Session { { let inner = &self.clone().0; poll_fn(move |cx| { - let mut inner = inner.state.borrow_mut(); - let res = selector(&mut std::ops::DerefMut::deref_mut(&mut inner).their_resources); + let mut state = inner.state.borrow_mut(); + let res = selector(&mut std::ops::DerefMut::deref_mut(&mut state).their_resources); let r = 
std::task::ready!(res.poll_get_eventually(handle, cx)); Poll::Ready(r.clone()) }) @@ -154,80 +167,96 @@ impl Session { Ok((our_handle, maybe_message)) } - pub fn mark_range_uncovered(&self, our_handle: AreaOfInterestHandle) { + pub fn mark_range_pending(&self, our_handle: AreaOfInterestHandle) { let mut state = self.state_mut(); state.reconciliation_started = true; let range_count = state.our_range_counter; state.our_uncovered_ranges.insert((our_handle, range_count)); - warn!(?range_count, len=state.our_uncovered_ranges.len(), "SEND FP - INSERT UNCOVERED"); state.our_range_counter += 1; } - pub fn their_range_covered( - &mut self, - their_handle: AreaOfInterestHandle, - their_range_counter: u64, - ) { - let mut state = self.state_mut(); - state - .their_uncovered_ranges - .remove(&(their_handle, their_range_counter)); - } - - pub fn on_announce_entries( + pub async fn on_announce_entries( &self, message: &ReconciliationAnnounceEntries, ) -> Result<(NamespaceId, Option), Error> { - let mut state = self.state_mut(); - if let Some(range_count) = message.covers { - state.our_range_covered(message.receiver_handle, range_count)?; - } - if state.pending_entries.is_some() { - return Err(Error::InvalidMessageInCurrentState); - } - let namespace = state.range_is_authorised( - &message.range, - &message.receiver_handle, - &message.sender_handle, - )?; - if message.count != 0 { - state.pending_entries = Some(message.count); - } - let range_count = if message.want_response { - let range_count = state.their_range_counter; - state.their_range_counter += 1; - Some(range_count) - } else { - None + let range_count = { + let mut state = self.state_mut(); + if let Some(range_count) = message.covers { + state.mark_range_covered(message.receiver_handle, range_count)?; + } + if state.pending_entries.is_some() { + return Err(Error::InvalidMessageInCurrentState); + } + if message.count != 0 { + state.pending_entries = Some(message.count); + } + if message.want_response { + let range_count 
= state.their_range_counter; + state.their_range_counter += 1; + Some(range_count) + } else { + None + } }; + let namespace = self + .range_is_authorised_eventually( + &message.range, + message.receiver_handle, + message.sender_handle, + ) + .await?; Ok((namespace, range_count)) } - pub fn on_send_fingerprint( + pub async fn on_send_fingerprint( &self, message: &ReconciliationSendFingerprint, ) -> Result<(NamespaceId, u64), Error> { - let mut state = self.state_mut(); - if let Some(range_counter) = message.covers { - state.our_range_covered(message.receiver_handle, range_counter)?; - } - - state.reconciliation_started = true; + let range_count = { + let mut state = self.state_mut(); + state.reconciliation_started = true; + if let Some(range_count) = message.covers { + state.mark_range_covered(message.receiver_handle, range_count)?; + } + let range_count = state.their_range_counter; + state.their_range_counter += 1; + range_count + }; - let range_count = state.their_range_counter; - state.their_range_counter += 1; - // state - // .their_uncovered_ranges - // .insert((message.sender_handle, range_count)); - - let namespace = state.range_is_authorised( - &message.range, - &message.receiver_handle, - &message.sender_handle, - )?; + let namespace = self + .range_is_authorised_eventually( + &message.range, + message.receiver_handle, + message.sender_handle, + ) + .await?; Ok((namespace, range_count)) } + async fn range_is_authorised_eventually( + &self, + range: &ThreeDRange, + receiver_handle: AreaOfInterestHandle, + sender_handle: AreaOfInterestHandle, + ) -> Result { + let our_namespace = self.our_aoi_to_namespace(&receiver_handle)?; + let their_namespace = self + .their_aoi_to_namespace_eventually(sender_handle) + .await?; + if our_namespace != their_namespace { + return Err(Error::AreaOfInterestNamespaceMismatch); + } + let our_aoi = self.get_our_resource(|r| &mut r.areas_of_interest, receiver_handle)?; + let their_aoi = self + .get_their_resource_eventually(|r| 
&mut r.areas_of_interest, sender_handle) + .await; + + if !our_aoi.area().includes_range(&range) || !their_aoi.area().includes_range(&range) { + return Err(Error::RangeOutsideCapability); + } + Ok(our_namespace.into()) + } + pub fn on_setup_bind_static_token(&self, msg: SetupBindStaticToken) { self.state_mut() .their_resources @@ -256,7 +285,7 @@ impl Session { ); state.reconciliation_started && state.our_uncovered_ranges.is_empty() - && state.their_uncovered_ranges.is_empty() + // && state.their_uncovered_ranges.is_empty() && state.pending_entries.is_none() } @@ -295,7 +324,7 @@ impl Session { } pub fn on_send_entry(&self) -> Result<(), Error> { - self.state_mut().on_send_entry() + self.state_mut().decrement_pending_entries() } pub fn bind_our_static_token( @@ -305,15 +334,35 @@ impl Session { self.state_mut().bind_our_static_token(token) } - // pub fn insert_pending_range(&self, our_handle: AreaOfInterestHandle, range: ThreeDRange) { - // let mut state = self.state_mut(); - // state.reconciliation_started = true; - // state.pending_ranges.insert((our_handle, range)); - // } + async fn their_aoi_to_namespace_eventually( + &self, + handle: AreaOfInterestHandle, + ) -> Result { + let aoi = self + .get_their_resource_eventually(|r| &mut r.areas_of_interest, handle) + .await; + let capability = self + .get_their_resource_eventually(|r| &mut r.capabilities, aoi.authorisation) + .await; + let namespace_id = capability.granted_namespace().into(); + Ok(namespace_id) + } + + fn our_aoi_to_namespace(&self, handle: &AreaOfInterestHandle) -> Result { + let state = self.state_mut(); + let aoi = state.our_resources.areas_of_interest.try_get(handle)?; + let capability = state + .our_resources + .capabilities + .try_get(&aoi.authorisation)?; + let namespace_id = capability.granted_namespace().into(); + Ok(namespace_id) + } fn state(&self) -> Ref { self.0.state.borrow() } + fn state_mut(&self) -> RefMut { self.0.state.borrow_mut() } @@ -326,11 +375,10 @@ struct SessionState { 
their_resources: ScopedResources, reconciliation_started: bool, our_uncovered_ranges: HashSet<(AreaOfInterestHandle, u64)>, - their_uncovered_ranges: HashSet<(AreaOfInterestHandle, u64)>, + // their_uncovered_ranges: HashSet<(AreaOfInterestHandle, u64)>, our_range_counter: u64, their_range_counter: u64, pending_entries: Option, - // pending_entries_to_send: Option, challenge: ChallengeState, aoi_queue: AoiQueue, } @@ -351,9 +399,8 @@ impl SessionState { our_range_counter: 0, their_range_counter: 0, our_uncovered_ranges: Default::default(), - their_uncovered_ranges: Default::default(), + // their_uncovered_ranges: Default::default(), pending_entries: Default::default(), - // pending_entries_to_send: Default::default(), aoi_queue: Default::default(), } } @@ -423,7 +470,7 @@ impl SessionState { Ok(()) } - fn on_send_entry(&mut self) -> Result<(), Error> { + fn decrement_pending_entries(&mut self) -> Result<(), Error> { let remaining = self .pending_entries .as_mut() @@ -435,21 +482,14 @@ impl SessionState { Ok(()) } - fn our_range_covered( + fn mark_range_covered( &mut self, our_handle: AreaOfInterestHandle, range_count: u64, ) -> Result<(), Error> { - // TODO: avoid clone if !self.our_uncovered_ranges.remove(&(our_handle, range_count)) { - warn!("received duplicate cover for range"); Err(Error::InvalidMessageInCurrentState) } else { - warn!( - ?range_count, - remaining = self.our_uncovered_ranges.len(), - "RECV COVER" - ); Ok(()) } } @@ -465,55 +505,6 @@ impl SessionState { let msg = is_new.then(|| SetupBindStaticToken { static_token }); (handle, msg) } - - fn range_is_authorised( - &self, - range: &ThreeDRange, - receiver_handle: &AreaOfInterestHandle, - sender_handle: &AreaOfInterestHandle, - ) -> Result { - let our_namespace = self.handle_to_namespace_id(Scope::Ours, receiver_handle)?; - let their_namespace = self.handle_to_namespace_id(Scope::Theirs, sender_handle)?; - if our_namespace != their_namespace { - return Err(Error::AreaOfInterestNamespaceMismatch); - } 
- let our_aoi = self.handle_to_aoi(Scope::Ours, receiver_handle)?; - let their_aoi = self.handle_to_aoi(Scope::Theirs, sender_handle)?; - - if !our_aoi.area().includes_range(&range) || !their_aoi.area().includes_range(&range) { - return Err(Error::RangeOutsideCapability); - } - Ok(our_namespace.into()) - } - - fn handle_to_aoi( - &self, - scope: Scope, - handle: &AreaOfInterestHandle, - ) -> Result<&SetupBindAreaOfInterest, Error> { - self.resources(scope).areas_of_interest.try_get(handle) - } - - fn handle_to_namespace_id( - &self, - scope: Scope, - handle: &AreaOfInterestHandle, - ) -> Result { - let aoi = self.resources(scope).areas_of_interest.try_get(handle)?; - let capability = self - .resources(scope) - .capabilities - .try_get(&aoi.authorisation)?; - let namespace_id = capability.granted_namespace().into(); - Ok(namespace_id) - } - - fn resources(&self, scope: Scope) -> &ScopedResources { - match scope { - Scope::Ours => &self.our_resources, - Scope::Theirs => &self.their_resources, - } - } } #[derive(Debug, Clone)] From 228a23845df691980f34d2f812edb996a1a489b4 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Sat, 18 May 2024 00:59:59 +0200 Subject: [PATCH 045/198] more cleanup --- Cargo.toml | 8 +++++++ iroh-willow/src/session/state.rs | 40 ++++++++++++++------------------ 2 files changed, 25 insertions(+), 23 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 3eae79edb2..1efcf0d6d4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,6 +30,14 @@ opt-level = 3 panic = 'abort' incremental = false +[profile.dev.package.ed25519-dalek] +opt-level = 3 + +[profile.dev.package.curve25519-dalek] +opt-level = 3 + +[profile.dev.package.iroh-blake3] +opt-level = 3 [workspace.lints.rust] missing_debug_implementations = "warn" diff --git a/iroh-willow/src/session/state.rs b/iroh-willow/src/session/state.rs index 683a1dab01..08486dfe67 100644 --- a/iroh-willow/src/session/state.rs +++ b/iroh-willow/src/session/state.rs @@ -38,6 +38,7 @@ pub struct 
Session(Rc); #[derive(derive_more::Debug)] struct SessionInner { + our_role: Role, state: RefCell, send: ChannelSenders, tasks: RefCell>>, @@ -49,8 +50,9 @@ impl Session { our_role: Role, initial_transmission: InitialTransmission, ) -> Self { - let state = SessionState::new(our_role, initial_transmission); + let state = SessionState::new(initial_transmission); Self(Rc::new(SessionInner { + our_role, state: RefCell::new(state), send, tasks: Default::default(), @@ -103,7 +105,7 @@ impl Session { } pub fn our_role(&self) -> Role { - self.0.state.borrow().our_role + self.0.our_role } pub async fn next_aoi_intersection(&self) -> Option { @@ -300,8 +302,8 @@ impl Session { } pub fn on_commitment_reveal(&self, msg: CommitmentReveal) -> Result<(), Error> { + let our_role = self.our_role(); let mut state = self.state_mut(); - let our_role = state.our_role; state.challenge.reveal(our_role, msg.nonce) } @@ -329,9 +331,15 @@ impl Session { pub fn bind_our_static_token( &self, - token: StaticToken, + static_token: StaticToken, ) -> (StaticTokenHandle, Option) { - self.state_mut().bind_our_static_token(token) + let mut state = self.state_mut(); + let (handle, is_new) = state + .our_resources + .static_tokens + .bind_if_new(static_token.clone()); + let msg = is_new.then(|| SetupBindStaticToken { static_token }); + (handle, msg) } async fn their_aoi_to_namespace_eventually( @@ -370,7 +378,6 @@ impl Session { #[derive(Debug)] struct SessionState { - our_role: Role, our_resources: ScopedResources, their_resources: ScopedResources, reconciliation_started: bool, @@ -384,14 +391,13 @@ struct SessionState { } impl SessionState { - fn new(our_role: Role, initial_transmission: InitialTransmission) -> Self { + fn new(initial_transmission: InitialTransmission) -> Self { let challenge_state = ChallengeState::Committed { our_nonce: initial_transmission.our_nonce, received_commitment: initial_transmission.received_commitment, }; // TODO: make use of initial_transmission.their_max_payload_size. 
Self { - our_role, challenge: challenge_state, reconciliation_started: false, our_resources: Default::default(), @@ -436,15 +442,15 @@ impl SessionState { Scope::Theirs => self.their_resources.areas_of_interest.bind(msg), }; - let haystack = match scope { + let other_resources = match scope { Scope::Ours => &self.their_resources, Scope::Theirs => &self.our_resources, }; - for (candidate_handle, candidate) in haystack.areas_of_interest.iter() { + for (candidate_handle, candidate) in other_resources.areas_of_interest.iter() { let candidate_handle = *candidate_handle; // Ignore areas without a capability. - let Some(cap) = haystack.capabilities.get(&candidate.authorisation) else { + let Some(cap) = other_resources.capabilities.get(&candidate.authorisation) else { continue; }; // Ignore areas for a different namespace. @@ -493,18 +499,6 @@ impl SessionState { Ok(()) } } - - fn bind_our_static_token( - &mut self, - static_token: StaticToken, - ) -> (StaticTokenHandle, Option) { - let (handle, is_new) = self - .our_resources - .static_tokens - .bind_if_new(static_token.clone()); - let msg = is_new.then(|| SetupBindStaticToken { static_token }); - (handle, msg) - } } #[derive(Debug, Clone)] From 4c46b6566e6cda4654862c42fd5f4024f9ee804c Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Sat, 18 May 2024 01:23:36 +0200 Subject: [PATCH 046/198] more cleanup and better code structure --- iroh-willow/src/session.rs | 13 +++- iroh-willow/src/session/reconciler.rs | 2 +- iroh-willow/src/session/resource.rs | 37 ++++++--- iroh-willow/src/session/state.rs | 106 ++++++-------------------- iroh-willow/src/util.rs | 2 + 5 files changed, 65 insertions(+), 95 deletions(-) diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 907907c1b6..65fee8cba8 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -1,6 +1,8 @@ use std::collections::{HashMap, HashSet}; -use crate::proto::wgps::{AccessChallenge, ChallengeHash}; +use 
crate::proto::grouping::Area; +use crate::proto::keys::NamespaceId; +use crate::proto::wgps::{AccessChallenge, AreaOfInterestHandle, ChallengeHash}; use crate::proto::{grouping::AreaOfInterest, wgps::ReadCapability}; pub mod channels; @@ -76,3 +78,12 @@ pub enum Scope { Theirs, } +/// Intersection between two areas of interest. +#[derive(Debug, Clone)] +pub struct AreaOfInterestIntersection { + pub our_handle: AreaOfInterestHandle, + pub their_handle: AreaOfInterestHandle, + pub intersection: Area, + pub namespace: NamespaceId, +} + diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index c5cdf98b1a..bb94e3022d 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -12,7 +12,7 @@ use crate::{ }, willow::AuthorisedEntry, }, - session::{channels::MessageReceiver, state::AreaOfInterestIntersection, Error, Session}, + session::{channels::MessageReceiver, AreaOfInterestIntersection, Error, Session}, store::{ReadonlyStore, Shared, SplitAction, Store, SyncConfig}, util::channel::WriteError, }; diff --git a/iroh-willow/src/session/resource.rs b/iroh-willow/src/session/resource.rs index 7e056f68f5..2e3cdc1cd3 100644 --- a/iroh-willow/src/session/resource.rs +++ b/iroh-willow/src/session/resource.rs @@ -11,12 +11,12 @@ use crate::proto::wgps::{ use super::Error; #[derive(Debug, Default)] -pub struct ScopedResources { +pub struct ResourceMaps { pub capabilities: ResourceMap, pub areas_of_interest: ResourceMap, pub static_tokens: ResourceMap, } -impl ScopedResources { +impl ResourceMaps { pub fn register_waker(&mut self, handle: ResourceHandle, waker: Waker) { tracing::trace!(?handle, "register_notify"); match handle { @@ -27,14 +27,31 @@ impl ScopedResources { } } - // pub fn get(&self, scope: Scope, handle: &Handle) { - // match handle { - // Handle::AreaOfInterest(h) => self.areas_of_interest.get(h), - // Handle::Intersection(h) => unimplemented!(), - // Handle::Capability(h) => 
self.capabilities.get(h), - // Handle::StaticToken(_h) => self.static_tokens.get(h), - // } - // } + pub fn get( + &self, + selector: F, + handle: H, + ) -> Result + where + F: for<'a> Fn(&'a Self) -> &'a ResourceMap, + { + let res = selector(&self); + res.try_get(&handle).cloned() + } + + pub fn poll_get_eventually( + &mut self, + selector: F, + handle: H, + cx: &mut Context<'_>, + ) -> Poll + where + F: for<'a> Fn(&'a mut Self) -> &'a mut ResourceMap, + { + let res = selector(self); + let r = std::task::ready!(res.poll_get_eventually(handle, cx)); + Poll::Ready(r.clone()) + } } #[derive(Debug)] diff --git a/iroh-willow/src/session/state.rs b/iroh-willow/src/session/state.rs index 08486dfe67..b7ceaf4d16 100644 --- a/iroh-willow/src/session/state.rs +++ b/iroh-willow/src/session/state.rs @@ -1,10 +1,10 @@ use std::{ cell::{Ref, RefCell, RefMut}, - collections::{HashSet, VecDeque}, + collections::HashSet, future::poll_fn, pin::Pin, rc::Rc, - task::{Poll, Waker}, + task::Poll, }; use futures_lite::Stream; @@ -14,7 +14,7 @@ use crate::{ net::InitialTransmission, proto::{ challenge::ChallengeState, - grouping::{Area, ThreeDRange}, + grouping::ThreeDRange, keys::NamespaceId, wgps::{ AreaOfInterestHandle, CapabilityHandle, Channel, CommitmentReveal, IntersectionHandle, @@ -24,13 +24,13 @@ use crate::{ }, }, store::{KeyStore, Shared}, - util::{channel::WriteError, task_set::TaskMap}, + util::{channel::WriteError, queue::Queue, task_set::TaskMap}, }; use super::{ channels::ChannelSenders, - resource::{ResourceMap, ScopedResources}, - Error, Role, Scope, + resource::{ResourceMap, ResourceMaps}, + AreaOfInterestIntersection, Error, Role, Scope, }; #[derive(Debug, Clone)] @@ -110,7 +110,7 @@ impl Session { pub async fn next_aoi_intersection(&self) -> Option { poll_fn(|cx| { - let mut aoi_queue = &mut self.0.state.borrow_mut().aoi_queue; + let mut aoi_queue = &mut self.0.state.borrow_mut().intersetion_queue; Pin::new(&mut aoi_queue).poll_next(cx) }) .await @@ -122,11 +122,10 
@@ impl Session { handle: H, ) -> Result where - F: for<'a> Fn(&'a mut ScopedResources) -> &'a mut ResourceMap, + F: for<'a> Fn(&'a ResourceMaps) -> &'a ResourceMap, { - let mut state = self.0.state.borrow_mut(); - let res = selector(&mut std::ops::DerefMut::deref_mut(&mut state).our_resources); - res.try_get(&handle).cloned() + let state = self.0.state.borrow_mut(); + state.our_resources.get(&selector, handle) } pub async fn get_their_resource_eventually( @@ -135,14 +134,14 @@ impl Session { handle: H, ) -> R where - F: for<'a> Fn(&'a mut ScopedResources) -> &'a mut ResourceMap, + F: for<'a> Fn(&'a mut ResourceMaps) -> &'a mut ResourceMap, { let inner = &self.clone().0; poll_fn(move |cx| { let mut state = inner.state.borrow_mut(); - let res = selector(&mut std::ops::DerefMut::deref_mut(&mut state).their_resources); - let r = std::task::ready!(res.poll_get_eventually(handle, cx)); - Poll::Ready(r.clone()) + state + .their_resources + .poll_get_eventually(&selector, handle, cx) }) .await } @@ -248,7 +247,7 @@ impl Session { if our_namespace != their_namespace { return Err(Error::AreaOfInterestNamespaceMismatch); } - let our_aoi = self.get_our_resource(|r| &mut r.areas_of_interest, receiver_handle)?; + let our_aoi = self.get_our_resource(|r| &r.areas_of_interest, receiver_handle)?; let their_aoi = self .get_their_resource_eventually(|r| &mut r.areas_of_interest, sender_handle) .await; @@ -287,7 +286,6 @@ impl Session { ); state.reconciliation_started && state.our_uncovered_ranges.is_empty() - // && state.their_uncovered_ranges.is_empty() && state.pending_entries.is_none() } @@ -378,16 +376,15 @@ impl Session { #[derive(Debug)] struct SessionState { - our_resources: ScopedResources, - their_resources: ScopedResources, + challenge: ChallengeState, + our_resources: ResourceMaps, + their_resources: ResourceMaps, reconciliation_started: bool, - our_uncovered_ranges: HashSet<(AreaOfInterestHandle, u64)>, - // their_uncovered_ranges: HashSet<(AreaOfInterestHandle, u64)>, 
our_range_counter: u64, their_range_counter: u64, + our_uncovered_ranges: HashSet<(AreaOfInterestHandle, u64)>, pending_entries: Option, - challenge: ChallengeState, - aoi_queue: AoiQueue, + intersetion_queue: Queue, } impl SessionState { @@ -405,9 +402,8 @@ impl SessionState { our_range_counter: 0, their_range_counter: 0, our_uncovered_ranges: Default::default(), - // their_uncovered_ranges: Default::default(), pending_entries: Default::default(), - aoi_queue: Default::default(), + intersetion_queue: Default::default(), } } @@ -464,13 +460,13 @@ impl SessionState { Scope::Ours => (handle, candidate_handle), Scope::Theirs => (candidate_handle, handle), }; - let shared = AreaOfInterestIntersection { + let info = AreaOfInterestIntersection { our_handle, their_handle, intersection, namespace: namespace.into(), }; - self.aoi_queue.push(shared); + self.intersetion_queue.push_back(info); } } Ok(()) @@ -500,59 +496,3 @@ impl SessionState { } } } - -#[derive(Debug, Clone)] -pub struct AreaOfInterestIntersection { - pub our_handle: AreaOfInterestHandle, - pub their_handle: AreaOfInterestHandle, - pub intersection: Area, - pub namespace: NamespaceId, -} - -#[derive(Default, Debug)] -pub struct AoiQueue { - found: VecDeque, - // closed: bool, - wakers: VecDeque, -} - -impl AoiQueue { - pub fn push(&mut self, pair: AreaOfInterestIntersection) { - self.found.push_back(pair); - self.wake(); - } - // pub fn close(&mut self) { - // self.closed = true; - // self.wake(); - // } - fn wake(&mut self) { - for waker in self.wakers.drain(..) 
{ - waker.wake(); - } - } - - pub fn poll_next( - &mut self, - cx: &mut std::task::Context<'_>, - ) -> Poll> { - // if self.closed { - // return Poll::Ready(None); - // } - if let Some(item) = self.found.pop_front() { - Poll::Ready(Some(item)) - } else { - self.wakers.push_back(cx.waker().to_owned()); - Poll::Pending - } - } -} - -impl Stream for AoiQueue { - type Item = AreaOfInterestIntersection; - fn poll_next( - self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> Poll> { - Self::poll_next(self.get_mut(), cx) - } -} diff --git a/iroh-willow/src/util.rs b/iroh-willow/src/util.rs index 399d21dca6..e17b9562fa 100644 --- a/iroh-willow/src/util.rs +++ b/iroh-willow/src/util.rs @@ -2,6 +2,7 @@ use std::{io, time::SystemTime}; pub mod channel; pub mod task_set; +pub mod queue; pub fn system_time_now() -> u64 { SystemTime::now() @@ -31,3 +32,4 @@ pub enum DecodeOutcome { NeedMoreData, Decoded { item: T, consumed: usize }, } + From 52f3f3e476b501a11fd6d107eaf908a41c83f025 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Wed, 29 May 2024 15:56:43 +0200 Subject: [PATCH 047/198] cleanup and document utils --- iroh-willow/src/actor.rs | 4 +- iroh-willow/src/proto/meadowcap.rs | 2 +- iroh-willow/src/proto/wgps.rs | 2 +- iroh-willow/src/proto/willow.rs | 4 +- iroh-willow/src/session.rs | 1 - iroh-willow/src/session/state.rs | 6 +- iroh-willow/src/util.rs | 36 ++--------- iroh-willow/src/util/channel.rs | 2 +- iroh-willow/src/util/codec.rs | 39 ++++++++++++ iroh-willow/src/util/queue.rs | 60 +++++++++++++++++++ iroh-willow/src/util/{task_set.rs => task.rs} | 50 +++++++++++----- iroh-willow/src/util/time.rs | 9 +++ 12 files changed, 156 insertions(+), 59 deletions(-) create mode 100644 iroh-willow/src/util/codec.rs create mode 100644 iroh-willow/src/util/queue.rs rename iroh-willow/src/util/{task_set.rs => task.rs} (60%) create mode 100644 iroh-willow/src/util/time.rs diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/actor.rs index 
574144a3c3..8fc6575a8a 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -16,7 +16,7 @@ use crate::{ }, session::{Channels, Error, Role, Session, SessionInit}, store::{KeyStore, ReadonlyStore, Shared, Store}, - util::task_set::{TaskKey, TaskMap}, + util::task::{JoinMap, TaskKey}, }; pub const INBOX_CAP: usize = 1024; @@ -200,7 +200,7 @@ pub struct StorageThread { key_store: Shared, next_session_id: u64, sessions: HashMap, - session_tasks: TaskMap>, + session_tasks: JoinMap>, } impl StorageThread { diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index b525730dbf..40eebce2c3 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -1,6 +1,6 @@ use serde::{Deserialize, Serialize}; -use crate::util::Encoder; +use crate::util::codec::Encoder; use super::{ grouping::Area, diff --git a/iroh-willow/src/proto/wgps.rs b/iroh-willow/src/proto/wgps.rs index 0b09a1dc6d..48266c36f5 100644 --- a/iroh-willow/src/proto/wgps.rs +++ b/iroh-willow/src/proto/wgps.rs @@ -5,7 +5,7 @@ use iroh_base::hash::Hash; use serde::{Deserialize, Serialize}; use strum::{EnumCount, VariantArray}; -use crate::util::{DecodeOutcome, Decoder, Encoder}; +use crate::util::codec::{DecodeOutcome, Decoder, Encoder}; use super::{ grouping::{Area, AreaOfInterest, ThreeDRange}, diff --git a/iroh-willow/src/proto/willow.rs b/iroh-willow/src/proto/willow.rs index cdcf375156..7a45c2bef7 100644 --- a/iroh-willow/src/proto/willow.rs +++ b/iroh-willow/src/proto/willow.rs @@ -4,7 +4,7 @@ use bytes::Bytes; use iroh_base::hash::Hash; use serde::{Deserialize, Serialize}; -use crate::util::system_time_now; +use crate::util::time::system_time_now; use super::{ keys::{self, UserSecretKey}, @@ -347,7 +347,7 @@ pub mod encodings { use bytes::Bytes; - use crate::{proto::keys::PUBLIC_KEY_LENGTH, util::Encoder}; + use crate::{proto::keys::PUBLIC_KEY_LENGTH, util::codec::Encoder}; use super::{Entry, Path, DIGEST_LENGTH}; diff --git 
a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 65fee8cba8..e035da67f7 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -86,4 +86,3 @@ pub struct AreaOfInterestIntersection { pub intersection: Area, pub namespace: NamespaceId, } - diff --git a/iroh-willow/src/session/state.rs b/iroh-willow/src/session/state.rs index b7ceaf4d16..89afcaf1e1 100644 --- a/iroh-willow/src/session/state.rs +++ b/iroh-willow/src/session/state.rs @@ -24,7 +24,7 @@ use crate::{ }, }, store::{KeyStore, Shared}, - util::{channel::WriteError, queue::Queue, task_set::TaskMap}, + util::{channel::WriteError, queue::Queue, task::JoinMap}, }; use super::{ @@ -41,7 +41,7 @@ struct SessionInner { our_role: Role, state: RefCell, send: ChannelSenders, - tasks: RefCell>>, + tasks: RefCell>>, } impl Session { @@ -73,7 +73,7 @@ impl Session { pub async fn join_next_task(&self) -> Option<(Span, Result<(), Error>)> { poll_fn(|cx| { let mut tasks = self.0.tasks.borrow_mut(); - let res = std::task::ready!(Pin::new(&mut tasks).poll_next(cx)); + let res = std::task::ready!(Pin::new(&mut tasks).poll_join_next(cx)); let res = match res { None => None, Some((key, Ok(r))) => Some((key, r)), diff --git a/iroh-willow/src/util.rs b/iroh-willow/src/util.rs index e17b9562fa..ff1d8002ba 100644 --- a/iroh-willow/src/util.rs +++ b/iroh-willow/src/util.rs @@ -1,35 +1,7 @@ -use std::{io, time::SystemTime}; +//! Various utilties and data structures used in this crate. 
pub mod channel; -pub mod task_set; +pub mod codec; pub mod queue; - -pub fn system_time_now() -> u64 { - SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH) - .expect("time drift") - .as_micros() as u64 -} - -pub trait Encoder: std::fmt::Debug { - fn encoded_len(&self) -> usize; - - fn encode_into(&self, out: &mut W) -> anyhow::Result<()>; - - fn encode(&self) -> anyhow::Result> { - let mut out = Vec::with_capacity(self.encoded_len()); - self.encode_into(&mut out)?; - Ok(out) - } -} - -pub trait Decoder: Sized { - fn decode_from(data: &[u8]) -> anyhow::Result>; -} - -#[derive(Debug)] -pub enum DecodeOutcome { - NeedMoreData, - Decoded { item: T, consumed: usize }, -} - +pub mod task; +pub mod time; diff --git a/iroh-willow/src/util/channel.rs b/iroh-willow/src/util/channel.rs index dd59f0bb71..1c8266218c 100644 --- a/iroh-willow/src/util/channel.rs +++ b/iroh-willow/src/util/channel.rs @@ -11,7 +11,7 @@ use bytes::{Buf, Bytes, BytesMut}; use futures_lite::Stream; use tokio::io::AsyncWrite; -use super::{DecodeOutcome, Decoder, Encoder}; +use crate::util::codec::{DecodeOutcome, Decoder, Encoder}; pub fn pipe(cap: usize) -> (Writer, Reader) { let shared = Shared::new(cap, Guarantees::Unlimited); diff --git a/iroh-willow/src/util/codec.rs b/iroh-willow/src/util/codec.rs new file mode 100644 index 0000000000..fbb9edb7ef --- /dev/null +++ b/iroh-willow/src/util/codec.rs @@ -0,0 +1,39 @@ +//! Traits for encoding and decoding values to and from bytes. + +use std::{fmt, io}; + +/// Trait for encoding values into bytes. +pub trait Encoder: fmt::Debug { + /// Returns the length (in bytes) of the encoded value. + fn encoded_len(&self) -> usize; + + /// Encode [`Self`] into a writable buffer which implements `io::Write`. + fn encode_into(&self, out: &mut W) -> anyhow::Result<()>; + + /// Encode [`Self`] into a vector of bytes. 
+ fn encode(&self) -> anyhow::Result> { + let mut out = Vec::with_capacity(self.encoded_len()); + self.encode_into(&mut out)?; + Ok(out) + } +} + +/// Trait for decoding values from bytes. +pub trait Decoder: Sized { + /// Decode [`Self`] from a byte slice. + fn decode_from(data: &[u8]) -> anyhow::Result>; +} + +/// The outcome of [`Decoder::decode_from`] +#[derive(Debug)] +pub enum DecodeOutcome { + /// Not enough data to decode the value. + NeedMoreData, + /// Decoded a value. + Decoded { + /// The decoded value. + item: T, + /// The number of bytes used for decoding the value. + consumed: usize, + }, +} diff --git a/iroh-willow/src/util/queue.rs b/iroh-willow/src/util/queue.rs new file mode 100644 index 0000000000..b131c9edbe --- /dev/null +++ b/iroh-willow/src/util/queue.rs @@ -0,0 +1,60 @@ +//! A simple asynchronous queue. + +use std::{ + collections::VecDeque, + pin::Pin, + task::{Poll, Waker}, +}; + +use futures_lite::Stream; + +/// A simple unbounded queue. +/// +/// Values are pushed into the queue, synchronously. +/// The queue can be polled for the next value from the start. +#[derive(Debug)] +pub struct Queue { + items: VecDeque, + wakers: VecDeque, +} + +impl Default for Queue { + fn default() -> Self { + Self { + items: Default::default(), + wakers: Default::default(), + } + } +} + +impl Queue { + /// Push a new item to the back of the queue. + pub fn push_back(&mut self, pair: T) { + self.items.push_back(pair); + for waker in self.wakers.drain(..) { + waker.wake(); + } + } + + /// Attempt to pop the next item from the front of the queue. + /// + /// Returns [`Poll::Pending`] if no items are currently in the queue. 
+ pub fn poll_pop_front(&mut self, cx: &mut std::task::Context<'_>) -> Poll> { + if let Some(item) = self.items.pop_front() { + Poll::Ready(Some(item)) + } else { + self.wakers.push_back(cx.waker().to_owned()); + Poll::Pending + } + } +} + +impl Stream for Queue { + type Item = T; + fn poll_next( + self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> Poll> { + Self::poll_pop_front(self.get_mut(), cx) + } +} diff --git a/iroh-willow/src/util/task_set.rs b/iroh-willow/src/util/task.rs similarity index 60% rename from iroh-willow/src/util/task_set.rs rename to iroh-willow/src/util/task.rs index bf1fe91f49..c1b5d09e85 100644 --- a/iroh-willow/src/util/task_set.rs +++ b/iroh-willow/src/util/task.rs @@ -1,3 +1,5 @@ +//! Utilities for working with tokio tasks. + use std::{ collections::HashMap, future::Future, @@ -7,22 +9,27 @@ use std::{ use futures_concurrency::future::{future_group, FutureGroup}; use futures_lite::Stream; +use tokio::task::JoinError; #[derive(derive_more::Debug, Eq, PartialEq)] #[debug("{:?}", _0)] pub struct TaskKey(future_group::Key); -/// A set of tasks. +/// A collection of tasks spawned on a Tokio runtime, associated with hash map keys. /// /// Similar to [`tokio::task::JoinSet`] but can also contain local tasks, and each task is /// identified by a key which is returned upon completion of the task. +/// +/// Uses [`tokio::task::spawn`] and [`tokio::task::spawn_local`] in combination with [`future_group`] for keeping the join handles around. +// +// TODO: Replace with [`tokio::task::JoinMap`] once it doesn't need tokio unstable anymore. #[derive(Debug)] -pub struct TaskMap { +pub struct JoinMap { tasks: future_group::Keyed>, keys: HashMap, } -impl Default for TaskMap { +impl Default for JoinMap { fn default() -> Self { Self { tasks: FutureGroup::new().keyed(), @@ -31,10 +38,13 @@ impl Default for TaskMap { } } -impl TaskMap { +impl JoinMap { + /// Create a new [`TaskMap`]. 
pub fn new() -> Self { Self::default() } + + /// Spawn a new task on the currently executing [`tokio::task::LocalSet`]. pub fn spawn_local + 'static>(&mut self, key: K, future: F) -> TaskKey { let handle = tokio::task::spawn_local(future); let k = self.tasks.insert(handle); @@ -42,11 +52,11 @@ impl TaskMap { TaskKey(k) } - - pub fn poll_next( + /// Poll for one of the tasks in the map to complete. + pub fn poll_join_next( &mut self, cx: &mut Context<'_>, - ) -> Poll)>> { + ) -> Poll)>> { let Some((key, item)) = std::task::ready!(Pin::new(&mut self.tasks).poll_next(cx)) else { return Poll::Ready(None); }; @@ -54,31 +64,39 @@ impl TaskMap { Poll::Ready(Some((key, item))) } + /// Remove a task from the map. pub fn remove(&mut self, task_key: &TaskKey) -> bool { self.keys.remove(&task_key.0); self.tasks.remove(task_key.0) } + /// Returns `true` if the task map is currently empty. pub fn is_empty(&self) -> bool { self.tasks.is_empty() } + + /// Returns the number of tasks currently in the map. pub fn len(&self) -> usize { self.tasks.len() } } -impl Stream for TaskMap { - type Item = (K, Result); - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Self::poll_next(self.get_mut(), cx) - } -} - -impl TaskMap { +impl JoinMap { + /// Spawn a new, non-local task on the current tokio runtime. pub fn spawn + 'static + Send>(&mut self, future: F) -> TaskKey { let handle = tokio::task::spawn(future); let key = self.tasks.insert(handle); TaskKey(key) } } + +impl Stream for JoinMap { + type Item = (K, Result); + + /// Poll for one of the tasks to complete. + /// + /// See [`Self::poll_join_next`] for details. 
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Self::poll_join_next(self.get_mut(), cx) + } +} diff --git a/iroh-willow/src/util/time.rs b/iroh-willow/src/util/time.rs new file mode 100644 index 0000000000..a1c037372b --- /dev/null +++ b/iroh-willow/src/util/time.rs @@ -0,0 +1,9 @@ +use std::time::SystemTime; + +/// Returns the current system time in microseconds since [`SystemTime::UNIX_EPOCH`]. +pub fn system_time_now() -> u64 { + SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .expect("time drift") + .as_micros() as u64 +} From 24f7a282606ffbd07e73f786c0cd56175a49a032 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Wed, 29 May 2024 16:16:23 +0200 Subject: [PATCH 048/198] document channel --- iroh-willow/src/util/channel.rs | 116 +++++++++++++++++++++++--------- 1 file changed, 85 insertions(+), 31 deletions(-) diff --git a/iroh-willow/src/util/channel.rs b/iroh-willow/src/util/channel.rs index 1c8266218c..87f56a0229 100644 --- a/iroh-willow/src/util/channel.rs +++ b/iroh-willow/src/util/channel.rs @@ -1,4 +1,5 @@ use std::{ + cmp, future::poll_fn, io, marker::PhantomData, @@ -13,6 +14,7 @@ use tokio::io::AsyncWrite; use crate::util::codec::{DecodeOutcome, Decoder, Encoder}; +/// Create an in-memory pipe. pub fn pipe(cap: usize) -> (Writer, Reader) { let shared = Shared::new(cap, Guarantees::Unlimited); let writer = Writer { @@ -22,8 +24,21 @@ pub fn pipe(cap: usize) -> (Writer, Reader) { (writer, reader) } -pub fn outbound_channel(cap: usize, guarantees: Guarantees) -> (Sender, Reader) { - let shared = Shared::new(cap, guarantees); +/// Create a new channel with a message [`Sender`] on the transmit side and a byte [`Reader`] on +/// the receive side. +/// +/// This is used for data sent from the application into the network: The application code queues +/// messages for sending, and the networking code consumes a bytes stream of the messages encoded +/// with [`Encoder`]. 
+/// +/// Optionally the channel can be assigned a limited number of [`Guarantees`]. If limited, a total +/// limit of sendable bytes will be respected, and no further sends can happen once it is +/// exhausted. The amount of guarantees can be raised with [`Sender::add_guarantees`]. +pub fn outbound_channel( + max_buffer_size: usize, + guarantees: Guarantees, +) -> (Sender, Reader) { + let shared = Shared::new(max_buffer_size, guarantees); let sender = Sender { shared: shared.clone(), _ty: PhantomData, @@ -32,8 +47,14 @@ pub fn outbound_channel(cap: usize, guarantees: Guarantees) -> (Send (sender, reader) } -pub fn inbound_channel(cap: usize) -> (Writer, Receiver) { - let shared = Shared::new(cap, Guarantees::Unlimited); +/// Create a new channel with a byte [`Writer`] on the transmit side and a message [`Receiver`] on +/// the receive side. +/// +/// This is used for data incoming from the network: The networking code copies received data into +/// the channel, and the application code processes the messages parsed by the [`Decoder`] from the data +/// in the channel. +pub fn inbound_channel(max_buffer_size: usize) -> (Writer, Receiver) { + let shared = Shared::new(max_buffer_size, Guarantees::Unlimited); let writer = Writer { shared: shared.clone(), }; @@ -89,9 +110,9 @@ impl Guarantees { } } -// Shared state for a in-memory pipe. -// -// Roughly modeled after https://docs.rs/tokio/latest/src/tokio/io/util/mem.rs.html#58 +/// Shared state for a in-memory pipe. 
+/// +/// Roughly modeled after https://docs.rs/tokio/latest/src/tokio/io/util/mem.rs.html#58 #[derive(Debug)] struct Shared { buf: BytesMut, @@ -103,10 +124,10 @@ struct Shared { } impl Shared { - fn new(cap: usize, guarantees: Guarantees) -> Arc> { + fn new(max_buffer_size: usize, guarantees: Guarantees) -> Arc> { let shared = Self { buf: BytesMut::new(), - max_buffer_size: cap, + max_buffer_size, write_wakers: Default::default(), read_wakers: Default::default(), is_closed: false, @@ -115,20 +136,20 @@ impl Shared { Arc::new(Mutex::new(shared)) } - fn set_cap(&mut self, cap: usize) -> bool { - if cap >= self.buf.len() { - self.max_buffer_size = cap; - self.wake_writable(); - true - } else { - false - } - } + // fn set_max_buffer_size(&mut self, max_buffer_size: usize) -> bool { + // if max_buffer_size >= self.buf.len() { + // self.max_buffer_size = max_buffer_size; + // self.wake_writable(); + // true + // } else { + // false + // } + // } fn add_guarantees(&mut self, amount: u64) { - let previous = self.remaining_write_capacity(); + let current_write_capacity = self.remaining_write_capacity(); self.guarantees.add(amount); - if self.remaining_write_capacity() > previous { + if self.remaining_write_capacity() > current_write_capacity { self.wake_writable(); } } @@ -192,7 +213,7 @@ impl Shared { return Poll::Pending; } - let len = std::cmp::min(buf.len(), avail); + let len = cmp::min(buf.len(), avail); self.buf.extend_from_slice(&buf[..len]); self.guarantees.r#use(len as u64); self.wake_readable(); @@ -236,7 +257,7 @@ impl Shared { cx: &mut task::Context<'_>, ) -> Poll>> { let buf = self.peek(); - if self.is_closed() && self.is_empty() { + if self.is_closed() && buf.is_empty() { return Poll::Ready(None); } match T::decode_from(buf).map_err(ReadError::Decode)? 
{ @@ -257,7 +278,7 @@ impl Shared { } fn remaining_write_capacity(&self) -> usize { - std::cmp::min( + cmp::min( self.max_buffer_size - self.buf.len(), self.guarantees.get() as usize, ) @@ -268,6 +289,7 @@ impl Shared { waker.wake(); } } + fn wake_writable(&mut self) { for waker in self.write_wakers.drain(..) { waker.wake(); @@ -275,30 +297,45 @@ impl Shared { } } +/// Asynchronous reader to read bytes from a channel. #[derive(Debug)] pub struct Reader { shared: Arc>, } impl Reader { + /// Close the channel. + /// + /// See [`Sender::close`] for details. pub fn close(&self) { self.shared.lock().unwrap().close() } + /// Read a chunk of bytes from the channel. + /// + /// Returns `None` once the channel is closed and the channel buffer is empty. pub async fn read_bytes(&self) -> Option { poll_fn(|cx| self.shared.lock().unwrap().poll_read_bytes(cx)).await } } +/// Asynchronous writer to write bytes into a channel. +/// +/// The writer implements [`AsyncWrite`]. #[derive(Debug)] pub struct Writer { shared: Arc>, } impl Writer { + /// Close the channel. + /// + /// See [`Sender::close`] for details. pub fn close(&self) { self.shared.lock().unwrap().close() } + + /// Get the maximum buffer size of the channel. pub fn max_buffer_size(&self) -> usize { self.shared.lock().unwrap().max_buffer_size } @@ -336,21 +373,29 @@ pub struct Sender { } impl Sender { + /// Close the channel. + /// + /// Sending messages after calling `close` will return an error. + /// + /// The receiving end will keep processing the current buffer, and will return `None` once + /// empty. pub fn close(&self) { self.shared.lock().unwrap().close() } - pub fn set_cap(&self, cap: usize) -> bool { - self.shared.lock().unwrap().set_cap(cap) - } - + /// Send a message into the channel. pub async fn send_message(&self, message: &T) -> Result<(), WriteError> { poll_fn(|cx| self.shared.lock().unwrap().poll_send_message(message, cx)).await } + /// Add guarantees available for sending messages. 
pub fn add_guarantees(&self, amount: u64) { self.shared.lock().unwrap().add_guarantees(amount) } + + // pub fn set_max_buffer_size(&self, max_buffer_size: usize) -> bool { + // self.shared.lock().unwrap().set_max_buffer_size(max_buffer_size) + // } } #[derive(Debug)] @@ -360,17 +405,26 @@ pub struct Receiver { } impl Receiver { + /// Close the channel. + /// + /// See [`Sender::close`] for details. pub fn close(&self) { self.shared.lock().unwrap().close() } - pub fn set_cap(&self, cap: usize) -> bool { - self.shared.lock().unwrap().set_cap(cap) - } - + /// Receive the next message from the channel. + /// + /// Returns `None` if the channel is closed and the buffer is empty. pub async fn recv(&self) -> Option> { poll_fn(|cx| self.shared.lock().unwrap().poll_recv_message(cx)).await } + + // pub fn set_max_buffer_size(&self, max_buffer_size: usize) -> bool { + // self.shared + // .lock() + // .unwrap() + // .set_max_buffer_size(max_buffer_size) + // } } impl Stream for Receiver { From e96e8e3403af3a561f3f2999b12d08ebd14178a5 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Wed, 29 May 2024 16:36:46 +0200 Subject: [PATCH 049/198] more cleanups and docs --- iroh-willow/src/actor.rs | 23 +++++++++++++++-------- iroh-willow/src/session/run.rs | 14 +++++++------- iroh-willow/src/session/state.rs | 28 ++++++++++++++-------------- iroh-willow/src/worker.rs | 0 4 files changed, 36 insertions(+), 29 deletions(-) delete mode 100644 iroh-willow/src/worker.rs diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/actor.rs index 8fc6575a8a..15a3e55b8d 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -236,6 +236,12 @@ impl StorageThread { Ok(()) } + fn next_session_id(&mut self) -> u64 { + let id = self.next_session_id; + self.next_session_id += 1; + id + } + fn handle_message(&mut self, message: ToActor) -> Result<(), Error> { trace!(%message, "tick: handle_message"); match message { @@ -248,17 +254,18 @@ impl StorageThread { init, on_finish, 
} => { - let id = self.next_session_id; - self.next_session_id += 1; let Channels { send, recv } = channels; let session = Session::new(send, our_role, initial_transmission); - let task_key = self.session_tasks.spawn_local( - id, - session - .run(self.store.clone(), self.key_store.clone(), recv, init) - .instrument(error_span!("session", peer = %peer.fmt_short())), - ); + let id = self.next_session_id(); + let store = self.store.clone(); + let key_store = self.key_store.clone(); + + let future = session + .run(store, key_store, recv, init) + .instrument(error_span!("session", peer = %peer.fmt_short())); + let task_key = self.session_tasks.spawn_local(id, future); + let active_session = ActiveSession { on_finish, task_key, diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 20f6121025..5098ef2009 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -23,14 +23,14 @@ impl Session { ) -> Result<(), Error> { let ChannelReceivers { control_recv, - logical_recv, + logical_recv: + LogicalChannelReceivers { + reconciliation_recv, + mut static_tokens_recv, + mut capability_recv, + mut aoi_recv, + }, } = recv; - let LogicalChannelReceivers { - reconciliation_recv, - mut static_tokens_recv, - mut capability_recv, - mut aoi_recv, - } = logical_recv; // Spawn a task to handle incoming static tokens. 
self.spawn(error_span!("stt"), move |session| async move { diff --git a/iroh-willow/src/session/state.rs b/iroh-willow/src/session/state.rs index 89afcaf1e1..8ded97cf24 100644 --- a/iroh-willow/src/session/state.rs +++ b/iroh-willow/src/session/state.rs @@ -110,8 +110,8 @@ impl Session { pub async fn next_aoi_intersection(&self) -> Option { poll_fn(|cx| { - let mut aoi_queue = &mut self.0.state.borrow_mut().intersetion_queue; - Pin::new(&mut aoi_queue).poll_next(cx) + let mut queue = &mut self.0.state.borrow_mut().intersection_queue; + Pin::new(&mut queue).poll_next(cx) }) .await } @@ -305,12 +305,19 @@ impl Session { state.challenge.reveal(our_role, msg.nonce) } + /// Bind a area of interest, and start reconciliation if this area of interest has an + /// intersection with a remote area of interest. + /// + /// Will fail if the capability is missing. Await [`Self::get_our_resource_eventually`] or + /// [`Self::get_their_resource_eventually`] before calling this. + /// + /// Returns `true` if the capability was newly bound, and `false` if not. pub fn bind_area_of_interest( &self, scope: Scope, - message: SetupBindAreaOfInterest, + msg: SetupBindAreaOfInterest, ) -> Result<(), Error> { - self.state_mut().bind_area_of_interest(scope, message) + self.state_mut().bind_area_of_interest(scope, msg) } pub async fn on_bind_area_of_interest( @@ -384,7 +391,7 @@ struct SessionState { their_range_counter: u64, our_uncovered_ranges: HashSet<(AreaOfInterestHandle, u64)>, pending_entries: Option, - intersetion_queue: Queue, + intersection_queue: Queue, } impl SessionState { @@ -403,17 +410,10 @@ impl SessionState { their_range_counter: 0, our_uncovered_ranges: Default::default(), pending_entries: Default::default(), - intersetion_queue: Default::default(), + intersection_queue: Default::default(), } } - /// Bind a area of interest, and start reconciliation if this area of interest has an - /// intersection with a remote area of interest. 
- /// - /// Will fail if the capability is missing. Await [`Self::get_our_resource_eventually`] or - /// [`Self::get_their_resource_eventually`] before calling this. - /// - /// Returns `true` if the capability was newly bound, and `false` if not. fn bind_area_of_interest( &mut self, scope: Scope, @@ -466,7 +466,7 @@ impl SessionState { intersection, namespace: namespace.into(), }; - self.intersetion_queue.push_back(info); + self.intersection_queue.push_back(info); } } Ok(()) diff --git a/iroh-willow/src/worker.rs b/iroh-willow/src/worker.rs deleted file mode 100644 index e69de29bb2..0000000000 From 70ac7076ad36a005d62c5131d7b89b670729a22f Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Wed, 29 May 2024 16:39:48 +0200 Subject: [PATCH 050/198] renames --- iroh-willow/src/net.rs | 4 ++-- iroh-willow/src/proto.rs | 2 +- iroh-willow/src/proto/challenge.rs | 2 +- iroh-willow/src/proto/{wgps.rs => sync.rs} | 0 iroh-willow/src/proto/willow.rs | 2 +- iroh-willow/src/session.rs | 4 ++-- iroh-willow/src/session/channels.rs | 2 +- iroh-willow/src/session/error.rs | 2 +- iroh-willow/src/session/reconciler.rs | 2 +- iroh-willow/src/session/resource.rs | 2 +- iroh-willow/src/session/run.rs | 2 +- iroh-willow/src/session/state.rs | 2 +- iroh-willow/src/session/util.rs | 2 +- iroh-willow/src/store.rs | 2 +- 14 files changed, 15 insertions(+), 15 deletions(-) rename iroh-willow/src/proto/{wgps.rs => sync.rs} (100%) diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 2c839742ca..60daaaac3f 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -11,7 +11,7 @@ use tracing::{debug, error_span, field::Empty, instrument, trace, warn, Instrume use crate::{ actor::ActorHandle, - proto::wgps::{ + proto::sync::{ AccessChallenge, ChallengeHash, Channel, LogicalChannel, Message, CHALLENGE_HASH_LENGTH, MAX_PAYLOAD_SIZE_POWER, }, @@ -295,7 +295,7 @@ mod tests { grouping::{AreaOfInterest, ThreeDRange}, keys::{NamespaceId, NamespaceKind, 
NamespaceSecretKey, UserPublicKey, UserSecretKey}, meadowcap::{AccessMode, McCapability, OwnedCapability}, - wgps::ReadCapability, + sync::ReadCapability, willow::{Entry, InvalidPath, Path, WriteCapability}, }, session::{Role, SessionInit}, diff --git a/iroh-willow/src/proto.rs b/iroh-willow/src/proto.rs index 168a8df3cd..05a43e4fff 100644 --- a/iroh-willow/src/proto.rs +++ b/iroh-willow/src/proto.rs @@ -2,5 +2,5 @@ pub mod challenge; pub mod grouping; pub mod keys; pub mod meadowcap; -pub mod wgps; +pub mod sync; pub mod willow; diff --git a/iroh-willow/src/proto/challenge.rs b/iroh-willow/src/proto/challenge.rs index 137afb509e..00e5ae22dc 100644 --- a/iroh-willow/src/proto/challenge.rs +++ b/iroh-willow/src/proto/challenge.rs @@ -4,7 +4,7 @@ use crate::session::{Error, Role}; use super::{ keys::{UserPublicKey, UserSecretKey, UserSignature}, - wgps::{AccessChallenge, ChallengeHash}, + sync::{AccessChallenge, ChallengeHash}, }; #[derive(Debug)] diff --git a/iroh-willow/src/proto/wgps.rs b/iroh-willow/src/proto/sync.rs similarity index 100% rename from iroh-willow/src/proto/wgps.rs rename to iroh-willow/src/proto/sync.rs diff --git a/iroh-willow/src/proto/willow.rs b/iroh-willow/src/proto/willow.rs index 7a45c2bef7..b142401779 100644 --- a/iroh-willow/src/proto/willow.rs +++ b/iroh-willow/src/proto/willow.rs @@ -9,7 +9,7 @@ use crate::util::time::system_time_now; use super::{ keys::{self, UserSecretKey}, meadowcap::{self, attach_authorisation, is_authorised_write, InvalidParams, McCapability}, - wgps::{DynamicToken, StaticToken}, + sync::{DynamicToken, StaticToken}, }; /// A type for identifying namespaces. 
diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index e035da67f7..ad6979c438 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -2,8 +2,8 @@ use std::collections::{HashMap, HashSet}; use crate::proto::grouping::Area; use crate::proto::keys::NamespaceId; -use crate::proto::wgps::{AccessChallenge, AreaOfInterestHandle, ChallengeHash}; -use crate::proto::{grouping::AreaOfInterest, wgps::ReadCapability}; +use crate::proto::sync::{AccessChallenge, AreaOfInterestHandle, ChallengeHash}; +use crate::proto::{grouping::AreaOfInterest, sync::ReadCapability}; pub mod channels; mod error; diff --git a/iroh-willow/src/session/channels.rs b/iroh-willow/src/session/channels.rs index 017e292348..cf1f88f555 100644 --- a/iroh-willow/src/session/channels.rs +++ b/iroh-willow/src/session/channels.rs @@ -9,7 +9,7 @@ use futures_lite::Stream; use tracing::debug; use crate::{ - proto::wgps::{ + proto::sync::{ Channel, LogicalChannel, Message, ReconciliationMessage, SetupBindAreaOfInterest, SetupBindReadCapability, SetupBindStaticToken, }, diff --git a/iroh-willow/src/session/error.rs b/iroh-willow/src/session/error.rs index 06dd3323f0..c1d1971b26 100644 --- a/iroh-willow/src/session/error.rs +++ b/iroh-willow/src/session/error.rs @@ -1,7 +1,7 @@ use ed25519_dalek::SignatureError; use crate::{ - proto::{meadowcap::InvalidCapability, wgps::ResourceHandle, willow::Unauthorised}, + proto::{meadowcap::InvalidCapability, sync::ResourceHandle, willow::Unauthorised}, store::KeyStoreError, util::channel::{ReadError, WriteError}, }; diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index bb94e3022d..fae91c262d 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -5,7 +5,7 @@ use crate::{ proto::{ grouping::ThreeDRange, keys::NamespaceId, - wgps::{ + sync::{ AreaOfInterestHandle, Fingerprint, LengthyEntry, Message, ReconciliationAnnounceEntries, ReconciliationMessage, 
ReconciliationSendEntry, ReconciliationSendFingerprint, diff --git a/iroh-willow/src/session/resource.rs b/iroh-willow/src/session/resource.rs index 2e3cdc1cd3..2ad1c5716f 100644 --- a/iroh-willow/src/session/resource.rs +++ b/iroh-willow/src/session/resource.rs @@ -3,7 +3,7 @@ use std::{ task::{Context, Poll, Waker}, }; -use crate::proto::wgps::{ +use crate::proto::sync::{ AreaOfInterestHandle, CapabilityHandle, IsHandle, ReadCapability, ResourceHandle, SetupBindAreaOfInterest, StaticToken, StaticTokenHandle, }; diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 5098ef2009..c32b01ada3 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -3,7 +3,7 @@ use strum::IntoEnumIterator; use tracing::{debug, error_span}; use crate::{ - proto::wgps::{ControlIssueGuarantee, LogicalChannel, Message, SetupBindAreaOfInterest}, + proto::sync::{ControlIssueGuarantee, LogicalChannel, Message, SetupBindAreaOfInterest}, session::{channels::LogicalChannelReceivers, Error, Scope, Session, SessionInit}, store::{KeyStore, Shared, Store}, util::channel::Receiver, diff --git a/iroh-willow/src/session/state.rs b/iroh-willow/src/session/state.rs index 8ded97cf24..34bc930230 100644 --- a/iroh-willow/src/session/state.rs +++ b/iroh-willow/src/session/state.rs @@ -16,7 +16,7 @@ use crate::{ challenge::ChallengeState, grouping::ThreeDRange, keys::NamespaceId, - wgps::{ + sync::{ AreaOfInterestHandle, CapabilityHandle, Channel, CommitmentReveal, IntersectionHandle, IsHandle, LogicalChannel, Message, ReadCapability, ReconciliationAnnounceEntries, ReconciliationSendFingerprint, SetupBindAreaOfInterest, SetupBindReadCapability, diff --git a/iroh-willow/src/session/util.rs b/iroh-willow/src/session/util.rs index 0fa7bc729f..43e154eb0a 100644 --- a/iroh-willow/src/session/util.rs +++ b/iroh-willow/src/session/util.rs @@ -1,5 +1,5 @@ // use crate::{ -// proto::{grouping::ThreeDRange, keys::NamespaceId, wgps::AreaOfInterestHandle}, +// 
proto::{grouping::ThreeDRange, keys::NamespaceId, sync::AreaOfInterestHandle}, // store::{Store, SyncConfig}, // session::Error, // }; diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index fbe8cf3a7d..6f394618b4 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -6,7 +6,7 @@ use crate::proto::{ grouping::{Range, RangeEnd, ThreeDRange}, keys::{NamespaceSecretKey, NamespaceSignature, UserId, UserSecretKey, UserSignature}, meadowcap::{self}, - wgps::Fingerprint, + sync::Fingerprint, willow::{AuthorisedEntry, Entry, NamespaceId}, }; From 301222b88af96f79677ed4d48bf1b2dba97810fe Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Wed, 29 May 2024 22:16:45 +0200 Subject: [PATCH 051/198] feat: basic support for payloads --- Cargo.lock | 1 + iroh-willow/Cargo.toml | 1 + iroh-willow/src/actor.rs | 19 ++- iroh-willow/src/net.rs | 39 ++++-- iroh-willow/src/proto/sync.rs | 8 +- iroh-willow/src/session/error.rs | 6 + iroh-willow/src/session/reconciler.rs | 167 ++++++++++++++++++++++++-- iroh-willow/src/session/run.rs | 8 +- iroh-willow/src/store.rs | 6 +- 9 files changed, 229 insertions(+), 26 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9b7ddca286..0952a786bf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2931,6 +2931,7 @@ dependencies = [ "futures-util", "genawaiter", "iroh-base", + "iroh-blobs", "iroh-metrics", "iroh-net", "iroh-test", diff --git a/iroh-willow/Cargo.toml b/iroh-willow/Cargo.toml index 6174230230..5b0ec37d8b 100644 --- a/iroh-willow/Cargo.toml +++ b/iroh-willow/Cargo.toml @@ -27,6 +27,7 @@ genawaiter = "0.99.1" iroh-base = { version = "0.17.0", path = "../iroh-base" } iroh-metrics = { version = "0.17.0", path = "../iroh-metrics", optional = true } iroh-net = { version = "0.17.0", path = "../iroh-net" } +iroh-blobs = { version = "0.17.0", path = "../iroh-blobs" } postcard = { version = "1", default-features = false, features = ["alloc", "use-std", "experimental-derive"] } rand = "0.8.5" rand_core = 
"0.6.4" diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/actor.rs index 15a3e55b8d..f9cb9c870a 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -3,6 +3,7 @@ use std::{collections::HashMap, sync::Arc, thread::JoinHandle}; use futures_lite::{future::Boxed as BoxFuture, stream::Stream, StreamExt}; use futures_util::future::{self, FutureExt}; use iroh_base::key::NodeId; +use iroh_blobs::store::Store as PayloadStore; use tokio::sync::oneshot; use tracing::{debug, error, error_span, trace, warn, Instrument}; @@ -15,7 +16,7 @@ use crate::{ willow::{AuthorisedEntry, Entry}, }, session::{Channels, Error, Role, Session, SessionInit}, - store::{KeyStore, ReadonlyStore, Shared, Store}, + store::{EntryStore, KeyStore, ReadonlyStore, Shared}, util::task::{JoinMap, TaskKey}, }; @@ -30,7 +31,12 @@ pub struct ActorHandle { } impl ActorHandle { - pub fn spawn(store: S, key_store: K, me: NodeId) -> ActorHandle { + pub fn spawn( + store: S, + key_store: K, + payload_store: P, + me: NodeId, + ) -> ActorHandle { let (tx, rx) = flume::bounded(INBOX_CAP); let join_handle = std::thread::Builder::new() .name("willow-actor".to_string()) @@ -41,6 +47,7 @@ impl ActorHandle { let actor = StorageThread { store: Shared::new(store), key_store: Shared::new(key_store), + payload_store, sessions: Default::default(), inbox_rx: rx, next_session_id: 0, @@ -194,16 +201,17 @@ struct ActiveSession { } #[derive(Debug)] -pub struct StorageThread { +pub struct StorageThread { inbox_rx: flume::Receiver, store: Shared, key_store: Shared, + payload_store: P, next_session_id: u64, sessions: HashMap, session_tasks: JoinMap>, } -impl StorageThread { +impl StorageThread { pub fn run(self) -> anyhow::Result<()> { let rt = tokio::runtime::Builder::new_current_thread() .build() @@ -260,9 +268,10 @@ impl StorageThread { let id = self.next_session_id(); let store = self.store.clone(); let key_store = self.key_store.clone(); + let payload_store = self.payload_store.clone(); let future = 
session - .run(store, key_store, recv, init) + .run(store, key_store, payload_store, recv, init) .instrument(error_span!("session", peer = %peer.fmt_short())); let task_key = self.session_tasks.spawn_local(id, future); diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 60daaaac3f..dc45722c77 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -261,7 +261,7 @@ async fn join_all(mut join_set: JoinSet>) -> anyhow::Result<( let mut joined = 0; while let Some(res) = join_set.join_next().await { joined += 1; - tracing::info!("joined {joined} remaining {}", join_set.len()); + tracing::trace!("joined {joined} tasks, remaining {}", join_set.len()); let res = match res { Ok(Ok(())) => Ok(()), Ok(Err(err)) => Err(err), @@ -282,7 +282,8 @@ mod tests { use futures_lite::StreamExt; use futures_util::FutureExt; - use iroh_base::{hash::Hash, key::SecretKey}; + use iroh_base::key::SecretKey; + use iroh_blobs::store::Store as PayloadStore; use iroh_net::MagicEndpoint; use rand::SeedableRng; use rand_core::CryptoRngCore; @@ -354,15 +355,28 @@ mod tests { let store_alfie = MemoryStore::default(); let keys_alfie = MemoryKeyStore::default(); - let handle_alfie = ActorHandle::spawn(store_alfie, keys_alfie, node_id_alfie); + let payloads_alfie = iroh_blobs::store::mem::Store::default(); + let handle_alfie = ActorHandle::spawn( + store_alfie, + keys_alfie, + payloads_alfie.clone(), + node_id_alfie, + ); let store_betty = MemoryStore::default(); let keys_betty = MemoryKeyStore::default(); - let handle_betty = ActorHandle::spawn(store_betty, keys_betty, node_id_betty); + let payloads_betty = iroh_blobs::store::mem::Store::default(); + let handle_betty = ActorHandle::spawn( + store_betty, + keys_betty, + payloads_betty.clone(), + node_id_betty, + ); let init_alfie = setup_and_insert( &mut rng, &handle_alfie, + &payloads_alfie, &namespace_secret, n_alfie, &mut expected_entries, @@ -372,6 +386,7 @@ mod tests { let init_betty = setup_and_insert( &mut rng, 
&handle_betty, + &payloads_betty, &namespace_secret, n_betty, &mut expected_entries, @@ -465,25 +480,35 @@ mod tests { Ok(entries) } - async fn setup_and_insert( + async fn setup_and_insert( rng: &mut impl CryptoRngCore, store: &ActorHandle, + payload_store: &P, namespace_secret: &NamespaceSecretKey, count: usize, track_entries: &mut impl Extend, path_fn: impl Fn(usize) -> Result, ) -> anyhow::Result { let user_secret = UserSecretKey::generate(rng); + let user_id_short = user_secret.id().fmt_short(); store.insert_secret(user_secret.clone()).await?; let (read_cap, write_cap) = create_capabilities(namespace_secret, user_secret.public_key()); for i in 0..count { + let payload = format!("hi, this is entry {i} for {user_id_short}") + .as_bytes() + .to_vec(); + let payload_len = payload.len() as u64; + let temp_tag = payload_store + .import_bytes(payload.into(), iroh_base::hash::BlobFormat::Raw) + .await?; + let payload_digest = *temp_tag.hash(); let path = path_fn(i).expect("invalid path"); let entry = Entry::new_current( namespace_secret.id(), user_secret.id(), path, - Hash::new("hello"), - 5, + payload_digest, + payload_len, ); track_entries.extend([entry.clone()]); let entry = entry.attach_authorisation(write_cap.clone(), &user_secret)?; diff --git a/iroh-willow/src/proto/sync.rs b/iroh-willow/src/proto/sync.rs index 48266c36f5..40b937d0ad 100644 --- a/iroh-willow/src/proto/sync.rs +++ b/iroh-willow/src/proto/sync.rs @@ -405,6 +405,8 @@ pub enum ReconciliationMessage { SendFingerprint(ReconciliationSendFingerprint), AnnounceEntries(ReconciliationAnnounceEntries), SendEntry(ReconciliationSendEntry), + SendPayload(ReconciliationSendPayload), + TerminatePayload(ReconciliationTerminatePayload), } impl TryFrom for ReconciliationMessage { type Error = (); @@ -413,6 +415,8 @@ impl TryFrom for ReconciliationMessage { Message::ReconciliationSendFingerprint(msg) => Ok(msg.into()), Message::ReconciliationAnnounceEntries(msg) => Ok(msg.into()), 
Message::ReconciliationSendEntry(msg) => Ok(msg.into()), + Message::ReconciliationSendPayload(msg) => Ok(msg.into()), + Message::ReconciliationTerminatePayload(msg) => Ok(msg.into()), _ => Err(()), } } @@ -423,6 +427,8 @@ impl From for Message { ReconciliationMessage::SendFingerprint(message) => message.into(), ReconciliationMessage::AnnounceEntries(message) => message.into(), ReconciliationMessage::SendEntry(message) => message.into(), + ReconciliationMessage::SendPayload(message) => message.into(), + ReconciliationMessage::TerminatePayload(message) => message.into(), } } } @@ -575,7 +581,7 @@ pub struct ReconciliationSendEntry { #[derive(Debug, Clone, Serialize, Deserialize)] pub struct ReconciliationSendPayload { // A substring of the bytes obtained by applying transform_payload to the Payload to be transmitted. - bytes: bytes::Bytes, + pub bytes: bytes::Bytes, } /// Indicate that no more bytes will be transmitted for the currently transmitted Payload as part of set reconciliation. diff --git a/iroh-willow/src/session/error.rs b/iroh-willow/src/session/error.rs index c1d1971b26..b300c30d4c 100644 --- a/iroh-willow/src/session/error.rs +++ b/iroh-willow/src/session/error.rs @@ -10,6 +10,12 @@ use crate::{ pub enum Error { #[error("local store failed: {0}")] Store(#[from] anyhow::Error), + #[error("payload store failed: {0}")] + PayloadStore(std::io::Error), + #[error("payload digest does not match expected digest")] + PayloadDigestMismatch, + #[error("payload size does not match expected size")] + PayloadSizeMismatch, #[error("local store failed: {0}")] KeyStore(#[from] KeyStoreError), #[error("failed to receive data: {0}")] diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index fae91c262d..9e55d89c3a 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -1,6 +1,13 @@ -use futures_lite::StreamExt; +use bytes::Bytes; +use futures_lite::{future::BoxedLocal, FutureExt, StreamExt}; 
use tracing::{debug, trace}; +use iroh_blobs::{ + store::{bao_tree::io::fsm::AsyncSliceReader, MapEntry, Store as PayloadStore}, + util::progress::IgnoreProgressSender, + TempTag, +}; + use crate::{ proto::{ grouping::ThreeDRange, @@ -8,35 +15,104 @@ use crate::{ sync::{ AreaOfInterestHandle, Fingerprint, LengthyEntry, Message, ReconciliationAnnounceEntries, ReconciliationMessage, ReconciliationSendEntry, - ReconciliationSendFingerprint, + ReconciliationSendFingerprint, ReconciliationSendPayload, + ReconciliationTerminatePayload, }, - willow::AuthorisedEntry, + willow::{AuthorisedEntry, Entry}, }, session::{channels::MessageReceiver, AreaOfInterestIntersection, Error, Session}, - store::{ReadonlyStore, Shared, SplitAction, Store, SyncConfig}, + store::{EntryStore, ReadonlyStore, Shared, SplitAction, SyncConfig}, util::channel::WriteError, }; +#[derive(Debug)] +struct CurrentPayload { + entry: Entry, + writer: Option, +} + +#[derive(derive_more::Debug)] +struct PayloadWriter { + #[debug(skip)] + fut: BoxedLocal>, + sender: flume::Sender>, +} + +impl CurrentPayload { + async fn recv_chunk(&mut self, store: P, chunk: Bytes) -> anyhow::Result<()> { + let writer = self.writer.get_or_insert_with(move || { + let (tx, rx) = flume::bounded(1); + let fut = async move { + store + .import_stream( + rx.into_stream(), + iroh_blobs::BlobFormat::Raw, + IgnoreProgressSender::default(), + ) + .await + }; + let writer = PayloadWriter { + fut: fut.boxed_local(), + sender: tx, + }; + writer + }); + writer.sender.send_async(Ok(chunk)).await?; + Ok(()) + } + + fn is_active(&self) -> bool { + self.writer.is_some() + } + + async fn finalize(self) -> Result<(), Error> { + let writer = self + .writer + .ok_or_else(|| Error::InvalidMessageInCurrentState)?; + drop(writer.sender); + let (tag, len) = writer.fut.await.map_err(Error::PayloadStore)?; + if *tag.hash() != self.entry.payload_digest { + return Err(Error::PayloadDigestMismatch); + } + if len != self.entry.payload_length { + return 
Err(Error::PayloadSizeMismatch); + } + // TODO: protect from gc + // we could store a tag for each blob + // however we really want reference counting here, not individual tags + // can also fallback to the naive impl from iroh-docs to just protect all docs hashes on gc + // let hash_and_format = *tag.inner(); + // let name = b"foo"; + // store.set_tag(name, Some(hash_and_format)); + Ok(()) + } +} + #[derive(derive_more::Debug)] -pub struct Reconciler { +pub struct Reconciler { session: Session, store: Shared, recv: MessageReceiver, snapshot: S::Snapshot, + current_payload: Option, + payload_store: P, } -impl Reconciler { +impl Reconciler { pub fn new( session: Session, store: Shared, + payload_store: P, recv: MessageReceiver, ) -> Result { let snapshot = store.snapshot()?; Ok(Self { recv, store, + payload_store, snapshot, session, + current_payload: None, }) } @@ -56,7 +132,7 @@ impl Reconciler { } } } - if self.session.reconciliation_is_complete() { + if self.session.reconciliation_is_complete() && !self.has_active_payload() { debug!("reconciliation complete, close session"); break; } @@ -73,6 +149,10 @@ impl Reconciler { self.on_announce_entries(message).await? } ReconciliationMessage::SendEntry(message) => self.on_send_entry(message).await?, + ReconciliationMessage::SendPayload(message) => self.on_send_payload(message).await?, + ReconciliationMessage::TerminatePayload(message) => { + self.on_terminate_payload(message).await?
+ } }; Ok(()) } @@ -143,6 +223,7 @@ impl Reconciler { message: ReconciliationAnnounceEntries, ) -> Result<(), Error> { trace!("on_announce_entries start"); + self.assert_no_active_payload()?; let (namespace, range_count) = self.session.on_announce_entries(&message).await?; if message.want_response { self.announce_and_send_entries( @@ -161,6 +242,7 @@ impl Reconciler { } async fn on_send_entry(&mut self, message: ReconciliationSendEntry) -> Result<(), Error> { + self.assert_no_active_payload()?; let static_token = self .session .get_their_resource_eventually(|r| &mut r.static_tokens, message.static_token_handle) @@ -176,9 +258,52 @@ impl Reconciler { self.store.ingest_entry(&authorised_entry)?; + self.current_payload = Some(CurrentPayload { + entry: authorised_entry.into_entry(), + writer: None, + }); + + Ok(()) + } + + async fn on_send_payload(&mut self, message: ReconciliationSendPayload) -> Result<(), Error> { + let state = self + .current_payload + .as_mut() + .ok_or(Error::InvalidMessageInCurrentState)?; + state + .recv_chunk(self.payload_store.clone(), message.bytes) + .await?; + Ok(()) + } + + async fn on_terminate_payload( + &mut self, + _message: ReconciliationTerminatePayload, + ) -> Result<(), Error> { + let state = self + .current_payload + .take() + .ok_or(Error::InvalidMessageInCurrentState)?; + state.finalize().await?; Ok(()) } + fn assert_no_active_payload(&self) -> Result<(), Error> { + if self.has_active_payload() { + Err(Error::InvalidMessageInCurrentState) + } else { + Ok(()) + } + } + + fn has_active_payload(&self) -> bool { + self.current_payload + .as_ref() + .map(|cp| cp.is_active()) + .unwrap_or(false) + } + async fn send_fingerprint( &mut self, range: ThreeDRange, @@ -240,12 +365,40 @@ impl Reconciler { if let Some(msg) = static_token_bind_msg { self.send(msg).await?; } + let digest = entry.payload_digest; let msg = ReconciliationSendEntry { entry: LengthyEntry::new(entry, available), static_token_handle, dynamic_token, }; 
self.send(msg).await?; + + // TODO: only send payload if configured to do so and/or under size limit. + let send_payloads = true; + if send_payloads { + let payload_entry = self + .payload_store + .get(&digest) + .await + .map_err(Error::PayloadStore)?; + if let Some(entry) = payload_entry { + let mut reader = entry.data_reader().await.map_err(Error::PayloadStore)?; + let len: u64 = entry.size().value(); + let chunk_size = 1024usize * 64; + let mut pos = 0; + while pos < len { + let bytes = reader + .read_at(pos, chunk_size) + .await + .map_err(Error::PayloadStore)?; + pos += bytes.len() as u64; + let msg = ReconciliationSendPayload { bytes }; + self.send(msg).await?; + } + let msg = ReconciliationTerminatePayload; + self.send(msg).await?; + } + } } Ok(()) } diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index c32b01ada3..56a0157180 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -1,11 +1,12 @@ use futures_lite::StreamExt; +use iroh_blobs::store::Store as PayloadStore; use strum::IntoEnumIterator; use tracing::{debug, error_span}; use crate::{ proto::sync::{ControlIssueGuarantee, LogicalChannel, Message, SetupBindAreaOfInterest}, session::{channels::LogicalChannelReceivers, Error, Scope, Session, SessionInit}, - store::{KeyStore, Shared, Store}, + store::{EntryStore, KeyStore, Shared}, util::channel::Receiver, }; @@ -14,10 +15,11 @@ use super::{channels::ChannelReceivers, reconciler::Reconciler}; const INITIAL_GUARANTEES: u64 = u64::MAX; impl Session { - pub async fn run( + pub async fn run( self, store: Shared, key_store: Shared, + payload_store: P, recv: ChannelReceivers, init: SessionInit, ) -> Result<(), Error> { @@ -58,7 +60,7 @@ impl Session { // Spawn a task to handle reconciliation messages self.spawn(error_span!("rec"), move |session| async move { - Reconciler::new(session, store, reconciliation_recv)? + Reconciler::new(session, store, payload_store, reconciliation_recv)? 
.run() .await }); diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index 6f394618b4..e270333838 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -51,7 +51,7 @@ pub trait KeyStore: Send + 'static { ) -> Result; } -pub trait Store: ReadonlyStore + 'static { +pub trait EntryStore: ReadonlyStore + 'static { type Snapshot: ReadonlyStore + Clone + Send; fn snapshot(&mut self) -> Result; @@ -101,7 +101,7 @@ impl Shared { } } -impl Shared { +impl Shared { pub fn snapshot(&self) -> Result { Ok(self.0.borrow_mut().snapshot()?) } @@ -335,7 +335,7 @@ impl ReadonlyStore for Arc { } } -impl Store for MemoryStore { +impl EntryStore for MemoryStore { type Snapshot = Arc; // type KeyStore = MemoryKeyStore; fn snapshot(&mut self) -> Result { From e86a949e71b8375f3b4a9167532c8b0ca59fa1e2 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Fri, 31 May 2024 17:44:06 +0200 Subject: [PATCH 052/198] wip: live data mode --- iroh-willow/src/actor.rs | 19 +- iroh-willow/src/net.rs | 293 +++++++++++++++++++++----- iroh-willow/src/proto/sync.rs | 102 ++++++++- iroh-willow/src/session.rs | 22 +- iroh-willow/src/session/channels.rs | 9 +- iroh-willow/src/session/data.rs | 148 +++++++++++++ iroh-willow/src/session/payload.rs | 156 ++++++++++++++ iroh-willow/src/session/reconciler.rs | 190 +++++------------ iroh-willow/src/session/run.rs | 45 +++- iroh-willow/src/session/state.rs | 117 +++++++--- iroh-willow/src/store.rs | 67 +++--- iroh-willow/src/store/broadcaster.rs | 134 ++++++++++++ iroh-willow/src/util/task.rs | 15 +- 13 files changed, 1014 insertions(+), 303 deletions(-) create mode 100644 iroh-willow/src/session/data.rs create mode 100644 iroh-willow/src/session/payload.rs create mode 100644 iroh-willow/src/store/broadcaster.rs diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/actor.rs index f9cb9c870a..25fc7ab1d6 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -16,7 +16,10 @@ use crate::{ 
willow::{AuthorisedEntry, Entry}, }, session::{Channels, Error, Role, Session, SessionInit}, - store::{EntryStore, KeyStore, ReadonlyStore, Shared}, + store::{ + broadcaster::{Broadcaster, Origin}, + EntryStore, KeyStore, ReadonlyStore, Shared, + }, util::task::{JoinMap, TaskKey}, }; @@ -44,8 +47,9 @@ impl ActorHandle { let span = error_span!("willow_thread", me=%me.fmt_short()); let _guard = span.enter(); + let store = Broadcaster::new(Shared::new(store)); let actor = StorageThread { - store: Shared::new(store), + store, key_store: Shared::new(key_store), payload_store, sessions: Default::default(), @@ -203,7 +207,7 @@ struct ActiveSession { #[derive(Debug)] pub struct StorageThread { inbox_rx: flume::Receiver, - store: Shared, + store: Broadcaster, key_store: Shared, payload_store: P, next_session_id: u64, @@ -263,10 +267,10 @@ impl StorageThread { on_finish, } => { let Channels { send, recv } = channels; - let session = Session::new(send, our_role, initial_transmission); - let id = self.next_session_id(); - let store = self.store.clone(); + let session = Session::new(id, init.mode, our_role, send, initial_transmission); + + let store: Broadcaster = self.store.clone(); let key_store = self.key_store.clone(); let payload_store = self.payload_store.clone(); @@ -297,7 +301,7 @@ impl StorageThread { } } ToActor::IngestEntry { entry, reply } => { - let res = self.store.ingest_entry(&entry); + let res = self.store.ingest_entry(&entry, Origin::Local); reply.send(res).ok(); } ToActor::InsertSecret { secret, reply } => { @@ -309,6 +313,7 @@ impl StorageThread { } fn complete_session(&mut self, session_id: &SessionId, result: Result<(), Error>) { + self.store.unsubscribe(session_id); let session = self.sessions.remove(session_id); if let Some(session) = session { session.on_finish.send(result).ok(); diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index dc45722c77..ec50d3227e 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -159,18 +159,21 @@ 
async fn open_logical_channels( let stt = take_and_spawn_channel(LogicalChannel::StaticToken)?; let aoi = take_and_spawn_channel(LogicalChannel::AreaOfInterest)?; let cap = take_and_spawn_channel(LogicalChannel::Capability)?; + let dat = take_and_spawn_channel(LogicalChannel::Data)?; Ok(( LogicalChannelSenders { reconciliation: rec.0, static_tokens: stt.0, aoi: aoi.0, capability: cap.0, + data: dat.0, }, LogicalChannelReceivers { reconciliation_recv: rec.1.into(), static_tokens_recv: stt.1.into(), aoi_recv: aoi.1.into(), capability_recv: cap.1.into(), + data_recv: dat.1.into(), }, )) } @@ -284,7 +287,7 @@ mod tests { use futures_util::FutureExt; use iroh_base::key::SecretKey; use iroh_blobs::store::Store as PayloadStore; - use iroh_net::MagicEndpoint; + use iroh_net::{MagicEndpoint, NodeAddr, NodeId}; use rand::SeedableRng; use rand_core::CryptoRngCore; use tracing::{debug, info}; @@ -299,7 +302,7 @@ mod tests { sync::ReadCapability, willow::{Entry, InvalidPath, Path, WriteCapability}, }, - session::{Role, SessionInit}, + session::{Role, SessionInit, SessionMode}, store::{MemoryKeyStore, MemoryStore}, }; @@ -320,20 +323,137 @@ mod tests { .parse() .unwrap(); - let ep_alfie = MagicEndpoint::builder() - .secret_key(SecretKey::generate_with_rng(&mut rng)) - .alpns(vec![ALPN.to_vec()]) - .bind(0) - .await?; - let ep_betty = MagicEndpoint::builder() - .secret_key(SecretKey::generate_with_rng(&mut rng)) + let (ep_alfie, node_id_alfie, _) = create_endpoint(&mut rng).await?; + let (ep_betty, node_id_betty, addr_betty) = create_endpoint(&mut rng).await?; + + debug!("start connect"); + let (conn_alfie, conn_betty) = tokio::join!( + async move { ep_alfie.connect(addr_betty, ALPN).await }, + async move { + let connecting = ep_betty.accept().await.unwrap(); + connecting.await + } + ); + let conn_alfie = conn_alfie.unwrap(); + let conn_betty = conn_betty.unwrap(); + info!("connected! 
now start reconciliation"); + + let namespace_secret = NamespaceSecretKey::generate(&mut rng, NamespaceKind::Owned); + let namespace_id: NamespaceId = namespace_secret.public_key().into(); + + let start = Instant::now(); + let mut expected_entries = BTreeSet::new(); + + let store_alfie = MemoryStore::default(); + let keys_alfie = MemoryKeyStore::default(); + let payloads_alfie = iroh_blobs::store::mem::Store::default(); + let handle_alfie = ActorHandle::spawn( + store_alfie, + keys_alfie, + payloads_alfie.clone(), + node_id_alfie, + ); + + let store_betty = MemoryStore::default(); + let keys_betty = MemoryKeyStore::default(); + let payloads_betty = iroh_blobs::store::mem::Store::default(); + let handle_betty = ActorHandle::spawn( + store_betty, + keys_betty, + payloads_betty.clone(), + node_id_betty, + ); + + let (init_alfie, _, _) = setup_and_insert( + SessionMode::ReconcileOnce, + &mut rng, + &handle_alfie, + &payloads_alfie, + &namespace_secret, + n_alfie, + &mut expected_entries, + |n| Path::new(&[b"alfie", n.to_string().as_bytes()]), + ) + .await?; + let (init_betty, _, _) = setup_and_insert( + SessionMode::ReconcileOnce, + &mut rng, + &handle_betty, + &payloads_betty, + &namespace_secret, + n_betty, + &mut expected_entries, + |n| Path::new(&[b"betty", n.to_string().as_bytes()]), + ) + .await?; + + debug!("init constructed"); + println!("init took {:?}", start.elapsed()); + let start = Instant::now(); + + let (res_alfie, res_betty) = tokio::join!( + run( + node_id_alfie, + handle_alfie.clone(), + conn_alfie, + Role::Alfie, + init_alfie + ) + .inspect(|res| info!("alfie done: {res:?}")), + run( + node_id_betty, + handle_betty.clone(), + conn_betty, + Role::Betty, + init_betty + ) + .inspect(|res| info!("betty done: {res:?}")), + ); + info!(time=?start.elapsed(), "reconciliation finished"); + + info!("alfie res {:?}", res_alfie); + info!("betty res {:?}", res_betty); + // info!( + // "alfie store {:?}", + // get_entries_debug(&handle_alfie, namespace_id).await? 
+ // ); + // info!( + // "betty store {:?}", + // get_entries_debug(&handle_betty, namespace_id).await? + // ); + assert!(res_alfie.is_ok()); + assert!(res_betty.is_ok()); + let alfie_entries = get_entries(&handle_alfie, namespace_id).await?; + let betty_entries = get_entries(&handle_betty, namespace_id).await?; + info!("alfie has now {} entries", alfie_entries.len()); + info!("betty has now {} entries", betty_entries.len()); + // not using assert_eq because it would print a lot in case of failure + assert!(alfie_entries == expected_entries, "alfie expected entries"); + assert!(betty_entries == expected_entries, "betty expected entries"); + + Ok(()) + } + + pub async fn create_endpoint( + rng: &mut rand_chacha::ChaCha12Rng, + ) -> anyhow::Result<(MagicEndpoint, NodeId, NodeAddr)> { + let ep = MagicEndpoint::builder() + .secret_key(SecretKey::generate_with_rng(rng)) .alpns(vec![ALPN.to_vec()]) .bind(0) .await?; + let addr = ep.my_addr().await?; + let node_id = ep.node_id(); + Ok((ep, node_id, addr)) + } - let addr_betty = ep_betty.my_addr().await?; - let node_id_betty = ep_betty.node_id(); - let node_id_alfie = ep_alfie.node_id(); + #[tokio::test(flavor = "multi_thread")] + async fn live_data() -> anyhow::Result<()> { + iroh_test::logging::setup_multithreaded(); + let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(1); + + let (ep_alfie, node_id_alfie, _) = create_endpoint(&mut rng).await?; + let (ep_betty, node_id_betty, addr_betty) = create_endpoint(&mut rng).await?; debug!("start connect"); let (conn_alfie, conn_betty) = tokio::join!( @@ -373,22 +493,24 @@ mod tests { node_id_betty, ); - let init_alfie = setup_and_insert( + let (init_alfie, secret_alfie, cap_alfie) = setup_and_insert( + SessionMode::Live, &mut rng, &handle_alfie, &payloads_alfie, &namespace_secret, - n_alfie, + 2, &mut expected_entries, |n| Path::new(&[b"alfie", n.to_string().as_bytes()]), ) .await?; - let init_betty = setup_and_insert( + let (init_betty, _secret_betty, _cap_betty) = 
setup_and_insert( + SessionMode::Live, &mut rng, &handle_betty, &payloads_betty, &namespace_secret, - n_betty, + 2, &mut expected_entries, |n| Path::new(&[b"betty", n.to_string().as_bytes()]), ) @@ -398,33 +520,31 @@ mod tests { println!("init took {:?}", start.elapsed()); let start = Instant::now(); - // tokio::task::spawn({ - // let handle_alfie = handle_alfie.clone(); - // let handle_betty = handle_betty.clone(); - // async move { - // loop { - // info!( - // "alfie count: {}", - // handle_alfie - // .get_entries(namespace_id, ThreeDRange::full()) - // .await - // .unwrap() - // .count() - // .await - // ); - // info!( - // "betty count: {}", - // handle_betty - // .get_entries(namespace_id, ThreeDRange::full()) - // .await - // .unwrap() - // .count() - // .await - // ); - // tokio::time::sleep(Duration::from_secs(1)).await; - // } - // } - // }); + let _insert_task_alfie = tokio::task::spawn({ + let store = handle_alfie.clone(); + let payload_store = payloads_alfie.clone(); + let count = 3; + let content_fn = |i: usize| format!("alfie live insert {i} for alfie"); + let path_fn = |i: usize| Path::new(&[b"alfie-live", i.to_string().as_bytes()]); + let mut track_entries = vec![]; + + async move { + tokio::time::sleep(std::time::Duration::from_secs(2)).await; + insert( + &store, + &payload_store, + count, + namespace_id, + &secret_alfie, + &cap_alfie, + content_fn, + path_fn, + &mut track_entries, + ) + .await + .expect("failed to insert"); + } + }); let (res_alfie, res_betty) = tokio::join!( run( @@ -468,6 +588,7 @@ mod tests { Ok(()) } + async fn get_entries( store: &ActorHandle, namespace: NamespaceId, @@ -480,23 +601,19 @@ mod tests { Ok(entries) } - async fn setup_and_insert( - rng: &mut impl CryptoRngCore, + async fn insert( store: &ActorHandle, payload_store: &P, - namespace_secret: &NamespaceSecretKey, count: usize, - track_entries: &mut impl Extend, + namespace_id: NamespaceId, + user_secret: &UserSecretKey, + write_cap: &WriteCapability, + content_fn: 
impl Fn(usize) -> String, path_fn: impl Fn(usize) -> Result, - ) -> anyhow::Result { - let user_secret = UserSecretKey::generate(rng); - let user_id_short = user_secret.id().fmt_short(); - store.insert_secret(user_secret.clone()).await?; - let (read_cap, write_cap) = create_capabilities(namespace_secret, user_secret.public_key()); + track_entries: &mut impl Extend, + ) -> anyhow::Result<()> { for i in 0..count { - let payload = format!("hi, this is entry {i} for {user_id_short}") - .as_bytes() - .to_vec(); + let payload = content_fn(i).as_bytes().to_vec(); let payload_len = payload.len() as u64; let temp_tag = payload_store .import_bytes(payload.into(), iroh_base::hash::BlobFormat::Raw) @@ -504,7 +621,7 @@ mod tests { let payload_digest = *temp_tag.hash(); let path = path_fn(i).expect("invalid path"); let entry = Entry::new_current( - namespace_secret.id(), + namespace_id, user_secret.id(), path, payload_digest, @@ -514,8 +631,38 @@ mod tests { let entry = entry.attach_authorisation(write_cap.clone(), &user_secret)?; store.ingest_entry(entry).await?; } - let init = SessionInit::with_interest(read_cap, AreaOfInterest::full()); - Ok(init) + Ok(()) + } + + async fn setup_and_insert( + mode: SessionMode, + rng: &mut impl CryptoRngCore, + store: &ActorHandle, + payload_store: &P, + namespace_secret: &NamespaceSecretKey, + count: usize, + track_entries: &mut impl Extend, + path_fn: impl Fn(usize) -> Result, + ) -> anyhow::Result<(SessionInit, UserSecretKey, WriteCapability)> { + let user_secret = UserSecretKey::generate(rng); + let user_id_short = user_secret.id().fmt_short(); + store.insert_secret(user_secret.clone()).await?; + let (read_cap, write_cap) = create_capabilities(namespace_secret, user_secret.public_key()); + let content_fn = |i| format!("initial entry {i} for {user_id_short}"); + insert( + store, + payload_store, + count, + namespace_secret.id(), + &user_secret, + &write_cap, + content_fn, + path_fn, + track_entries, + ) + .await?; + let init = 
SessionInit::with_interest(mode, read_cap, AreaOfInterest::full()); + Ok((init, user_secret, write_cap)) } fn create_capabilities( @@ -547,4 +694,34 @@ mod tests { // entries.sort(); // Ok(entries) // } + // + // + // + // tokio::task::spawn({ + // let handle_alfie = handle_alfie.clone(); + // let handle_betty = handle_betty.clone(); + // async move { + // loop { + // info!( + // "alfie count: {}", + // handle_alfie + // .get_entries(namespace_id, ThreeDRange::full()) + // .await + // .unwrap() + // .count() + // .await + // ); + // info!( + // "betty count: {}", + // handle_betty + // .get_entries(namespace_id, ThreeDRange::full()) + // .await + // .unwrap() + // .count() + // .await + // ); + // tokio::time::sleep(Duration::from_secs(1)).await; + // } + // } + // }); } diff --git a/iroh-willow/src/proto/sync.rs b/iroh-willow/src/proto/sync.rs index 40b937d0ad..a01ad9e11b 100644 --- a/iroh-willow/src/proto/sync.rs +++ b/iroh-willow/src/proto/sync.rs @@ -119,8 +119,8 @@ pub enum LogicalChannel { // TODO: use all the channels // right now everything but reconciliation goes into the control channel // - // /// Logical channel for transmitting Entries and Payloads outside of 3d range-based set reconciliation. - // Data, + /// Logical channel for transmitting Entries and Payloads outside of 3d range-based set reconciliation. + Data, // // /// Logical channel for controlling the binding of new IntersectionHandles. 
// Intersection, @@ -154,6 +154,7 @@ impl LogicalChannel { LogicalChannel::StaticToken => "StT", LogicalChannel::Capability => "Cap", LogicalChannel::AreaOfInterest => "AoI", + LogicalChannel::Data => "Dat", } } @@ -163,6 +164,7 @@ impl LogicalChannel { 3 => Ok(Self::Capability), 4 => Ok(Self::StaticToken), 5 => Ok(Self::Reconciliation), + 6 => Ok(Self::Data), _ => Err(InvalidChannelId), } } @@ -173,6 +175,7 @@ impl LogicalChannel { LogicalChannel::Capability => 3, LogicalChannel::StaticToken => 4, LogicalChannel::Reconciliation => 5, + LogicalChannel::Data => 6, } } } @@ -276,9 +279,12 @@ pub enum Message { ReconciliationSendPayload(ReconciliationSendPayload), #[debug("{:?}", _0)] ReconciliationTerminatePayload(ReconciliationTerminatePayload), - // DataSendEntry - // DataSendPayload - // DataSetMetadata + #[debug("{:?}", _0)] + DataSendEntry(DataSendEntry), + #[debug("{:?}", _0)] + DataSendPayload(DataSendPayload), + #[debug("{:?}", _0)] + DataSetMetadata(DataSetMetadata), // DataBindPayloadRequest // DataReplyPayload #[debug("{:?}", _0)] @@ -347,6 +353,7 @@ impl Message { Message::SetupBindReadCapability(_) => Channel::Logical(LogicalChannel::Capability), Message::SetupBindAreaOfInterest(_) => Channel::Logical(LogicalChannel::AreaOfInterest), Message::SetupBindStaticToken(_) => Channel::Logical(LogicalChannel::StaticToken), + Message::ReconciliationSendFingerprint(_) | Message::ReconciliationAnnounceEntries(_) | Message::ReconciliationSendEntry(_) @@ -354,6 +361,11 @@ impl Message { | Message::ReconciliationTerminatePayload(_) => { Channel::Logical(LogicalChannel::Reconciliation) } + + Message::DataSendEntry(_) + | Message::DataSendPayload(_) + | Message::DataSetMetadata(_) => Channel::Logical(LogicalChannel::Data), + Message::CommitmentReveal(_) | Message::ControlIssueGuarantee(_) | Message::ControlAbsolve(_) @@ -432,6 +444,33 @@ impl From for Message { } } } + +#[derive(Debug, derive_more::From, strum::Display)] +pub enum DataMessage { + 
SendEntry(DataSendEntry), + SendPayload(DataSendPayload), + SetMetadata(DataSetMetadata), +} +impl TryFrom for DataMessage { + type Error = (); + fn try_from(message: Message) -> Result { + match message { + Message::DataSendEntry(msg) => Ok(msg.into()), + Message::DataSendPayload(msg) => Ok(msg.into()), + Message::DataSetMetadata(msg) => Ok(msg.into()), + _ => Err(()), + } + } +} +impl From for Message { + fn from(message: DataMessage) -> Self { + match message { + DataMessage::SendEntry(message) => message.into(), + DataMessage::SendPayload(message) => message.into(), + DataMessage::SetMetadata(message) => message.into(), + } + } +} // // impl Encoder for ReconciliationMessage { // fn encoded_len(&self) -> usize { @@ -566,7 +605,7 @@ pub struct ReconciliationAnnounceEntries { pub covers: Option, } -/// Transmit a LengthyEntry as part of 3d range-based set reconciliation. +/// Transmit a [`LengthyEntry`] as part of 3d range-based set reconciliation. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct ReconciliationSendEntry { /// The LengthyEntry itself. @@ -588,6 +627,57 @@ pub struct ReconciliationSendPayload { #[derive(Debug, Clone, Serialize, Deserialize)] pub struct ReconciliationTerminatePayload; +/// Transmit an AuthorisedEntry to the other peer, and optionally prepare transmission of its Payload. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DataSendEntry { + /// The Entry to transmit. + pub entry: Entry, + /// A [`StaticTokenHandle`] bound to the StaticToken of the Entry to transmit. + pub static_token_handle: StaticTokenHandle, + /// The DynamicToken of the Entry to transmit. + pub dynamic_token: DynamicToken, + /// The offset in the Payload in bytes at which Payload transmission will begin. + /// + /// If this is equal to the Entry’s payload_length, the Payload will not be transmitted. + pub offset: u64, +} + +/// Transmit some transformed Payload bytes. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DataSendPayload { + // A substring of the bytes obtained by applying transform_payload to the Payload to be transmitted. + pub bytes: bytes::Bytes, +} + +/// Express preferences for Payload transfer in the intersection of two AreaOfInterests. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DataSetMetadata { + /// An AreaOfInterestHandle, bound by the sender of this message. + sender_handle: AreaOfInterestHandle, + /// An AreaOfInterestHandle, bound by the receiver of this message. + receiver_handle: AreaOfInterestHandle, + // Whether the other peer should eagerly forward Payloads in this intersection. + is_eager: bool, +} + +// /// Bind an Entry to a PayloadRequestHandle and request transmission of its Payload from an offset. +// #[derive(Debug, Clone, Serialize, Deserialize)] +// pub struct DataBindPayloadRequest { +// /// The Entry to request. +// entry: Entry, +// /// The offset in the Payload starting from which the sender would like to receive the Payload bytes. +// offset: u64, +// /// A resource handle for a ReadCapability bound by the sender that grants them read access to the bound Entry. +// capability: CapabilityHandle, +// } +// +// /// Set up the state for replying to a DataBindPayloadRequest message. +// #[derive(Debug, Clone, Serialize, Deserialize)] +// pub struct DataReplyPayload { +// /// The PayloadRequestHandle to which to reply. +// handle: u64, +// } + /// An Entry together with information about how much of its Payload a peer holds. 
#[derive(Debug, Clone, Serialize, Deserialize)] pub struct LengthyEntry { diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index ad6979c438..b6e8284f58 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -6,7 +6,9 @@ use crate::proto::sync::{AccessChallenge, AreaOfInterestHandle, ChallengeHash}; use crate::proto::{grouping::AreaOfInterest, sync::ReadCapability}; pub mod channels; +mod data; mod error; +mod payload; mod reconciler; mod resource; mod run; @@ -51,17 +53,35 @@ impl Role { } } +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub enum SessionMode { + ReconcileOnce, + Live, +} + +impl SessionMode { + fn is_live(&self) -> bool { + *self == Self::Live + } +} + /// Options to initialize a session with. #[derive(Debug)] pub struct SessionInit { /// List of interests we wish to synchronize, together with our capabilities to read them. pub interests: HashMap>, + pub mode: SessionMode, } impl SessionInit { /// Returns a [`SessionInit`] with a single interest. 
- pub fn with_interest(capability: ReadCapability, area_of_interest: AreaOfInterest) -> Self { + pub fn with_interest( + mode: SessionMode, + capability: ReadCapability, + area_of_interest: AreaOfInterest, + ) -> Self { Self { + mode, interests: HashMap::from_iter([(capability, HashSet::from_iter([area_of_interest]))]), } } diff --git a/iroh-willow/src/session/channels.rs b/iroh-willow/src/session/channels.rs index cf1f88f555..1683ad7b87 100644 --- a/iroh-willow/src/session/channels.rs +++ b/iroh-willow/src/session/channels.rs @@ -10,8 +10,8 @@ use tracing::debug; use crate::{ proto::sync::{ - Channel, LogicalChannel, Message, ReconciliationMessage, SetupBindAreaOfInterest, - SetupBindReadCapability, SetupBindStaticToken, + Channel, DataMessage, LogicalChannel, Message, ReconciliationMessage, + SetupBindAreaOfInterest, SetupBindReadCapability, SetupBindStaticToken, }, util::channel::{Receiver, Sender, WriteError}, }; @@ -70,6 +70,7 @@ pub struct LogicalChannelReceivers { pub static_tokens_recv: MessageReceiver, pub capability_recv: MessageReceiver, pub aoi_recv: MessageReceiver, + pub data_recv: MessageReceiver, } impl LogicalChannelReceivers { @@ -78,6 +79,7 @@ impl LogicalChannelReceivers { self.static_tokens_recv.close(); self.capability_recv.close(); self.aoi_recv.close(); + self.data_recv.close(); } } @@ -87,6 +89,7 @@ pub struct LogicalChannelSenders { pub static_tokens: Sender, pub aoi: Sender, pub capability: Sender, + pub data: Sender, } impl LogicalChannelSenders { pub fn close(&self) { @@ -94,6 +97,7 @@ impl LogicalChannelSenders { self.static_tokens.close(); self.aoi.close(); self.capability.close(); + self.data.close(); } pub fn get(&self, channel: LogicalChannel) -> &Sender { @@ -102,6 +106,7 @@ impl LogicalChannelSenders { LogicalChannel::StaticToken => &self.static_tokens, LogicalChannel::Capability => &self.capability, LogicalChannel::AreaOfInterest => &self.aoi, + LogicalChannel::Data => &self.data, } } } diff --git 
a/iroh-willow/src/session/data.rs b/iroh-willow/src/session/data.rs new file mode 100644 index 0000000000..b1f002ac19 --- /dev/null +++ b/iroh-willow/src/session/data.rs @@ -0,0 +1,148 @@ +use futures_lite::StreamExt; + +use iroh_blobs::store::Store as PayloadStore; +use tokio::sync::broadcast; + +use crate::{ + proto::{ + sync::{DataMessage, DataSendEntry, DataSendPayload}, + willow::AuthorisedEntry, + }, + session::Error, + store::{ + broadcaster::{Broadcaster, Origin}, + EntryStore, + }, +}; + +use super::channels::MessageReceiver; +use super::payload::{send_payload_chunked, CurrentPayload}; +use super::Session; + +#[derive(derive_more::Debug)] +pub struct DataSender { + session: Session, + store: Broadcaster, + payload_store: P, +} + +impl DataSender { + pub fn new(session: Session, store: Broadcaster, payload_store: P) -> Self { + Self { + session, + store, + payload_store, + } + } + pub async fn run(mut self) -> Result<(), Error> { + let mut stream = self.store.subscribe(*self.session.id()); + loop { + match stream.recv().await { + Ok(entry) => { + self.send_entry(entry).await?; + } + Err(broadcast::error::RecvError::Closed) => break, + Err(broadcast::error::RecvError::Lagged(_count)) => { + // TODO + } + } + } + Ok(()) + } + + async fn send_entry(&mut self, authorised_entry: AuthorisedEntry) -> Result<(), Error> { + let (entry, token) = authorised_entry.into_parts(); + let (static_token, dynamic_token) = token.into_parts(); + // TODO: partial payloads + // let available = entry.payload_length; + let (static_token_handle, static_token_bind_msg) = + self.session.bind_our_static_token(static_token); + if let Some(msg) = static_token_bind_msg { + self.session.send(msg).await?; + } + let digest = entry.payload_digest; + let msg = DataSendEntry { + entry, + static_token_handle, + dynamic_token, + offset: 0, + }; + self.session.send(msg).await?; + + // TODO: only send payload if configured to do so and/or under size limit. 
+ let send_payloads = true; + let chunk_size = 1024 * 64; + if send_payloads { + send_payload_chunked( + digest, + &self.payload_store, + &self.session, + chunk_size, + |bytes| DataSendPayload { bytes }.into(), + ) + .await?; + } + Ok(()) + } +} + +#[derive(derive_more::Debug)] +pub struct DataReceiver { + session: Session, + store: Broadcaster, + payload_store: P, + current_payload: CurrentPayload, +} + +impl DataReceiver { + pub fn new(session: Session, store: Broadcaster, payload_store: P) -> Self { + Self { + session, + store, + payload_store, + current_payload: Default::default(), + } + } + pub async fn run(mut self, mut recv: MessageReceiver) -> Result<(), Error> { + while let Some(message) = recv.try_next().await? { + self.on_message(message).await?; + } + Ok(()) + } + + async fn on_message(&mut self, message: DataMessage) -> Result<(), Error> { + match message { + DataMessage::SendEntry(message) => self.on_send_entry(message).await?, + DataMessage::SendPayload(message) => self.on_send_payload(message).await?, + DataMessage::SetMetadata(_) => todo!(), + } + Ok(()) + } + + async fn on_send_entry(&mut self, message: DataSendEntry) -> Result<(), Error> { + self.current_payload.assert_inactive()?; + let authorised_entry = self + .session + .authorise_sent_entry( + message.entry, + message.static_token_handle, + message.dynamic_token, + ) + .await?; + self.store + .ingest_entry(&authorised_entry, Origin::Remote(*self.session.id()))?; + self.current_payload + .set(authorised_entry.into_entry(), None)?; + Ok(()) + } + + async fn on_send_payload(&mut self, message: DataSendPayload) -> Result<(), Error> { + self.current_payload + .recv_chunk(self.payload_store.clone(), message.bytes) + .await?; + if self.current_payload.is_complete() { + self.current_payload.finalize().await?; + } + Ok(()) + } +} diff --git a/iroh-willow/src/session/payload.rs b/iroh-willow/src/session/payload.rs new file mode 100644 index 0000000000..12534eedeb --- /dev/null +++ 
b/iroh-willow/src/session/payload.rs @@ -0,0 +1,156 @@ +use bytes::Bytes; +use futures_lite::{future::BoxedLocal, FutureExt}; +// use iroh_blobs::{store::Store as PayloadStore, util::progress::IgnoreProgressSender, TempTag}; +use iroh_blobs::{ + store::{bao_tree::io::fsm::AsyncSliceReader, MapEntry, Store as PayloadStore}, + util::progress::IgnoreProgressSender, + TempTag, +}; + +use crate::proto::{ + sync::Message, + willow::{Entry, PayloadDigest}, +}; + +use super::{Error, Session}; + +pub async fn send_payload_chunked( + digest: PayloadDigest, + payload_store: &P, + session: &Session, + chunk_size: usize, + map: impl Fn(Bytes) -> Message, +) -> Result { + let payload_entry = payload_store + .get(&digest) + .await + .map_err(Error::PayloadStore)?; + if let Some(entry) = payload_entry { + let mut reader = entry.data_reader().await.map_err(Error::PayloadStore)?; + let len: u64 = entry.size().value(); + let mut pos = 0; + while pos < len { + let bytes = reader + .read_at(pos, chunk_size) + .await + .map_err(Error::PayloadStore)?; + pos += bytes.len() as u64; + let msg = map(bytes); + session.send(msg).await?; + } + Ok(true) + } else { + Ok(false) + } +} + +#[derive(Debug, Default)] +pub struct CurrentPayload(Option); + +#[derive(Debug)] +struct CurrentPayloadInner { + entry: Entry, + expected_length: u64, + received_length: u64, + writer: Option, +} + +#[derive(derive_more::Debug)] +struct PayloadWriter { + #[debug(skip)] + fut: BoxedLocal>, + sender: flume::Sender>, +} + +impl CurrentPayload { + pub fn new() -> Self { + Self::default() + } + + pub fn set(&mut self, entry: Entry, expected_length: Option) -> Result<(), Error> { + if self.0.is_some() { + return Err(Error::InvalidMessageInCurrentState); + } + let expected_length = expected_length.unwrap_or(entry.payload_length); + self.0 = Some(CurrentPayloadInner { + entry, + writer: None, + expected_length, + received_length: 0, + }); + Ok(()) + } + + pub async fn recv_chunk( + &mut self, + store: P, + chunk: Bytes, 
+ ) -> anyhow::Result<()> { + let state = self.0.as_mut().ok_or(Error::InvalidMessageInCurrentState)?; + let len = chunk.len(); + let writer = state.writer.get_or_insert_with(move || { + let (tx, rx) = flume::bounded(1); + let fut = async move { + store + .import_stream( + rx.into_stream(), + iroh_blobs::BlobFormat::Raw, + IgnoreProgressSender::default(), + ) + .await + }; + let writer = PayloadWriter { + fut: fut.boxed_local(), + sender: tx, + }; + writer + }); + writer.sender.send_async(Ok(chunk)).await?; + state.received_length += len as u64; + // if state.received_length >= state.expected_length { + // self.finalize().await?; + // } + Ok(()) + } + + pub fn is_complete(&self) -> bool { + let Some(state) = self.0.as_ref() else { + return false; + }; + state.received_length >= state.expected_length + } + + pub async fn finalize(&mut self) -> Result<(), Error> { + let state = self.0.take().ok_or(Error::InvalidMessageInCurrentState)?; + let writer = state + .writer + .ok_or_else(|| Error::InvalidMessageInCurrentState)?; + drop(writer.sender); + let (tag, len) = writer.fut.await.map_err(Error::PayloadStore)?; + if *tag.hash() != state.entry.payload_digest { + return Err(Error::PayloadDigestMismatch); + } + if len != state.entry.payload_length { + return Err(Error::PayloadDigestMismatch); + } + // TODO: protect from gc + // we could store a tag for each blob + // however we really want reference counting here, not individual tags + // can also fallback to the naive impl from iroh-docs to just protect all docs hashes on gc + // let hash_and_format = *tag.inner(); + // let name = b"foo"; + // store.set_tag(name, Some(hash_and_format)); + Ok(()) + } + + pub fn is_active(&self) -> bool { + self.0.as_ref().map(|s| s.writer.is_some()).unwrap_or(false) + } + pub fn assert_inactive(&self) -> Result<(), Error> { + if self.is_active() { + Err(Error::InvalidMessageInCurrentState) + } else { + Ok(()) + } + } +} diff --git a/iroh-willow/src/session/reconciler.rs 
b/iroh-willow/src/session/reconciler.rs index 9e55d89c3a..00ab89e311 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -1,12 +1,7 @@ -use bytes::Bytes; -use futures_lite::{future::BoxedLocal, FutureExt, StreamExt}; +use futures_lite::StreamExt; use tracing::{debug, trace}; -use iroh_blobs::{ - store::{bao_tree::io::fsm::AsyncSliceReader, MapEntry, Store as PayloadStore}, - util::progress::IgnoreProgressSender, - TempTag, -}; +use iroh_blobs::store::Store as PayloadStore; use crate::{ proto::{ @@ -18,90 +13,34 @@ use crate::{ ReconciliationSendFingerprint, ReconciliationSendPayload, ReconciliationTerminatePayload, }, - willow::{AuthorisedEntry, Entry}, }, - session::{channels::MessageReceiver, AreaOfInterestIntersection, Error, Session}, - store::{EntryStore, ReadonlyStore, Shared, SplitAction, SyncConfig}, + session::{ + channels::MessageReceiver, payload::CurrentPayload, AreaOfInterestIntersection, Error, + Session, + }, + store::{ + broadcaster::{Broadcaster, Origin}, + EntryStore, ReadonlyStore, SplitAction, SyncConfig, + }, util::channel::WriteError, }; -#[derive(Debug)] -struct CurrentPayload { - entry: Entry, - writer: Option, -} - -#[derive(derive_more::Debug)] -struct PayloadWriter { - #[debug(skip)] - fut: BoxedLocal>, - sender: flume::Sender>, -} - -impl CurrentPayload { - async fn recv_chunk(&mut self, store: P, chunk: Bytes) -> anyhow::Result<()> { - let writer = self.writer.get_or_insert_with(move || { - let (tx, rx) = flume::bounded(1); - let fut = async move { - store - .import_stream( - rx.into_stream(), - iroh_blobs::BlobFormat::Raw, - IgnoreProgressSender::default(), - ) - .await - }; - let writer = PayloadWriter { - fut: fut.boxed_local(), - sender: tx, - }; - writer - }); - writer.sender.send_async(Ok(chunk)).await?; - Ok(()) - } - - fn is_active(&self) -> bool { - self.writer.is_some() - } - - async fn finalize(self) -> Result<(), Error> { - let writer = self - .writer - .ok_or_else(|| 
Error::InvalidMessageInCurrentState)?; - drop(writer.sender); - let (tag, len) = writer.fut.await.map_err(Error::PayloadStore)?; - if *tag.hash() != self.entry.payload_digest { - return Err(Error::PayloadDigestMismatch); - } - if len != self.entry.payload_length { - return Err(Error::PayloadDigestMismatch); - } - // TODO: protect from gc - // we could store a tag for each blob - // however we really want reference counting here, not individual tags - // can also fallback to the naive impl from iroh-docs to just protect all docs hashes on gc - // let hash_and_format = *tag.inner(); - // let name = b"foo"; - // store.set_tag(name, Some(hash_and_format)); - Ok(()) - } -} +use super::payload::send_payload_chunked; #[derive(derive_more::Debug)] pub struct Reconciler { session: Session, - store: Shared, + store: Broadcaster, recv: MessageReceiver, snapshot: S::Snapshot, - current_payload: Option, + current_payload: CurrentPayload, payload_store: P, } impl Reconciler { pub fn new( session: Session, - store: Shared, + store: Broadcaster, payload_store: P, recv: MessageReceiver, ) -> Result { @@ -112,7 +51,7 @@ impl Reconciler { payload_store, snapshot, session, - current_payload: None, + current_payload: CurrentPayload::new(), }) } @@ -127,12 +66,18 @@ impl Reconciler { } } Some(intersection) = self.session.next_aoi_intersection() => { + if self.session.mode().is_live() { + self.store.add_area(*self.session.id(), intersection.namespace, intersection.intersection.clone()); + } if our_role.is_alfie() { self.initiate(intersection).await?; } } } - if self.session.reconciliation_is_complete() && !self.has_active_payload() { + if self.session.reconciliation_is_complete() + && !self.session.mode().is_live() + && !self.current_payload.is_active() + { debug!("reconciliation complete, close session"); break; } @@ -223,7 +168,7 @@ impl Reconciler { message: ReconciliationAnnounceEntries, ) -> Result<(), Error> { trace!("on_announce_entries start"); - self.assert_no_active_payload()?; 
+ self.current_payload.assert_inactive()?; let (namespace, range_count) = self.session.on_announce_entries(&message).await?; if message.want_response { self.announce_and_send_entries( @@ -242,36 +187,25 @@ impl Reconciler { } async fn on_send_entry(&mut self, message: ReconciliationSendEntry) -> Result<(), Error> { - self.assert_no_active_payload()?; - let static_token = self + self.current_payload.assert_inactive()?; + self.session.decrement_pending_announced_entries()?; + let authorised_entry = self .session - .get_their_resource_eventually(|r| &mut r.static_tokens, message.static_token_handle) - .await; - - self.session.on_send_entry()?; - - let authorised_entry = AuthorisedEntry::try_from_parts( - message.entry.entry, - static_token, - message.dynamic_token, - )?; - - self.store.ingest_entry(&authorised_entry)?; - - self.current_payload = Some(CurrentPayload { - entry: authorised_entry.into_entry(), - writer: None, - }); - + .authorise_sent_entry( + message.entry.entry, + message.static_token_handle, + message.dynamic_token, + ) + .await?; + self.store + .ingest_entry(&authorised_entry, Origin::Remote(*self.session.id()))?; + self.current_payload + .set(authorised_entry.into_entry(), Some(message.entry.available))?; Ok(()) } async fn on_send_payload(&mut self, message: ReconciliationSendPayload) -> Result<(), Error> { - let state = self - .current_payload - .as_mut() - .ok_or(Error::InvalidMessageInCurrentState)?; - state + self.current_payload .recv_chunk(self.payload_store.clone(), message.bytes) .await?; Ok(()) @@ -281,29 +215,10 @@ impl Reconciler { &mut self, _message: ReconciliationTerminatePayload, ) -> Result<(), Error> { - let state = self - .current_payload - .take() - .ok_or(Error::InvalidMessageInCurrentState)?; - state.finalize().await?; + self.current_payload.finalize().await?; Ok(()) } - fn assert_no_active_payload(&self) -> Result<(), Error> { - if self.has_active_payload() { - Err(Error::InvalidMessageInCurrentState) - } else { - Ok(()) - } - } 
- - fn has_active_payload(&self) -> bool { - self.current_payload - .as_ref() - .map(|cp| cp.is_active()) - .unwrap_or(false) - } - async fn send_fingerprint( &mut self, range: ThreeDRange, @@ -375,26 +290,17 @@ impl Reconciler { // TODO: only send payload if configured to do so and/or under size limit. let send_payloads = true; + let chunk_size = 1024 * 64; if send_payloads { - let payload_entry = self - .payload_store - .get(&digest) - .await - .map_err(Error::PayloadStore)?; - if let Some(entry) = payload_entry { - let mut reader = entry.data_reader().await.map_err(Error::PayloadStore)?; - let len: u64 = entry.size().value(); - let chunk_size = 1024usize * 64; - let mut pos = 0; - while pos < len { - let bytes = reader - .read_at(pos, chunk_size) - .await - .map_err(Error::PayloadStore)?; - pos += bytes.len() as u64; - let msg = ReconciliationSendPayload { bytes }; - self.send(msg).await?; - } + if send_payload_chunked( + digest, + &self.payload_store, + &self.session, + chunk_size, + |bytes| ReconciliationSendPayload { bytes }.into(), + ) + .await? 
+ { let msg = ReconciliationTerminatePayload; self.send(msg).await?; } diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 56a0157180..83e5e25964 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -6,18 +6,23 @@ use tracing::{debug, error_span}; use crate::{ proto::sync::{ControlIssueGuarantee, LogicalChannel, Message, SetupBindAreaOfInterest}, session::{channels::LogicalChannelReceivers, Error, Scope, Session, SessionInit}, - store::{EntryStore, KeyStore, Shared}, + store::{broadcaster::Broadcaster, EntryStore, KeyStore, Shared}, util::channel::Receiver, }; -use super::{channels::ChannelReceivers, reconciler::Reconciler}; +use super::{ + channels::ChannelReceivers, + data::{DataReceiver, DataSender}, + reconciler::Reconciler, + SessionMode, +}; const INITIAL_GUARANTEES: u64 = u64::MAX; impl Session { pub async fn run( self, - store: Shared, + store: Broadcaster, key_store: Shared, payload_store: P, recv: ChannelReceivers, @@ -31,6 +36,7 @@ impl Session { mut static_tokens_recv, mut capability_recv, mut aoi_recv, + data_recv, }, } = recv; @@ -42,6 +48,26 @@ impl Session { Ok(()) }); + // Only setup data receiver if session is configured in live mode. + if init.mode == SessionMode::Live { + let store = store.clone(); + let payload_store = payload_store.clone(); + self.spawn(error_span!("dat:r"), move |session| async move { + DataReceiver::new(session, store, payload_store) + .run(data_recv) + .await?; + Ok(()) + }); + } + if init.mode == SessionMode::Live { + let store = store.clone(); + let payload_store = payload_store.clone(); + self.spawn(error_span!("dat:s"), move |session| async move { + DataSender::new(session, store, payload_store).run().await?; + Ok(()) + }); + } + // Spawn a task to handle incoming capabilities. self.spawn(error_span!("cap"), move |session| async move { while let Some(message) = capability_recv.try_next().await? 
{ @@ -78,6 +104,7 @@ impl Session { remaining = self.remaining_tasks(), "task completed" ); + // self.log_remaining_tasks(); result?; // Is this the right place for this check? It would run after each task // completion, so necessarily including the completion of the reconciliation @@ -87,10 +114,9 @@ impl Session { // TODO: We'll want to emit the completion event back to the application and // let it decide what to do (stop, keep open) - or pass relevant config in // SessionInit. - if self.reconciliation_is_complete() { + if !self.mode().is_live() && self.reconciliation_is_complete() { tracing::debug!("stop self: reconciliation is complete"); drop(guard); - // break; // Close all our send streams. // @@ -158,8 +184,11 @@ async fn setup( for (capability, aois) in init.interests.into_iter() { // TODO: implement private area intersection let intersection_handle = 0.into(); - let (our_capability_handle, message) = - session.bind_and_sign_capability(&key_store, intersection_handle, capability)?; + let (our_capability_handle, message) = session.bind_and_sign_capability( + &key_store, + intersection_handle, + capability.clone(), + )?; if let Some(message) = message { session.send(message).await?; } @@ -170,7 +199,7 @@ async fn setup( authorisation: our_capability_handle, }; // TODO: We could skip the clone if we re-enabled sending by reference. 
- session.bind_area_of_interest(Scope::Ours, msg.clone())?; + session.bind_area_of_interest(Scope::Ours, msg.clone(), &capability)?; session.send(msg).await?; } } diff --git a/iroh-willow/src/session/state.rs b/iroh-willow/src/session/state.rs index 34bc930230..f1afbd3a9b 100644 --- a/iroh-willow/src/session/state.rs +++ b/iroh-willow/src/session/state.rs @@ -11,17 +11,19 @@ use futures_lite::Stream; use tracing::{Instrument, Span}; use crate::{ + actor::SessionId, net::InitialTransmission, proto::{ challenge::ChallengeState, grouping::ThreeDRange, keys::NamespaceId, sync::{ - AreaOfInterestHandle, CapabilityHandle, Channel, CommitmentReveal, IntersectionHandle, - IsHandle, LogicalChannel, Message, ReadCapability, ReconciliationAnnounceEntries, - ReconciliationSendFingerprint, SetupBindAreaOfInterest, SetupBindReadCapability, - SetupBindStaticToken, StaticToken, StaticTokenHandle, + AreaOfInterestHandle, CapabilityHandle, Channel, CommitmentReveal, DynamicToken, + IntersectionHandle, IsHandle, LogicalChannel, Message, ReadCapability, + ReconciliationAnnounceEntries, ReconciliationSendFingerprint, SetupBindAreaOfInterest, + SetupBindReadCapability, SetupBindStaticToken, StaticToken, StaticTokenHandle, }, + willow::{AuthorisedEntry, Entry}, }, store::{KeyStore, Shared}, util::{channel::WriteError, queue::Queue, task::JoinMap}, @@ -30,7 +32,7 @@ use crate::{ use super::{ channels::ChannelSenders, resource::{ResourceMap, ResourceMaps}, - AreaOfInterestIntersection, Error, Role, Scope, + AreaOfInterestIntersection, Error, Role, Scope, SessionMode, }; #[derive(Debug, Clone)] @@ -38,7 +40,9 @@ pub struct Session(Rc); #[derive(derive_more::Debug)] struct SessionInner { + id: SessionId, our_role: Role, + mode: SessionMode, state: RefCell, send: ChannelSenders, tasks: RefCell>>, @@ -46,12 +50,16 @@ struct SessionInner { impl Session { pub fn new( - send: ChannelSenders, + id: SessionId, + mode: SessionMode, our_role: Role, + send: ChannelSenders, initial_transmission: 
InitialTransmission, ) -> Self { let state = SessionState::new(initial_transmission); Self(Rc::new(SessionInner { + mode, + id, our_role, state: RefCell::new(state), send, @@ -59,6 +67,14 @@ impl Session { })) } + pub fn id(&self) -> &SessionId { + &self.0.id + } + + pub fn mode(&self) -> &SessionMode { + &self.0.mode + } + pub fn spawn(&self, span: Span, f: F) where F: FnOnce(Session) -> Fut, @@ -89,6 +105,14 @@ impl Session { tasks.len() } + // pub fn log_remaining_tasks(&self) { + // let tasks = self.0.tasks.borrow(); + // for t in tasks.iter() { + // let _guard = t.0.enter(); + // tracing::debug!("active"); + // } + // } + pub async fn send(&self, message: impl Into) -> Result<(), WriteError> { self.0.send.send(message).await } @@ -185,11 +209,11 @@ impl Session { if let Some(range_count) = message.covers { state.mark_range_covered(message.receiver_handle, range_count)?; } - if state.pending_entries.is_some() { + if state.pending_announced_entries.is_some() { return Err(Error::InvalidMessageInCurrentState); } if message.count != 0 { - state.pending_entries = Some(message.count); + state.pending_announced_entries = Some(message.count); } if message.want_response { let range_count = state.their_range_counter; @@ -279,14 +303,15 @@ impl Session { pub fn reconciliation_is_complete(&self) -> bool { let state = self.state(); tracing::debug!( - "reconciliation_is_complete started {} pending_ranges {}, pending_entries {:?}", + "reconciliation_is_complete started {} pending_ranges {}, pending_entries {:?} mode {:?}", state.reconciliation_started, state.our_uncovered_ranges.len(), - state.pending_entries + state.pending_announced_entries, + self.mode(), ); state.reconciliation_started && state.our_uncovered_ranges.is_empty() - && state.pending_entries.is_none() + && state.pending_announced_entries.is_none() } pub fn reveal_commitment(&self) -> Result { @@ -315,25 +340,62 @@ impl Session { pub fn bind_area_of_interest( &self, scope: Scope, - msg: SetupBindAreaOfInterest, 
+ message: SetupBindAreaOfInterest, + capability: &ReadCapability, ) -> Result<(), Error> { - self.state_mut().bind_area_of_interest(scope, msg) + self.state_mut() + .bind_area_of_interest(scope, message, capability) } pub async fn on_bind_area_of_interest( &self, message: SetupBindAreaOfInterest, ) -> Result<(), Error> { - self.get_their_resource_eventually(|r| &mut r.capabilities, message.authorisation) + let capability = self + .get_their_resource_eventually(|r| &mut r.capabilities, message.authorisation) .await; - self.bind_area_of_interest(Scope::Theirs, message)?; + self.state_mut() + .bind_area_of_interest(Scope::Theirs, message, &capability)?; Ok(()) } - pub fn on_send_entry(&self) -> Result<(), Error> { - self.state_mut().decrement_pending_entries() + pub async fn authorise_sent_entry( + &self, + entry: Entry, + static_token_handle: StaticTokenHandle, + dynamic_token: DynamicToken, + ) -> Result { + let static_token = self + .get_their_resource_eventually(|r| &mut r.static_tokens, static_token_handle) + .await; + + let authorised_entry = AuthorisedEntry::try_from_parts(entry, static_token, dynamic_token)?; + + Ok(authorised_entry) } + // pub async fn on_send_entry2(&self, entry: Entry, static_token_handle: StaticTokenHandle, dynamic_token: DynamicToken) -> Result<(), Error> { + // let static_token = self + // .get_their_resource_eventually(|r| &mut r.static_tokens, message.static_token_handle) + // .await; + // + // let authorised_entry = AuthorisedEntry::try_from_parts( + // message.entry.entry, + // static_token, + // message.dynamic_token, + // )?; + // + // self.state_mut().decrement_pending_announced_entries(); + // + // Ok(authorised_entry) + // } + + pub fn decrement_pending_announced_entries(&self) -> Result<(), Error> { + self.state_mut().decrement_pending_announced_entries() + } + + // pub fn prepare_entry_for_send(&self, entry: AuthorisedEntry) -> Result< + pub fn bind_our_static_token( &self, static_token: StaticToken, @@ -390,7 +452,7 @@ 
struct SessionState { our_range_counter: u64, their_range_counter: u64, our_uncovered_ranges: HashSet<(AreaOfInterestHandle, u64)>, - pending_entries: Option, + pending_announced_entries: Option, intersection_queue: Queue, } @@ -409,7 +471,7 @@ impl SessionState { our_range_counter: 0, their_range_counter: 0, our_uncovered_ranges: Default::default(), - pending_entries: Default::default(), + pending_announced_entries: Default::default(), intersection_queue: Default::default(), } } @@ -418,17 +480,8 @@ impl SessionState { &mut self, scope: Scope, msg: SetupBindAreaOfInterest, + capability: &ReadCapability, ) -> Result<(), Error> { - let capability = match scope { - Scope::Ours => self - .our_resources - .capabilities - .try_get(&msg.authorisation)?, - Scope::Theirs => self - .their_resources - .capabilities - .try_get(&msg.authorisation)?, - }; capability.try_granted_area(&msg.area_of_interest.area)?; let namespace = *capability.granted_namespace(); @@ -472,14 +525,14 @@ impl SessionState { Ok(()) } - fn decrement_pending_entries(&mut self) -> Result<(), Error> { + fn decrement_pending_announced_entries(&mut self) -> Result<(), Error> { let remaining = self - .pending_entries + .pending_announced_entries .as_mut() .ok_or(Error::InvalidMessageInCurrentState)?; *remaining -= 1; if *remaining == 0 { - self.pending_entries = None; + self.pending_announced_entries = None; } Ok(()) } diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index e270333838..8d5db5eb43 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -2,14 +2,19 @@ use std::{cell::RefCell, collections::HashMap, rc::Rc, sync::Arc}; use anyhow::Result; -use crate::proto::{ - grouping::{Range, RangeEnd, ThreeDRange}, - keys::{NamespaceSecretKey, NamespaceSignature, UserId, UserSecretKey, UserSignature}, - meadowcap::{self}, - sync::Fingerprint, - willow::{AuthorisedEntry, Entry, NamespaceId}, +use crate::{ + actor::SessionId, + proto::{ + grouping::{Range, RangeEnd, ThreeDRange}, 
+ keys::{NamespaceSecretKey, NamespaceSignature, UserId, UserSecretKey, UserSignature}, + meadowcap, + sync::Fingerprint, + willow::{AuthorisedEntry, Entry, NamespaceId}, + }, }; +pub mod broadcaster; + #[derive(Debug, Clone, Copy)] pub struct SyncConfig { /// Up to how many values to send immediately, before sending only a fingerprint. @@ -112,37 +117,6 @@ impl Shared { pub fn fingerprint(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result { self.0.borrow().fingerprint(namespace, range) } - - // pub fn split_range( - // &self, - // namespace: NamespaceId, - // range: &ThreeDRange, - // config: &SyncConfig, - // ) -> Result>> { - // let this = self.clone(); - // this.0.borrow().split_range(namespace, range, config) - // } - // - // pub fn count(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result { - // self.0.borrow().count(namespace, range) - // } - // - // pub fn get_entries_with_authorisation<'a>( - // &'a self, - // namespace: NamespaceId, - // range: &ThreeDRange, - // ) -> impl Iterator> + 'a { - // self.0.borrow().count(namespace, range) - // } - // - // fn get_entries<'a>( - // &'a self, - // namespace: NamespaceId, - // range: &ThreeDRange, - // ) -> impl Iterator> + 'a { - // self.get_entries_with_authorisation(namespace, range) - // .map(|e| e.map(|e| e.into_entry())) - // } } impl Shared { @@ -202,11 +176,19 @@ impl KeyStore for MemoryKeyStore { } } -#[derive(Debug, Default)] +#[derive(Debug)] pub struct MemoryStore { entries: HashMap>, } +impl Default for MemoryStore { + fn default() -> Self { + Self { + entries: Default::default(), + } + } +} + impl ReadonlyStore for MemoryStore { fn fingerprint(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result { let mut fingerprint = Fingerprint::default(); @@ -337,21 +319,22 @@ impl ReadonlyStore for Arc { impl EntryStore for MemoryStore { type Snapshot = Arc; - // type KeyStore = MemoryKeyStore; + fn snapshot(&mut self) -> Result { Ok(Arc::new(Self { entries: self.entries.clone(), 
})) } - // fn key_store(&mut self) -> &mut Self::KeyStore { - // &mut self.keys - // } + fn ingest_entry(&mut self, entry: &AuthorisedEntry) -> Result { let entries = self.entries.entry(entry.namespace_id()).or_default(); let new = entry.entry(); let mut to_remove = vec![]; for (i, existing) in entries.iter().enumerate() { let existing = existing.entry(); + if existing == new { + return Ok(false); + } if existing.subspace_id == new.subspace_id && existing.path.is_prefix_of(&new.path) && existing.is_newer_than(new) diff --git a/iroh-willow/src/store/broadcaster.rs b/iroh-willow/src/store/broadcaster.rs new file mode 100644 index 0000000000..75ed18e6ff --- /dev/null +++ b/iroh-willow/src/store/broadcaster.rs @@ -0,0 +1,134 @@ +use std::{ + collections::HashMap, + sync::{Arc, Mutex}, +}; +use tokio::sync::broadcast; + +use crate::{ + proto::{ + grouping::Area, + willow::{AuthorisedEntry, NamespaceId}, + }, + store::{EntryStore, Shared}, +}; + +use super::SessionId; + +const BROADCAST_CAP: usize = 1024; + +#[derive(Debug, Clone, Copy)] +pub enum Origin { + Local, + Remote(SessionId), +} + +#[derive(Debug)] +pub struct Broadcaster { + store: Shared, + broadcast: Arc>, +} + +impl Clone for Broadcaster { + fn clone(&self) -> Self { + Broadcaster { + store: self.store.clone(), + broadcast: self.broadcast.clone(), + } + } +} + +impl std::ops::Deref for Broadcaster { + type Target = Shared; + fn deref(&self) -> &Self::Target { + &self.store + } +} + +impl Broadcaster { + pub fn new(store: Shared) -> Self { + Self { + store, + broadcast: Default::default(), + } + } + + pub fn subscribe(&mut self, session_id: SessionId) -> broadcast::Receiver { + self.broadcast.lock().unwrap().subscribe(session_id) + } + + pub fn unsubscribe(&mut self, session_id: &SessionId) { + self.broadcast.lock().unwrap().unsubscribe(session_id) + } + + pub fn ingest_entry( + &mut self, + entry: &AuthorisedEntry, + origin: Origin, + ) -> anyhow::Result { + if self.store.ingest_entry(entry)? 
{ + self.broadcast.lock().unwrap().broadcast(entry, origin); + Ok(true) + } else { + Ok(false) + } + } + + pub fn add_area(&mut self, session: SessionId, namespace: NamespaceId, area: Area) { + self.broadcast + .lock() + .unwrap() + .add_area(session, namespace, area); + } +} + +#[derive(Debug, Default)] +struct BroadcasterInner { + senders: HashMap>, + areas: HashMap>>, +} + +impl BroadcasterInner { + fn subscribe(&mut self, session: SessionId) -> broadcast::Receiver { + self.senders + .entry(session) + .or_insert_with(|| broadcast::Sender::new(BROADCAST_CAP)) + .subscribe() + } + + fn unsubscribe(&mut self, session: &SessionId) { + self.senders.remove(session); + self.areas.retain(|_namespace, sessions| { + sessions.remove(session); + !sessions.is_empty() + }); + } + + fn add_area(&mut self, session: SessionId, namespace: NamespaceId, area: Area) { + self.areas + .entry(namespace) + .or_default() + .entry(session) + .or_default() + .push(area) + } + + fn broadcast(&mut self, entry: &AuthorisedEntry, origin: Origin) { + let Some(sessions) = self.areas.get_mut(&entry.namespace_id()) else { + return; + }; + for (session_id, areas) in sessions { + if let Origin::Remote(origin) = origin { + if origin == *session_id { + continue; + } + } + if areas.iter().any(|area| area.includes_entry(entry.entry())) { + self.senders + .get(session_id) + .expect("session sender to exist") + .send(entry.clone()) + .ok(); + } + } + } +} diff --git a/iroh-willow/src/util/task.rs b/iroh-willow/src/util/task.rs index c1b5d09e85..44ea57a7e5 100644 --- a/iroh-willow/src/util/task.rs +++ b/iroh-willow/src/util/task.rs @@ -11,7 +11,7 @@ use futures_concurrency::future::{future_group, FutureGroup}; use futures_lite::Stream; use tokio::task::JoinError; -#[derive(derive_more::Debug, Eq, PartialEq)] +#[derive(derive_more::Debug, Clone, Copy, Hash, Eq, PartialEq)] #[debug("{:?}", _0)] pub struct TaskKey(future_group::Key); @@ -26,7 +26,7 @@ pub struct TaskKey(future_group::Key); #[derive(Debug)] 
pub struct JoinMap { tasks: future_group::Keyed>, - keys: HashMap, + keys: HashMap, } impl Default for JoinMap { @@ -48,8 +48,9 @@ impl JoinMap { pub fn spawn_local + 'static>(&mut self, key: K, future: F) -> TaskKey { let handle = tokio::task::spawn_local(future); let k = self.tasks.insert(handle); + let k = TaskKey(k); self.keys.insert(k, key); - TaskKey(k) + k } /// Poll for one of the tasks in the map to complete. @@ -60,13 +61,13 @@ impl JoinMap { let Some((key, item)) = std::task::ready!(Pin::new(&mut self.tasks).poll_next(cx)) else { return Poll::Ready(None); }; - let key = self.keys.remove(&key).expect("key to exist"); + let key = self.keys.remove(&TaskKey(key)).expect("key to exist"); Poll::Ready(Some((key, item))) } /// Remove a task from the map. pub fn remove(&mut self, task_key: &TaskKey) -> bool { - self.keys.remove(&task_key.0); + self.keys.remove(&task_key); self.tasks.remove(task_key.0) } @@ -79,6 +80,10 @@ impl JoinMap { pub fn len(&self) -> usize { self.tasks.len() } + + pub fn iter(&self) -> impl Iterator { + self.keys.iter().map(|(a, b)| (b, a)) + } } impl JoinMap { From c73a712dc7e9862aa9831d5a91d507ebffc81269 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Mon, 3 Jun 2024 12:46:18 +0200 Subject: [PATCH 053/198] feat: live data mode works between two peers --- Cargo.lock | 101 +++++----- iroh-willow/Cargo.toml | 5 +- iroh-willow/src/actor.rs | 154 +++++++++++---- iroh-willow/src/net.rs | 268 +++++++++++++------------- iroh-willow/src/session/channels.rs | 29 +-- iroh-willow/src/session/data.rs | 13 +- iroh-willow/src/session/error.rs | 18 +- iroh-willow/src/session/reconciler.rs | 13 +- iroh-willow/src/session/run.rs | 114 ++++++----- iroh-willow/src/session/state.rs | 33 ++-- iroh-willow/src/store.rs | 45 +++-- iroh-willow/src/store/broadcaster.rs | 22 +-- iroh-willow/src/util/task.rs | 13 +- 13 files changed, 479 insertions(+), 349 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0952a786bf..077eea0704 100644 --- 
a/Cargo.lock +++ b/Cargo.lock @@ -201,7 +201,7 @@ checksum = "7378575ff571966e99a744addeff0bff98b8ada0dedf1956d59e634db95eaac1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", "synstructure 0.13.1", ] @@ -224,7 +224,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -235,7 +235,7 @@ checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -335,7 +335,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -686,7 +686,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -1034,7 +1034,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -1058,7 +1058,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -1069,7 +1069,7 @@ checksum = "733cabb43482b1a1b53eee8583c2b9e8684d592215ea83efd305dd31bc2f0178" dependencies = [ "darling_core", "quote", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -1138,7 +1138,7 @@ checksum = "5fe87ce4529967e0ba1dcf8450bab64d97dfd5010a6256187ffe2e43e6f0e049" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -1168,7 +1168,7 @@ checksum = "2bba3e9872d7c58ce7ef0fcf1844fcc3e23ef2a58377b50df35dd98e42a5726e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", "unicode-xid", ] @@ -1251,7 +1251,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -1396,7 +1396,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.66", + "syn 
2.0.55", ] [[package]] @@ -1409,7 +1409,7 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -1429,7 +1429,7 @@ checksum = "5c785274071b1b420972453b306eeca06acf4633829db4223b58a2a8c5953bc4" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -1684,7 +1684,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -2947,9 +2947,10 @@ dependencies = [ "test-strategy", "thiserror", "tokio", + "tokio-util", "tracing", "tracing-subscriber", - "zerocopy 0.8.0-alpha.7", + "zerocopy 0.8.0-alpha.14", ] [[package]] @@ -3470,7 +3471,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -3690,7 +3691,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -3721,7 +3722,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -3827,7 +3828,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -4038,7 +4039,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -4376,7 +4377,7 @@ checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -4925,7 +4926,7 @@ checksum = "6048858004bcff69094cd972ed40a32500f153bd3be9f716b2eed2e8217c4838" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -5006,7 +5007,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -5241,7 +5242,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "2ff9eaf853dec4c8802325d8b6d3dffa86cc707fd7a1a4cdbf416e13b061787a" dependencies = [ "quote", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -5288,7 +5289,7 @@ dependencies = [ "proc-macro2", "quote", "struct_iterable_internal", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -5306,7 +5307,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta-derive", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -5317,7 +5318,7 @@ checksum = "a60bcaff7397072dca0017d1db428e30d5002e00b6847703e2e42005c95fbe00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -5348,7 +5349,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -5361,7 +5362,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -5423,9 +5424,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.66" +version = "2.0.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c42f3f41a2de00b01c0aaad383c5a45241efc8b2d1eda5661812fda5f3cdcff5" +checksum = "002a1b3dbf967edfafc32655d0f377ab0bb7b994aa1d32c8cc7e9b8bf3ebb8f0" dependencies = [ "proc-macro2", "quote", @@ -5475,7 +5476,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -5561,7 +5562,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -5595,7 +5596,7 @@ checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -5693,7 +5694,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -5928,7 +5929,7 @@ checksum = 
"34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -6191,7 +6192,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", "wasm-bindgen-shared", ] @@ -6225,7 +6226,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -6411,7 +6412,7 @@ checksum = "12168c33176773b86799be25e2a2ba07c7aab9968b37541f1094dbd7a60c8946" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -6422,7 +6423,7 @@ checksum = "f6fc35f58ecd95a9b71c4f2329b911016e6bec66b3f2e6a4aad86bd2e99e2f9b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -6433,7 +6434,7 @@ checksum = "9d8dc32e0095a7eeccebd0e3f09e9509365ecb3fc6ac4d6f5f14a3f6392942d1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -6444,7 +6445,7 @@ checksum = "08990546bf4edef8f431fa6326e032865f27138718c587dc21bc0265bbcb57cc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] @@ -6737,11 +6738,11 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.0-alpha.7" +version = "0.8.0-alpha.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a24d6914f948ad0c1eaf3f2cb03a66e674714280d020ea8d955f765f8abb2e7a" +checksum = "c48f429abfd70db34bee590d6b9ebe1f59b28c1ec0be3259910950d650148859" dependencies = [ - "zerocopy-derive 0.8.0-alpha.7", + "zerocopy-derive 0.8.0-alpha.14", ] [[package]] @@ -6752,18 +6753,18 @@ checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] name = "zerocopy-derive" -version = "0.8.0-alpha.7" +version = "0.8.0-alpha.14" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e862f7936bea2c96af2769d9d60ff534da9af29dd59943519403256f30bf5ac3" +checksum = "1d6f90ab9afe5283eb8184f0ae6188deae60d128091f1b3fe7bcbe5b2cb500a3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.55", ] [[package]] diff --git a/iroh-willow/Cargo.toml b/iroh-willow/Cargo.toml index 5b0ec37d8b..929fb0ff26 100644 --- a/iroh-willow/Cargo.toml +++ b/iroh-willow/Cargo.toml @@ -17,7 +17,7 @@ workspace = true [dependencies] anyhow = "1" bytes = { version = "1.4", features = ["serde"] } -derive_more = { version = "1.0.0-beta.1", features = ["debug", "deref", "display", "from", "try_into", "into", "as_ref", "try_from"] } +derive_more = { version = "1.0.0-beta.6", features = ["debug", "deref", "display", "from", "try_into", "into", "as_ref", "try_from"] } ed25519-dalek = { version = "2.0.0", features = ["serde", "rand_core"] } flume = "0.11" futures-concurrency = "7.6.0" @@ -36,8 +36,9 @@ serde = { version = "1.0.164", features = ["derive"] } strum = { version = "0.26", features = ["derive"] } thiserror = "1" tokio = { version = "1", features = ["sync"] } +tokio-util = { version = "0.7", features = ["io-util", "io"] } tracing = "0.1" -zerocopy = { version = "0.8.0-alpha.7", features = ["derive"] } +zerocopy = { version = "0.8.0-alpha.9", features = ["derive"] } [dev-dependencies] iroh-test = { path = "../iroh-test" } diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/actor.rs index 25fc7ab1d6..4b152d9a69 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -5,6 +5,7 @@ use futures_util::future::{self, FutureExt}; use iroh_base::key::NodeId; use iroh_blobs::store::Store as PayloadStore; use tokio::sync::oneshot; +use tokio_util::sync::CancellationToken; use tracing::{debug, error, error_span, trace, warn, Instrument}; use crate::{ @@ -13,7 +14,7 @@ use crate::{ grouping::ThreeDRange, keys::NamespaceId, meadowcap, - willow::{AuthorisedEntry, Entry}, + 
willow::{AuthorisedEntry, Entry, WriteCapability}, }, session::{Channels, Error, Role, Session, SessionInit}, store::{ @@ -79,6 +80,21 @@ impl ActorHandle { reply_rx.await??; Ok(()) } + pub async fn insert_entry( + &self, + entry: Entry, + capability: WriteCapability, + ) -> anyhow::Result<()> { + let (reply, reply_rx) = oneshot::channel(); + self.send(ToActor::InsertEntry { + entry, + capability, + reply, + }) + .await?; + reply_rx.await??; + Ok(()) + } pub async fn insert_secret( &self, @@ -95,7 +111,7 @@ impl ActorHandle { &self, namespace: NamespaceId, range: ThreeDRange, - ) -> anyhow::Result> { + ) -> anyhow::Result>> { let (tx, rx) = flume::bounded(1024); self.send(ToActor::GetEntries { namespace, @@ -114,27 +130,18 @@ impl ActorHandle { channels: Channels, init: SessionInit, ) -> anyhow::Result { - let (on_finish_tx, on_finish_rx) = oneshot::channel(); + let (reply, reply_rx) = oneshot::channel(); self.send(ToActor::InitSession { our_role, initial_transmission, peer, channels, init, - on_finish: on_finish_tx, + reply, }) .await?; - let on_finish = on_finish_rx - .map(|r| match r { - Ok(Ok(())) => Ok(()), - Ok(Err(err)) => Err(Arc::new(err.into())), - Err(_) => Err(Arc::new(Error::ActorFailed)), - }) - .boxed(); - let on_finish = on_finish.shared(); - let handle = SessionHandle { on_finish }; - Ok(handle) + reply_rx.await? } } @@ -154,14 +161,24 @@ impl Drop for ActorHandle { #[derive(Debug)] pub struct SessionHandle { on_finish: future::Shared>>>, + finish: CancellationToken, } impl SessionHandle { /// Wait for the session to finish. /// /// Returns an error if the session failed to complete. - pub async fn on_finish(self) -> Result<(), Arc> { - self.on_finish.await + pub async fn on_finish(&self) -> Result<(), Arc> { + self.on_finish.clone().await + } + + /// Finish the session gracefully. + /// + /// After calling this, no further protocol messages will be sent from this node. + /// Previously queued messages will still be sent out. 
The session will only be closed + /// once the other peer closes their senders as well. + pub fn finish(&self) { + self.finish.cancel(); } } @@ -174,18 +191,24 @@ pub enum ToActor { #[debug(skip)] channels: Channels, init: SessionInit, - on_finish: oneshot::Sender>, + // on_finish: oneshot::Sender>, + reply: oneshot::Sender>, }, GetEntries { namespace: NamespaceId, range: ThreeDRange, #[debug(skip)] - reply: flume::Sender, + reply: flume::Sender>, }, IngestEntry { entry: AuthorisedEntry, reply: oneshot::Sender>, }, + InsertEntry { + entry: Entry, + capability: WriteCapability, + reply: oneshot::Sender>, + }, InsertSecret { secret: meadowcap::SecretKey, reply: oneshot::Sender>, @@ -234,7 +257,11 @@ impl StorageThread { } break; } - Ok(msg) => self.handle_message(msg)?, + Ok(msg) => { + if self.handle_message(msg).is_err() { + warn!("failed to send reply: receiver dropped"); + } + } }, Some((id, res)) = self.session_tasks.next(), if !self.session_tasks.is_empty() => { let res = match res { @@ -254,7 +281,7 @@ impl StorageThread { id } - fn handle_message(&mut self, message: ToActor) -> Result<(), Error> { + fn handle_message(&mut self, message: ToActor) -> Result<(), SendReplyError> { trace!(%message, "tick: handle_message"); match message { ToActor::Shutdown { .. 
} => unreachable!("handled in run"), @@ -264,7 +291,7 @@ impl StorageThread { our_role, initial_transmission, init, - on_finish, + reply, } => { let Channels { send, recv } = channels; let id = self.next_session_id(); @@ -274,46 +301,72 @@ impl StorageThread { let key_store = self.key_store.clone(); let payload_store = self.payload_store.clone(); + let finish = CancellationToken::new(); + let future = session - .run(store, key_store, payload_store, recv, init) + .run(store, key_store, payload_store, recv, init, finish.clone()) .instrument(error_span!("session", peer = %peer.fmt_short())); let task_key = self.session_tasks.spawn_local(id, future); + let (on_finish_tx, on_finish_rx) = oneshot::channel(); + let active_session = ActiveSession { - on_finish, + on_finish: on_finish_tx, task_key, peer, }; self.sessions.insert(id, active_session); + let on_finish = on_finish_rx + .map(|r| match r { + Ok(Ok(())) => Ok(()), + Ok(Err(err)) => Err(Arc::new(err.into())), + Err(_) => Err(Arc::new(Error::ActorFailed)), + }) + .boxed() + .shared(); + let handle = SessionHandle { on_finish, finish }; + send_reply(reply, Ok(handle)) } ToActor::GetEntries { namespace, range, reply, } => { - // TODO: We don't want to use a snapshot here. 
- let snapshot = self.store.snapshot()?; - let entries = snapshot - .get_entries(namespace, &range) - .filter_map(|r| r.ok()); - for entry in entries { - reply.send(entry).ok(); + let snapshot = self.store.snapshot(); + match snapshot { + Ok(snapshot) => { + iter_to_channel(reply, Ok(snapshot.get_entries(namespace, &range))) + } + Err(err) => reply.send(Err(err)).map_err(send_reply_error), } } ToActor::IngestEntry { entry, reply } => { let res = self.store.ingest_entry(&entry, Origin::Local); - reply.send(res).ok(); + send_reply(reply, res) } + ToActor::InsertEntry { + entry, + capability, + reply, + } => send_reply_with(reply, self, |slf| { + let user_id = capability.receiver().id(); + let secret_key = slf + .key_store + .get_user(&user_id) + .ok_or(Error::MissingUserKey(user_id))?; + let authorised_entry = entry.attach_authorisation(capability, &secret_key)?; + slf.store + .ingest_entry(&authorised_entry, Origin::Local) + .map_err(Error::Store) + }), ToActor::InsertSecret { secret, reply } => { let res = self.key_store.insert(secret); - reply.send(res.map_err(anyhow::Error::from)).ok(); + send_reply(reply, res.map_err(anyhow::Error::from)) } } - Ok(()) } fn complete_session(&mut self, session_id: &SessionId, result: Result<(), Error>) { - self.store.unsubscribe(session_id); let session = self.sessions.remove(session_id); if let Some(session) = session { session.on_finish.send(result).ok(); @@ -323,3 +376,36 @@ impl StorageThread { } } } + +#[derive(Debug)] +struct SendReplyError; + +fn send_reply(sender: oneshot::Sender, value: T) -> Result<(), SendReplyError> { + sender.send(value).map_err(send_reply_error) +} + +fn send_reply_with( + sender: oneshot::Sender>, + this: &mut StorageThread, + f: impl FnOnce(&mut StorageThread) -> Result, +) -> Result<(), SendReplyError> { + sender.send(f(this)).map_err(send_reply_error) +} + +fn send_reply_error(_err: T) -> SendReplyError { + SendReplyError +} +fn iter_to_channel( + channel: flume::Sender>, + iter: 
anyhow::Result>>, +) -> Result<(), SendReplyError> { + match iter { + Err(err) => channel.send(Err(err)).map_err(send_reply_error)?, + Ok(iter) => { + for item in iter { + channel.send(item).map_err(send_reply_error)?; + } + } + } + Ok(()) +} diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index ec50d3227e..e44ea3f7a8 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -10,7 +10,7 @@ use tokio::{ use tracing::{debug, error_span, field::Empty, instrument, trace, warn, Instrument, Span}; use crate::{ - actor::ActorHandle, + actor::{self, ActorHandle}, proto::sync::{ AccessChallenge, ChallengeHash, Channel, LogicalChannel, Message, CHALLENGE_HASH_LENGTH, MAX_PAYLOAD_SIZE_POWER, @@ -36,11 +36,12 @@ pub async fn run( conn: Connection, our_role: Role, init: SessionInit, -) -> anyhow::Result<()> { - debug!(?our_role, "connected"); +) -> anyhow::Result { let peer = iroh_net::magic_endpoint::get_remote_node_id(&conn)?; - Span::current().record("peer", peer.fmt_short()); - let mut join_set = JoinSet::new(); + Span::current().record("peer", tracing::field::display(peer.fmt_short())); + debug!(?our_role, "connected"); + + let mut tasks = JoinSet::new(); let (mut control_send_stream, mut control_recv_stream) = match our_role { Role::Alfie => conn.open_bi().await?, @@ -54,7 +55,7 @@ pub async fn run( debug!("exchanged commitments"); let (control_send, control_recv) = spawn_channel( - &mut join_set, + &mut tasks, Channel::Control, CHANNEL_CAP, CHANNEL_CAP, @@ -63,7 +64,7 @@ pub async fn run( control_recv_stream, ); - let (logical_send, logical_recv) = open_logical_channels(&mut join_set, conn, our_role).await?; + let (logical_send, logical_recv) = open_logical_channels(&mut tasks, conn, our_role).await?; debug!("logical channels ready"); let channels = Channels { send: ChannelSenders { @@ -79,15 +80,33 @@ pub async fn run( .init_session(peer, our_role, initial_transmission, channels, init) .await?; - join_set.spawn(async move { - 
handle.on_finish().await?; - tracing::info!("session finished"); - Ok(()) - }); + Ok(SessionHandle { handle, tasks }) +} - join_all(join_set).await?; - debug!("all tasks finished"); - Ok(()) +#[derive(Debug)] +pub struct SessionHandle { + handle: actor::SessionHandle, + tasks: JoinSet>, +} + +impl SessionHandle { + /// Finish the session gracefully. + /// + /// After calling this, no further protocol messages will be sent from this node. + /// Previously queued messages will still be sent out. The session will only be closed + /// once the other peer closes their senders as well. + pub fn finish(&self) { + self.handle.finish() + } + + /// Wait for the session to finish. + /// + /// Returns an error if the session failed to complete. + pub async fn join(&mut self) -> anyhow::Result<()> { + let session_res = self.handle.on_finish().await; + let net_tasks_res = join_all(&mut self.tasks).await; + session_res.or(net_tasks_res) + } } #[derive(Debug, thiserror::Error)] @@ -160,6 +179,7 @@ async fn open_logical_channels( let aoi = take_and_spawn_channel(LogicalChannel::AreaOfInterest)?; let cap = take_and_spawn_channel(LogicalChannel::Capability)?; let dat = take_and_spawn_channel(LogicalChannel::Data)?; + Ok(( LogicalChannelSenders { reconciliation: rec.0, @@ -212,7 +232,7 @@ async fn recv_loop(mut recv_stream: RecvStream, mut channel_writer: Writer) -> a trace!(len = buf.bytes.len(), "recv"); } channel_writer.close(); - debug!("closed"); + trace!("close"); Ok(()) } @@ -222,9 +242,8 @@ async fn send_loop(mut send_stream: SendStream, channel_reader: Reader) -> anyho send_stream.write_chunk(data).await?; trace!(len, "sent"); } - debug!("close"); send_stream.finish().await?; - debug!("closed"); + trace!("close"); Ok(()) } @@ -259,7 +278,7 @@ pub struct InitialTransmission { pub their_max_payload_size: u64, } -async fn join_all(mut join_set: JoinSet>) -> anyhow::Result<()> { +async fn join_all(join_set: &mut JoinSet>) -> anyhow::Result<()> { let mut final_result = Ok(()); let 
mut joined = 0; while let Some(res) = join_set.join_next().await { @@ -284,7 +303,6 @@ mod tests { use std::{collections::BTreeSet, time::Instant}; use futures_lite::StreamExt; - use futures_util::FutureExt; use iroh_base::key::SecretKey; use iroh_blobs::store::Store as PayloadStore; use iroh_net::{MagicEndpoint, NodeAddr, NodeId}; @@ -312,59 +330,29 @@ mod tests { async fn smoke() -> anyhow::Result<()> { iroh_test::logging::setup_multithreaded(); let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(1); - let n_betty: usize = std::env::var("N_BETTY") - .as_deref() - .unwrap_or("1000") - .parse() - .unwrap(); - let n_alfie: usize = std::env::var("N_ALFIE") - .as_deref() - .unwrap_or("1000") - .parse() - .unwrap(); + let n_betty = parse_env_var("N_BETTY", 100); + let n_alfie = parse_env_var("N_ALFIE", 100); let (ep_alfie, node_id_alfie, _) = create_endpoint(&mut rng).await?; let (ep_betty, node_id_betty, addr_betty) = create_endpoint(&mut rng).await?; debug!("start connect"); let (conn_alfie, conn_betty) = tokio::join!( - async move { ep_alfie.connect(addr_betty, ALPN).await }, - async move { - let connecting = ep_betty.accept().await.unwrap(); - connecting.await - } + async move { ep_alfie.connect(addr_betty, ALPN).await.unwrap() }, + async move { ep_betty.accept().await.unwrap().await.unwrap() } ); - let conn_alfie = conn_alfie.unwrap(); - let conn_betty = conn_betty.unwrap(); info!("connected! 
now start reconciliation"); let namespace_secret = NamespaceSecretKey::generate(&mut rng, NamespaceKind::Owned); - let namespace_id: NamespaceId = namespace_secret.public_key().into(); + let namespace_id = namespace_secret.id(); let start = Instant::now(); let mut expected_entries = BTreeSet::new(); - let store_alfie = MemoryStore::default(); - let keys_alfie = MemoryKeyStore::default(); - let payloads_alfie = iroh_blobs::store::mem::Store::default(); - let handle_alfie = ActorHandle::spawn( - store_alfie, - keys_alfie, - payloads_alfie.clone(), - node_id_alfie, - ); - - let store_betty = MemoryStore::default(); - let keys_betty = MemoryKeyStore::default(); - let payloads_betty = iroh_blobs::store::mem::Store::default(); - let handle_betty = ActorHandle::spawn( - store_betty, - keys_betty, - payloads_betty.clone(), - node_id_betty, - ); + let (handle_alfie, payloads_alfie) = create_stores(node_id_alfie); + let (handle_betty, payloads_betty) = create_stores(node_id_betty); - let (init_alfie, _, _) = setup_and_insert( + let (init_alfie, _) = setup_and_insert( SessionMode::ReconcileOnce, &mut rng, &handle_alfie, @@ -375,7 +363,7 @@ mod tests { |n| Path::new(&[b"alfie", n.to_string().as_bytes()]), ) .await?; - let (init_betty, _, _) = setup_and_insert( + let (init_betty, _) = setup_and_insert( SessionMode::ReconcileOnce, &mut rng, &handle_betty, @@ -391,15 +379,14 @@ mod tests { println!("init took {:?}", start.elapsed()); let start = Instant::now(); - let (res_alfie, res_betty) = tokio::join!( + let (session_alfie, session_betty) = tokio::join!( run( node_id_alfie, handle_alfie.clone(), conn_alfie, Role::Alfie, init_alfie - ) - .inspect(|res| info!("alfie done: {res:?}")), + ), run( node_id_betty, handle_betty.clone(), @@ -407,20 +394,14 @@ mod tests { Role::Betty, init_betty ) - .inspect(|res| info!("betty done: {res:?}")), ); + let mut session_alfie = session_alfie?; + let mut session_betty = session_betty?; + let (res_alfie, res_betty) = 
tokio::join!(session_alfie.join(), session_betty.join()); info!(time=?start.elapsed(), "reconciliation finished"); info!("alfie res {:?}", res_alfie); info!("betty res {:?}", res_betty); - // info!( - // "alfie store {:?}", - // get_entries_debug(&handle_alfie, namespace_id).await? - // ); - // info!( - // "betty store {:?}", - // get_entries_debug(&handle_betty, namespace_id).await? - // ); assert!(res_alfie.is_ok()); assert!(res_betty.is_ok()); let alfie_entries = get_entries(&handle_alfie, namespace_id).await?; @@ -434,19 +415,6 @@ mod tests { Ok(()) } - pub async fn create_endpoint( - rng: &mut rand_chacha::ChaCha12Rng, - ) -> anyhow::Result<(MagicEndpoint, NodeId, NodeAddr)> { - let ep = MagicEndpoint::builder() - .secret_key(SecretKey::generate_with_rng(rng)) - .alpns(vec![ALPN.to_vec()]) - .bind(0) - .await?; - let addr = ep.my_addr().await?; - let node_id = ep.node_id(); - Ok((ep, node_id, addr)) - } - #[tokio::test(flavor = "multi_thread")] async fn live_data() -> anyhow::Result<()> { iroh_test::logging::setup_multithreaded(); @@ -457,18 +425,13 @@ mod tests { debug!("start connect"); let (conn_alfie, conn_betty) = tokio::join!( - async move { ep_alfie.connect(addr_betty, ALPN).await }, - async move { - let connecting = ep_betty.accept().await.unwrap(); - connecting.await - } + async move { ep_alfie.connect(addr_betty, ALPN).await.unwrap() }, + async move { ep_betty.accept().await.unwrap().await.unwrap() } ); - let conn_alfie = conn_alfie.unwrap(); - let conn_betty = conn_betty.unwrap(); info!("connected! 
now start reconciliation"); let namespace_secret = NamespaceSecretKey::generate(&mut rng, NamespaceKind::Owned); - let namespace_id: NamespaceId = namespace_secret.public_key().into(); + let namespace_id = namespace_secret.id(); let start = Instant::now(); let mut expected_entries = BTreeSet::new(); @@ -493,7 +456,7 @@ mod tests { node_id_betty, ); - let (init_alfie, secret_alfie, cap_alfie) = setup_and_insert( + let (init_alfie, cap_alfie) = setup_and_insert( SessionMode::Live, &mut rng, &handle_alfie, @@ -504,7 +467,7 @@ mod tests { |n| Path::new(&[b"alfie", n.to_string().as_bytes()]), ) .await?; - let (init_betty, _secret_betty, _cap_betty) = setup_and_insert( + let (init_betty, _cap_betty) = setup_and_insert( SessionMode::Live, &mut rng, &handle_betty, @@ -520,6 +483,9 @@ mod tests { println!("init took {:?}", start.elapsed()); let start = Instant::now(); + let (done_tx, done_rx) = tokio::sync::oneshot::channel(); + + // alfie insert 3 enries after waiting a second let _insert_task_alfie = tokio::task::spawn({ let store = handle_alfie.clone(); let payload_store = payloads_alfie.clone(); @@ -529,32 +495,31 @@ mod tests { let mut track_entries = vec![]; async move { - tokio::time::sleep(std::time::Duration::from_secs(2)).await; + tokio::time::sleep(std::time::Duration::from_secs(1)).await; insert( &store, &payload_store, - count, namespace_id, - &secret_alfie, - &cap_alfie, + cap_alfie, + count, content_fn, path_fn, &mut track_entries, ) .await .expect("failed to insert"); + done_tx.send(track_entries).unwrap(); } }); - let (res_alfie, res_betty) = tokio::join!( + let (session_alfie, session_betty) = tokio::join!( run( node_id_alfie, handle_alfie.clone(), conn_alfie, Role::Alfie, init_alfie - ) - .inspect(|res| info!("alfie done: {res:?}")), + ), run( node_id_betty, handle_betty.clone(), @@ -562,20 +527,19 @@ mod tests { Role::Betty, init_betty ) - .inspect(|res| info!("betty done: {res:?}")), ); + let mut session_alfie = session_alfie?; + let mut session_betty = 
session_betty?; + + let live_entries = done_rx.await?; + expected_entries.extend(live_entries); + session_alfie.finish(); + + let (res_alfie, res_betty) = tokio::join!(session_alfie.join(), session_betty.join()); info!(time=?start.elapsed(), "reconciliation finished"); info!("alfie res {:?}", res_alfie); info!("betty res {:?}", res_betty); - // info!( - // "alfie store {:?}", - // get_entries_debug(&handle_alfie, namespace_id).await? - // ); - // info!( - // "betty store {:?}", - // get_entries_debug(&handle_betty, namespace_id).await? - // ); assert!(res_alfie.is_ok()); assert!(res_betty.is_ok()); let alfie_entries = get_entries(&handle_alfie, namespace_id).await?; @@ -589,25 +553,45 @@ mod tests { Ok(()) } + pub async fn create_endpoint( + rng: &mut rand_chacha::ChaCha12Rng, + ) -> anyhow::Result<(MagicEndpoint, NodeId, NodeAddr)> { + let ep = MagicEndpoint::builder() + .secret_key(SecretKey::generate_with_rng(rng)) + .alpns(vec![ALPN.to_vec()]) + .bind(0) + .await?; + let addr = ep.my_addr().await?; + let node_id = ep.node_id(); + Ok((ep, node_id, addr)) + } + + pub fn create_stores(me: NodeId) -> (ActorHandle, iroh_blobs::store::mem::Store) { + let store = MemoryStore::default(); + let keys = MemoryKeyStore::default(); + let payloads = iroh_blobs::store::mem::Store::default(); + let handle = ActorHandle::spawn(store, keys, payloads.clone(), me); + (handle, payloads) + } + async fn get_entries( store: &ActorHandle, namespace: NamespaceId, ) -> anyhow::Result> { - let entries: BTreeSet<_> = store + let entries: anyhow::Result> = store .get_entries(namespace, ThreeDRange::full()) .await? 
- .collect::>() + .try_collect() .await; - Ok(entries) + entries } async fn insert( store: &ActorHandle, payload_store: &P, - count: usize, namespace_id: NamespaceId, - user_secret: &UserSecretKey, - write_cap: &WriteCapability, + write_cap: WriteCapability, + count: usize, content_fn: impl Fn(usize) -> String, path_fn: impl Fn(usize) -> Result, track_entries: &mut impl Extend, @@ -622,14 +606,13 @@ mod tests { let path = path_fn(i).expect("invalid path"); let entry = Entry::new_current( namespace_id, - user_secret.id(), + write_cap.receiver().id(), path, payload_digest, payload_len, ); track_entries.extend([entry.clone()]); - let entry = entry.attach_authorisation(write_cap.clone(), &user_secret)?; - store.ingest_entry(entry).await?; + store.insert_entry(entry, write_cap.clone()).await?; } Ok(()) } @@ -643,26 +626,40 @@ mod tests { count: usize, track_entries: &mut impl Extend, path_fn: impl Fn(usize) -> Result, - ) -> anyhow::Result<(SessionInit, UserSecretKey, WriteCapability)> { - let user_secret = UserSecretKey::generate(rng); - let user_id_short = user_secret.id().fmt_short(); - store.insert_secret(user_secret.clone()).await?; - let (read_cap, write_cap) = create_capabilities(namespace_secret, user_secret.public_key()); - let content_fn = |i| format!("initial entry {i} for {user_id_short}"); + ) -> anyhow::Result<(SessionInit, WriteCapability)> { + let (read_cap, write_cap) = setup_capabilities(rng, store, namespace_secret).await?; + let content_fn = |i| { + format!( + "initial entry {i} for {}", + write_cap.receiver().id().fmt_short() + ) + }; insert( store, payload_store, - count, namespace_secret.id(), - &user_secret, - &write_cap, + write_cap.clone(), + count, content_fn, path_fn, track_entries, ) .await?; let init = SessionInit::with_interest(mode, read_cap, AreaOfInterest::full()); - Ok((init, user_secret, write_cap)) + Ok((init, write_cap)) + } + + async fn setup_capabilities( + rng: &mut impl CryptoRngCore, + + store: &ActorHandle, + namespace_secret: 
&NamespaceSecretKey, + ) -> anyhow::Result<(ReadCapability, WriteCapability)> { + let user_secret = UserSecretKey::generate(rng); + let user_public_key = user_secret.public_key(); + store.insert_secret(user_secret.clone()).await?; + let (read_cap, write_cap) = create_capabilities(namespace_secret, user_public_key); + Ok((read_cap, write_cap)) } fn create_capabilities( @@ -682,6 +679,19 @@ mod tests { (read_capability, write_capability) } + fn parse_env_var(var: &str, default: T) -> T + where + T: std::str::FromStr, + T::Err: std::fmt::Debug, + { + match std::env::var(var).as_deref() { + Ok(val) => val + .parse() + .expect(&format!("failed to parse environment variable {var}")), + Err(_) => default, + } + } + // async fn get_entries_debug( // store: &StoreHandle, // namespace: NamespaceId, diff --git a/iroh-willow/src/session/channels.rs b/iroh-willow/src/session/channels.rs index 1683ad7b87..eaada31164 100644 --- a/iroh-willow/src/session/channels.rs +++ b/iroh-willow/src/session/channels.rs @@ -6,7 +6,7 @@ use std::{ }; use futures_lite::Stream; -use tracing::debug; +use tracing::trace; use crate::{ proto::sync::{ @@ -39,7 +39,7 @@ impl> MessageReceiver { None => None, Some(Err(err)) => Some(Err(err.into())), Some(Ok(message)) => { - debug!(%message, "recv"); + trace!(%message, "recv"); let message = message.try_into().map_err(|_| Error::WrongChannel); Some(message) } @@ -148,31 +148,8 @@ impl ChannelSenders { pub async fn send(&self, message: impl Into) -> Result<(), WriteError> { let message: Message = message.into(); let channel = message.channel(); - tracing::trace!(%message, ch=%channel.fmt_short(), "now send"); self.get(channel).send_message(&message).await?; - debug!(%message, ch=%channel.fmt_short(), "sent"); + trace!(%message, ch=%channel.fmt_short(), "sent"); Ok(()) } } - -impl ChannelReceivers { - pub fn close_all(&self) { - self.control_recv.close(); - self.logical_recv.close(); - } - // pub fn get(&self, channel: LogicalChannel) -> &Receiver { - // 
match channel { - // LogicalChannel::Control => &self.control_recv, - // LogicalChannel::Reconciliation => &self.logical_recv.reconciliation_recv, - // LogicalChannel::StaticToken => &self.logical_recv.static_tokens_recv, - // } - // } - // - // pub async fn recv(&self, channel: LogicalChannel) -> Option> { - // let message = self.get(channel).recv().await; - // if let Some(Ok(message)) = &message { - // debug!(%message, ch=%channel.fmt_short(),"recv"); - // } - // message - // } -} diff --git a/iroh-willow/src/session/data.rs b/iroh-willow/src/session/data.rs index b1f002ac19..168cbd6fd7 100644 --- a/iroh-willow/src/session/data.rs +++ b/iroh-willow/src/session/data.rs @@ -92,19 +92,26 @@ pub struct DataReceiver { store: Broadcaster, payload_store: P, current_payload: CurrentPayload, + recv: MessageReceiver, } impl DataReceiver { - pub fn new(session: Session, store: Broadcaster, payload_store: P) -> Self { + pub fn new( + session: Session, + store: Broadcaster, + payload_store: P, + recv: MessageReceiver, + ) -> Self { Self { session, store, payload_store, current_payload: Default::default(), + recv, } } - pub async fn run(mut self, mut recv: MessageReceiver) -> Result<(), Error> { - while let Some(message) = recv.try_next().await? { + pub async fn run(mut self) -> Result<(), Error> { + while let Some(message) = self.recv.try_next().await? 
{ self.on_message(message).await?; } Ok(()) diff --git a/iroh-willow/src/session/error.rs b/iroh-willow/src/session/error.rs index b300c30d4c..ef171416a5 100644 --- a/iroh-willow/src/session/error.rs +++ b/iroh-willow/src/session/error.rs @@ -1,7 +1,11 @@ use ed25519_dalek::SignatureError; use crate::{ - proto::{meadowcap::InvalidCapability, sync::ResourceHandle, willow::Unauthorised}, + proto::{ + meadowcap::{self, UserId}, + sync::ResourceHandle, + willow::Unauthorised, + }, store::KeyStoreError, util::channel::{ReadError, WriteError}, }; @@ -54,6 +58,8 @@ pub enum Error { InvalidState(&'static str), #[error("actor failed to respond")] ActorFailed, + #[error("missing user secret key for {0:?}")] + MissingUserKey(UserId), #[error("a task failed to join")] TaskFailed(#[from] tokio::task::JoinError), } @@ -63,8 +69,8 @@ impl From for Error { Self::UnauthorisedEntryReceived } } -impl From for Error { - fn from(_value: InvalidCapability) -> Self { +impl From for Error { + fn from(_value: meadowcap::InvalidCapability) -> Self { Self::InvalidCapability } } @@ -74,3 +80,9 @@ impl From for Error { Self::InvalidSignature } } + +impl From for Error { + fn from(_value: meadowcap::InvalidParams) -> Self { + Self::InvalidParameters("") + } +} diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index 00ab89e311..d50ec763aa 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -1,7 +1,6 @@ use futures_lite::StreamExt; -use tracing::{debug, trace}; - use iroh_blobs::store::Store as PayloadStore; +use tracing::{debug, trace}; use crate::{ proto::{ @@ -15,8 +14,8 @@ use crate::{ }, }, session::{ - channels::MessageReceiver, payload::CurrentPayload, AreaOfInterestIntersection, Error, - Session, + channels::MessageReceiver, payload::send_payload_chunked, payload::CurrentPayload, + AreaOfInterestIntersection, Error, Session, }, store::{ broadcaster::{Broadcaster, Origin}, @@ -25,8 +24,6 @@ use crate::{ 
util::channel::WriteError, }; -use super::payload::send_payload_chunked; - #[derive(derive_more::Debug)] pub struct Reconciler { session: Session, @@ -67,7 +64,7 @@ impl Reconciler { } Some(intersection) = self.session.next_aoi_intersection() => { if self.session.mode().is_live() { - self.store.add_area(*self.session.id(), intersection.namespace, intersection.intersection.clone()); + self.store.watch_area(*self.session.id(), intersection.namespace, intersection.intersection.clone()); } if our_role.is_alfie() { self.initiate(intersection).await?; @@ -78,7 +75,7 @@ impl Reconciler { && !self.session.mode().is_live() && !self.current_payload.is_active() { - debug!("reconciliation complete, close session"); + debug!("reconciliation complete and not in live mode: close session"); break; } } diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 83e5e25964..7dd1ba3f98 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -1,7 +1,8 @@ use futures_lite::StreamExt; use iroh_blobs::store::Store as PayloadStore; use strum::IntoEnumIterator; -use tracing::{debug, error_span}; +use tokio_util::sync::CancellationToken; +use tracing::{debug, error_span, trace}; use crate::{ proto::sync::{ControlIssueGuarantee, LogicalChannel, Message, SetupBindAreaOfInterest}, @@ -27,6 +28,7 @@ impl Session { payload_store: P, recv: ChannelReceivers, init: SessionInit, + finish: CancellationToken, ) -> Result<(), Error> { let ChannelReceivers { control_recv, @@ -50,21 +52,23 @@ impl Session { // Only setup data receiver if session is configured in live mode. 
if init.mode == SessionMode::Live { - let store = store.clone(); - let payload_store = payload_store.clone(); - self.spawn(error_span!("dat:r"), move |session| async move { - DataReceiver::new(session, store, payload_store) - .run(data_recv) - .await?; - Ok(()) + self.spawn(error_span!("dat:r"), { + let store = store.clone(); + let payload_store = payload_store.clone(); + move |session| async move { + DataReceiver::new(session, store, payload_store, data_recv) + .run() + .await?; + Ok(()) + } }); - } - if init.mode == SessionMode::Live { - let store = store.clone(); - let payload_store = payload_store.clone(); - self.spawn(error_span!("dat:s"), move |session| async move { - DataSender::new(session, store, payload_store).run().await?; - Ok(()) + self.spawn(error_span!("dat:s"), { + let store = store.clone(); + let payload_store = payload_store.clone(); + move |session| async move { + DataSender::new(session, store, payload_store).run().await?; + Ok(()) + } }); } @@ -85,47 +89,58 @@ impl Session { }); // Spawn a task to handle reconciliation messages - self.spawn(error_span!("rec"), move |session| async move { - Reconciler::new(session, store, payload_store, reconciliation_recv)? - .run() - .await + self.spawn(error_span!("rec"), { + let finish = finish.clone(); + let store = store.clone(); + move |session| async move { + let res = Reconciler::new(session, store, payload_store, reconciliation_recv)? + .run() + .await; + finish.cancel(); + res + } }); // Spawn a task to handle control messages - self.spawn(tracing::Span::current(), move |session| async move { - control_loop(session, key_store, control_recv, init).await + self.spawn(error_span!("ctl"), { + let finish = finish.clone(); + move |session| async move { + let res = control_loop(session, key_store, control_recv, init).await; + finish.cancel(); + res + } }); - // Loop over task completions, break on failure or if reconciliation completed + // Spawn a task to handle session termination. 
+ self.spawn(error_span!("fin"), move |session| async move { + // Wait until the session is cancelled: + // * either because SessionMode is ReconcileOnce and reconciliation finished + // * or because the session was cancelled from the outside session handle + finish.cancelled().await; + // Then close all senders. This will make all other tasks terminate once the remote + // closed their senders as well. + session.close_senders(); + // Unsubscribe from the store. This stops the data send task. + store.unsubscribe(session.id()); + Ok(()) + }); + + // Wait for all tasks to complete. + // We are not cancelling here so we have to make sure that all tasks terminate (structured + // concurrency basically). + let mut final_result = Ok(()); while let Some((span, result)) = self.join_next_task().await { - let guard = span.enter(); - debug!( - ?result, - remaining = self.remaining_tasks(), - "task completed" - ); - // self.log_remaining_tasks(); - result?; - // Is this the right place for this check? It would run after each task - // completion, so necessarily including the completion of the reconciliation - // task, which is the only condition in which reconciliation can complete at - // the moment. - // - // TODO: We'll want to emit the completion event back to the application and - // let it decide what to do (stop, keep open) - or pass relevant config in - // SessionInit. - if !self.mode().is_live() && self.reconciliation_is_complete() { - tracing::debug!("stop self: reconciliation is complete"); - drop(guard); - - // Close all our send streams. - // - // This makes the networking send loops stop. 
- self.close_senders(); + let _guard = span.enter(); + trace!(?result, remaining = self.remaining_tasks(), "task complete"); + if let Err(err) = result { + tracing::warn!(?err, "task failed: {err}"); + if final_result.is_ok() { + final_result = Err(err); + } } } - - Ok(()) + debug!(success = final_result.is_ok(), "session complete"); + final_result } } @@ -152,7 +167,6 @@ async fn control_loop( } while let Some(message) = control_recv.try_next().await? { - debug!(%message, "recv"); match message { Message::CommitmentReveal(msg) => { session.on_commitment_reveal(msg)?; @@ -165,7 +179,7 @@ async fn control_loop( } Message::ControlIssueGuarantee(msg) => { let ControlIssueGuarantee { amount, channel } = msg; - debug!(?channel, %amount, "add guarantees"); + trace!(?channel, %amount, "add guarantees"); session.add_guarantees(channel, amount); } _ => return Err(Error::UnsupportedMessage), diff --git a/iroh-willow/src/session/state.rs b/iroh-willow/src/session/state.rs index f1afbd3a9b..75c6190954 100644 --- a/iroh-willow/src/session/state.rs +++ b/iroh-willow/src/session/state.rs @@ -100,18 +100,23 @@ impl Session { .await } + pub fn abort_all_tasks(&self) { + self.0.tasks.borrow_mut().abort_all(); + } + pub fn remaining_tasks(&self) -> usize { let tasks = self.0.tasks.borrow(); tasks.len() } - // pub fn log_remaining_tasks(&self) { - // let tasks = self.0.tasks.borrow(); - // for t in tasks.iter() { - // let _guard = t.0.enter(); - // tracing::debug!("active"); - // } - // } + pub fn log_remaining_tasks(&self) { + let tasks = self.0.tasks.borrow(); + let names = tasks + .iter() + .map(|t| t.0.metadata().unwrap().name()) + .collect::>(); + tracing::debug!(tasks=?names, "active_tasks"); + } pub async fn send(&self, message: impl Into) -> Result<(), WriteError> { self.0.send.send(message).await @@ -302,13 +307,13 @@ impl Session { pub fn reconciliation_is_complete(&self) -> bool { let state = self.state(); - tracing::debug!( - "reconciliation_is_complete started {} 
pending_ranges {}, pending_entries {:?} mode {:?}", - state.reconciliation_started, - state.our_uncovered_ranges.len(), - state.pending_announced_entries, - self.mode(), - ); + // tracing::debug!( + // "reconciliation_is_complete started {} pending_ranges {}, pending_entries {:?} mode {:?}", + // state.reconciliation_started, + // state.our_uncovered_ranges.len(), + // state.pending_announced_entries, + // self.mode(), + // ); state.reconciliation_started && state.our_uncovered_ranges.is_empty() && state.pending_announced_entries.is_none() diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index 8d5db5eb43..1eb53d500c 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -48,12 +48,25 @@ pub enum KeyScope { pub trait KeyStore: Send + 'static { fn insert(&mut self, secret: meadowcap::SecretKey) -> Result<(), KeyStoreError>; - fn sign_user(&self, id: &UserId, message: &[u8]) -> Result; + fn get_user(&self, id: &UserId) -> Option<&UserSecretKey>; + fn get_namespace(&self, id: &NamespaceId) -> Option<&NamespaceSecretKey>; + + fn sign_user(&self, id: &UserId, message: &[u8]) -> Result { + Ok(self + .get_user(id) + .ok_or(KeyStoreError::MissingKey)? + .sign(message)) + } fn sign_namespace( &self, id: &NamespaceId, message: &[u8], - ) -> Result; + ) -> Result { + Ok(self + .get_namespace(id) + .ok_or(KeyStoreError::MissingKey)? + .sign(message)) + } } pub trait EntryStore: ReadonlyStore + 'static { @@ -135,6 +148,14 @@ impl Shared { ) -> Result { self.0.borrow().sign_namespace(id, message) } + + pub fn get_user(&self, id: &UserId) -> Option { + self.0.borrow().get_user(id).cloned() + } + + pub fn get_namespace(&self, id: &NamespaceId) -> Option { + self.0.borrow().get_namespace(id).cloned() + } } #[derive(Debug, Default)] @@ -155,24 +176,12 @@ impl KeyStore for MemoryKeyStore { }) } - fn sign_user(&self, id: &UserId, message: &[u8]) -> Result { - Ok(self - .user - .get(id) - .ok_or(KeyStoreError::MissingKey)? 
- .sign(message)) + fn get_user(&self, id: &UserId) -> Option<&UserSecretKey> { + self.user.get(id) } - fn sign_namespace( - &self, - id: &NamespaceId, - message: &[u8], - ) -> Result { - Ok(self - .namespace - .get(id) - .ok_or(KeyStoreError::MissingKey)? - .sign(message)) + fn get_namespace(&self, id: &NamespaceId) -> Option<&NamespaceSecretKey> { + self.namespace.get(id) } } diff --git a/iroh-willow/src/store/broadcaster.rs b/iroh-willow/src/store/broadcaster.rs index 75ed18e6ff..e4d9c839e2 100644 --- a/iroh-willow/src/store/broadcaster.rs +++ b/iroh-willow/src/store/broadcaster.rs @@ -52,14 +52,6 @@ impl Broadcaster { } } - pub fn subscribe(&mut self, session_id: SessionId) -> broadcast::Receiver { - self.broadcast.lock().unwrap().subscribe(session_id) - } - - pub fn unsubscribe(&mut self, session_id: &SessionId) { - self.broadcast.lock().unwrap().unsubscribe(session_id) - } - pub fn ingest_entry( &mut self, entry: &AuthorisedEntry, @@ -73,11 +65,19 @@ impl Broadcaster { } } - pub fn add_area(&mut self, session: SessionId, namespace: NamespaceId, area: Area) { + pub fn subscribe(&self, session_id: SessionId) -> broadcast::Receiver { + self.broadcast.lock().unwrap().subscribe(session_id) + } + + pub fn unsubscribe(&self, session_id: &SessionId) { + self.broadcast.lock().unwrap().unsubscribe(session_id) + } + + pub fn watch_area(&mut self, session: SessionId, namespace: NamespaceId, area: Area) { self.broadcast .lock() .unwrap() - .add_area(session, namespace, area); + .watch_area(session, namespace, area); } } @@ -103,7 +103,7 @@ impl BroadcasterInner { }); } - fn add_area(&mut self, session: SessionId, namespace: NamespaceId, area: Area) { + fn watch_area(&mut self, session: SessionId, namespace: NamespaceId, area: Area) { self.areas .entry(namespace) .or_default() diff --git a/iroh-willow/src/util/task.rs b/iroh-willow/src/util/task.rs index 44ea57a7e5..40720cdf73 100644 --- a/iroh-willow/src/util/task.rs +++ b/iroh-willow/src/util/task.rs @@ -9,6 +9,7 @@ use 
std::{ use futures_concurrency::future::{future_group, FutureGroup}; use futures_lite::Stream; +use tokio::task::AbortHandle; use tokio::task::JoinError; #[derive(derive_more::Debug, Clone, Copy, Hash, Eq, PartialEq)] @@ -26,6 +27,7 @@ pub struct TaskKey(future_group::Key); #[derive(Debug)] pub struct JoinMap { tasks: future_group::Keyed>, + abort_handles: HashMap, keys: HashMap, } @@ -33,7 +35,8 @@ impl Default for JoinMap { fn default() -> Self { Self { tasks: FutureGroup::new().keyed(), - keys: HashMap::new(), + keys: Default::default(), + abort_handles: Default::default(), } } } @@ -47,9 +50,11 @@ impl JoinMap { /// Spawn a new task on the currently executing [`tokio::task::LocalSet`]. pub fn spawn_local + 'static>(&mut self, key: K, future: F) -> TaskKey { let handle = tokio::task::spawn_local(future); + let abort_handle = handle.abort_handle(); let k = self.tasks.insert(handle); let k = TaskKey(k); self.keys.insert(k, key); + self.abort_handles.insert(k, abort_handle); k } @@ -84,6 +89,12 @@ impl JoinMap { pub fn iter(&self) -> impl Iterator { self.keys.iter().map(|(a, b)| (b, a)) } + + pub fn abort_all(&mut self) { + for (_, handle) in self.abort_handles.drain() { + handle.abort(); + } + } } impl JoinMap { From 939010e72cd35934fead339489cd40233dfa8563 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Mon, 3 Jun 2024 14:29:38 +0200 Subject: [PATCH 054/198] chore: clippy --- iroh-willow/src/actor.rs | 2 +- iroh-willow/src/net.rs | 10 ++++--- iroh-willow/src/proto/challenge.rs | 8 +++--- iroh-willow/src/proto/grouping.rs | 6 ++-- iroh-willow/src/proto/sync.rs | 4 +-- iroh-willow/src/proto/willow.rs | 4 +-- iroh-willow/src/session/reconciler.rs | 20 ++++++------- iroh-willow/src/session/resource.rs | 2 +- iroh-willow/src/session/state.rs | 4 +-- iroh-willow/src/store.rs | 41 +++++++++++---------------- iroh-willow/src/util/task.rs | 2 +- 11 files changed, 49 insertions(+), 54 deletions(-) diff --git a/iroh-willow/src/actor.rs 
b/iroh-willow/src/actor.rs index 4b152d9a69..4937054975 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -319,7 +319,7 @@ impl StorageThread { let on_finish = on_finish_rx .map(|r| match r { Ok(Ok(())) => Ok(()), - Ok(Err(err)) => Err(Arc::new(err.into())), + Ok(Err(err)) => Err(Arc::new(err)), Err(_) => Err(Arc::new(Error::ActorFailed)), }) .boxed() diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index e44ea3f7a8..c2e980cf7f 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -252,7 +252,7 @@ async fn exchange_commitments( recv_stream: &mut RecvStream, ) -> anyhow::Result { let our_nonce: AccessChallenge = rand::random(); - let challenge_hash = Hash::new(&our_nonce); + let challenge_hash = Hash::new(our_nonce); send_stream.write_u8(MAX_PAYLOAD_SIZE_POWER).await?; send_stream.write_all(challenge_hash.as_bytes()).await?; @@ -586,6 +586,7 @@ mod tests { entries } + #[allow(clippy::too_many_arguments)] async fn insert( store: &ActorHandle, payload_store: &P, @@ -617,6 +618,7 @@ mod tests { Ok(()) } + #[allow(clippy::too_many_arguments)] async fn setup_and_insert( mode: SessionMode, rng: &mut impl CryptoRngCore, @@ -667,12 +669,12 @@ mod tests { user_public_key: UserPublicKey, ) -> (ReadCapability, WriteCapability) { let read_capability = McCapability::Owned(OwnedCapability::new( - &namespace_secret, + namespace_secret, user_public_key, AccessMode::Read, )); let write_capability = McCapability::Owned(OwnedCapability::new( - &namespace_secret, + namespace_secret, user_public_key, AccessMode::Write, )); @@ -687,7 +689,7 @@ mod tests { match std::env::var(var).as_deref() { Ok(val) => val .parse() - .expect(&format!("failed to parse environment variable {var}")), + .unwrap_or_else(|_| panic!("failed to parse environment variable {var}")), Err(_) => default, } } diff --git a/iroh-willow/src/proto/challenge.rs b/iroh-willow/src/proto/challenge.rs index 00e5ae22dc..0e0128f5cf 100644 --- a/iroh-willow/src/proto/challenge.rs 
+++ b/iroh-willow/src/proto/challenge.rs @@ -26,7 +26,7 @@ impl ChallengeState { our_nonce, received_commitment, } => { - if Hash::new(&their_nonce).as_bytes() != received_commitment { + if Hash::new(their_nonce).as_bytes() != received_commitment { return Err(Error::BrokenCommittement); } let ours = match our_role { @@ -54,20 +54,20 @@ impl ChallengeState { pub fn verify(&self, user_key: &UserPublicKey, signature: &UserSignature) -> Result<(), Error> { let their_challenge = self.get_theirs()?; - user_key.verify(their_challenge, &signature)?; + user_key.verify(their_challenge, signature)?; Ok(()) } fn get_ours(&self) -> Result<&AccessChallenge, Error> { match self { - Self::Revealed { ours, .. } => Ok(&ours), + Self::Revealed { ours, .. } => Ok(ours), _ => Err(Error::InvalidMessageInCurrentState), } } fn get_theirs(&self) -> Result<&AccessChallenge, Error> { match self { - Self::Revealed { theirs, .. } => Ok(&theirs), + Self::Revealed { theirs, .. } => Ok(theirs), _ => Err(Error::InvalidMessageInCurrentState), } } diff --git a/iroh-willow/src/proto/grouping.rs b/iroh-willow/src/proto/grouping.rs index 9ecaeb70ba..1c8f20cc94 100644 --- a/iroh-willow/src/proto/grouping.rs +++ b/iroh-willow/src/proto/grouping.rs @@ -134,7 +134,7 @@ impl Range { let start = (&self.start).max(&other.start); let end = match (&self.end, &other.end) { (RangeEnd::Open, RangeEnd::Closed(b)) => RangeEnd::Closed(b), - (RangeEnd::Closed(a), RangeEnd::Closed(b)) => RangeEnd::Closed(a.min(&b)), + (RangeEnd::Closed(a), RangeEnd::Closed(b)) => RangeEnd::Closed(a.min(b)), (RangeEnd::Closed(a), RangeEnd::Open) => RangeEnd::Closed(a), (RangeEnd::Open, RangeEnd::Open) => RangeEnd::Open, }; @@ -313,7 +313,7 @@ impl Area { ThreeDRange { subspaces: Range::new(subspace_start, subspace_end), paths: Range::new(path_start, path_end), - times: self.times.clone(), + times: self.times, } } @@ -338,7 +338,7 @@ pub fn path_range_end(path: &Path) -> RangeEnd { // component can be incremented if out.is_empty() && 
component.iter().any(|x| *x != 0xff) { let mut bytes = Vec::with_capacity(component.len()); - bytes.copy_from_slice(&component); + bytes.copy_from_slice(component); let incremented = increment_by_one(&mut bytes); debug_assert!(incremented, "checked above"); out.push(Bytes::from(bytes)); diff --git a/iroh-willow/src/proto/sync.rs b/iroh-willow/src/proto/sync.rs index a01ad9e11b..034d4b9095 100644 --- a/iroh-willow/src/proto/sync.rs +++ b/iroh-willow/src/proto/sync.rs @@ -698,7 +698,7 @@ pub struct Fingerprint(pub [u8; 32]); impl fmt::Debug for Fingerprint { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "Fingerprint({})", iroh_base::base32::fmt_short(&self.0)) + write!(f, "Fingerprint({})", iroh_base::base32::fmt_short(self.0)) } } @@ -706,7 +706,7 @@ impl Fingerprint { pub fn add_entry(&mut self, entry: &Entry) { // TODO: Don't allocate let next = - Fingerprint(*Hash::new(&entry.encode().expect("encoding not to fail")).as_bytes()); + Fingerprint(*Hash::new(entry.encode().expect("encoding not to fail")).as_bytes()); *self ^= next; } diff --git a/iroh-willow/src/proto/willow.rs b/iroh-willow/src/proto/willow.rs index b142401779..8c9ff2311e 100644 --- a/iroh-willow/src/proto/willow.rs +++ b/iroh-willow/src/proto/willow.rs @@ -146,7 +146,7 @@ impl Ord for Path { match other.get(i) { Some(other_component) => match component.cmp(other_component) { Ordering::Equal => continue, - ordering @ _ => return ordering, + ordering => return ordering, }, None => return Ordering::Greater, } @@ -375,7 +375,7 @@ pub mod encodings { for component in self.iter() { let len = component.len() as UPathLengthPower; out.write_all(&len.to_be_bytes())?; - out.write_all(&component)?; + out.write_all(component)?; } Ok(()) } diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index d50ec763aa..8031bb7df1 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -236,6 +236,7 @@ impl Reconciler { 
Ok(()) } + #[allow(clippy::too_many_arguments)] async fn announce_and_send_entries( &mut self, namespace: NamespaceId, @@ -248,7 +249,7 @@ impl Reconciler { ) -> Result<(), Error> { let our_entry_count = match our_entry_count { Some(count) => count, - None => self.snapshot.count(namespace, &range)?, + None => self.snapshot.count(namespace, range)?, }; let msg = ReconciliationAnnounceEntries { range: range.clone(), @@ -265,7 +266,7 @@ impl Reconciler { self.send(msg).await?; for authorised_entry in self .snapshot - .get_entries_with_authorisation(namespace, &range) + .get_entries_with_authorisation(namespace, range) { let authorised_entry = authorised_entry?; let (entry, token) = authorised_entry.into_parts(); @@ -288,8 +289,8 @@ impl Reconciler { // TODO: only send payload if configured to do so and/or under size limit. let send_payloads = true; let chunk_size = 1024 * 64; - if send_payloads { - if send_payload_chunked( + if send_payloads + && send_payload_chunked( digest, &self.payload_store, &self.session, @@ -297,10 +298,9 @@ impl Reconciler { |bytes| ReconciliationSendPayload { bytes }.into(), ) .await? 
- { - let msg = ReconciliationTerminatePayload; - self.send(msg).await?; - } + { + let msg = ReconciliationTerminatePayload; + self.send(msg).await?; } } Ok(()) @@ -318,11 +318,11 @@ impl Reconciler { let config = SyncConfig::default(); // clone to avoid borrow checker trouble let snapshot = self.snapshot.clone(); - let mut iter = snapshot.split_range(namespace, &range, &config)?.peekable(); + let mut iter = snapshot.split_range(namespace, range, &config)?.peekable(); while let Some(res) = iter.next() { let (subrange, action) = res?; let is_last = iter.peek().is_none(); - let covers = is_last.then(|| range_count); + let covers = is_last.then_some(range_count); match action { SplitAction::SendEntries(count) => { self.announce_and_send_entries( diff --git a/iroh-willow/src/session/resource.rs b/iroh-willow/src/session/resource.rs index 2ad1c5716f..1507bd9894 100644 --- a/iroh-willow/src/session/resource.rs +++ b/iroh-willow/src/session/resource.rs @@ -35,7 +35,7 @@ impl ResourceMaps { where F: for<'a> Fn(&'a Self) -> &'a ResourceMap, { - let res = selector(&self); + let res = selector(self); res.try_get(&handle).cloned() } diff --git a/iroh-willow/src/session/state.rs b/iroh-willow/src/session/state.rs index 75c6190954..8772ee8222 100644 --- a/iroh-willow/src/session/state.rs +++ b/iroh-willow/src/session/state.rs @@ -281,10 +281,10 @@ impl Session { .get_their_resource_eventually(|r| &mut r.areas_of_interest, sender_handle) .await; - if !our_aoi.area().includes_range(&range) || !their_aoi.area().includes_range(&range) { + if !our_aoi.area().includes_range(range) || !their_aoi.area().includes_range(range) { return Err(Error::RangeOutsideCapability); } - Ok(our_namespace.into()) + Ok(our_namespace) } pub fn on_setup_bind_static_token(&self, msg: SetupBindStaticToken) { diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index 1eb53d500c..f9876bea0e 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -121,7 +121,7 @@ impl Shared { impl 
Shared { pub fn snapshot(&self) -> Result { - Ok(self.0.borrow_mut().snapshot()?) + self.0.borrow_mut().snapshot() } pub fn ingest_entry(&self, entry: &AuthorisedEntry) -> Result { @@ -166,14 +166,15 @@ pub struct MemoryKeyStore { impl KeyStore for MemoryKeyStore { fn insert(&mut self, secret: meadowcap::SecretKey) -> Result<(), KeyStoreError> { - Ok(match secret { + match secret { meadowcap::SecretKey::User(secret) => { self.user.insert(secret.id(), secret); } meadowcap::SecretKey::Namespace(secret) => { self.namespace.insert(secret.id(), secret); } - }) + }; + Ok(()) } fn get_user(&self, id: &UserId) -> Option<&UserSecretKey> { @@ -185,19 +186,11 @@ impl KeyStore for MemoryKeyStore { } } -#[derive(Debug)] +#[derive(Debug, Default)] pub struct MemoryStore { entries: HashMap>, } -impl Default for MemoryStore { - fn default() -> Self { - Self { - entries: Default::default(), - } - } -} - impl ReadonlyStore for MemoryStore { fn fingerprint(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result { let mut fingerprint = Fingerprint::default(); @@ -235,38 +228,38 @@ impl ReadonlyStore for MemoryStore { ranges.push(ThreeDRange::new( Range::new(range.subspaces.start, RangeEnd::Closed(mid.subspace_id)), range.paths.clone(), - range.times.clone(), + range.times, )); ranges.push(ThreeDRange::new( Range::new(mid.subspace_id, range.subspaces.end), range.paths.clone(), - range.times.clone(), + range.times, )); } // split by path else if mid.path != range.paths.start { ranges.push(ThreeDRange::new( - range.subspaces.clone(), + range.subspaces, Range::new( range.paths.start.clone(), RangeEnd::Closed(mid.path.clone()), ), - range.times.clone(), + range.times, )); ranges.push(ThreeDRange::new( - range.subspaces.clone(), + range.subspaces, Range::new(mid.path.clone(), range.paths.end.clone()), - range.times.clone(), + range.times, )); // split by time } else { ranges.push(ThreeDRange::new( - range.subspaces.clone(), + range.subspaces, range.paths.clone(), 
Range::new(range.times.start, RangeEnd::Closed(mid.timestamp)), )); ranges.push(ThreeDRange::new( - range.subspaces.clone(), + range.subspaces, range.paths.clone(), Range::new(mid.timestamp, range.times.end), )); @@ -301,7 +294,7 @@ impl ReadonlyStore for MemoryStore { impl ReadonlyStore for Arc { fn fingerprint(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result { - MemoryStore::fingerprint(&self, namespace, range) + MemoryStore::fingerprint(self, namespace, range) } fn split_range( @@ -310,11 +303,11 @@ impl ReadonlyStore for Arc { range: &ThreeDRange, config: &SyncConfig, ) -> Result>> { - MemoryStore::split_range(&self, namespace, range, config) + MemoryStore::split_range(self, namespace, range, config) } fn count(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result { - MemoryStore::count(&self, namespace, range) + MemoryStore::count(self, namespace, range) } fn get_entries_with_authorisation<'a>( @@ -322,7 +315,7 @@ impl ReadonlyStore for Arc { namespace: NamespaceId, range: &ThreeDRange, ) -> impl Iterator> + 'a { - MemoryStore::get_entries_with_authorisation(&self, namespace, range) + MemoryStore::get_entries_with_authorisation(self, namespace, range) } } diff --git a/iroh-willow/src/util/task.rs b/iroh-willow/src/util/task.rs index 40720cdf73..dbeaabc146 100644 --- a/iroh-willow/src/util/task.rs +++ b/iroh-willow/src/util/task.rs @@ -72,7 +72,7 @@ impl JoinMap { /// Remove a task from the map. 
pub fn remove(&mut self, task_key: &TaskKey) -> bool { - self.keys.remove(&task_key); + self.keys.remove(task_key); self.tasks.remove(task_key.0) } From 665665dac80dfefbcf1b9360949c0edc2d8d1c9d Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Mon, 3 Jun 2024 14:31:20 +0200 Subject: [PATCH 055/198] cleanup --- iroh-willow/cuts.rs | 39 --------------------------------- iroh-willow/src/net.rs | 6 ++--- iroh-willow/src/session.rs | 1 - iroh-willow/src/session/util.rs | 33 ---------------------------- 4 files changed, 2 insertions(+), 77 deletions(-) delete mode 100644 iroh-willow/cuts.rs delete mode 100644 iroh-willow/src/session/util.rs diff --git a/iroh-willow/cuts.rs b/iroh-willow/cuts.rs deleted file mode 100644 index 640ba9dfba..0000000000 --- a/iroh-willow/cuts.rs +++ /dev/null @@ -1,39 +0,0 @@ - -// async fn recv_bulk( -// &self, -// channel: LogicalChannel, -// ) -> Option>> { -// let receiver = self.channels.receiver(channel); -// let mut buf = SmallVec::<[Message; N]>::new(); -// loop { -// match receiver.read_message_or_set_notify() { -// Err(err) => return Some(Err(err)), -// Ok(outcome) => match outcome { -// ReadOutcome::Closed => { -// if buf.is_empty() { -// debug!("recv: closed"); -// return None; -// } else { -// return Some(Ok(buf)); -// } -// } -// ReadOutcome::ReadBufferEmpty => { -// if buf.is_empty() { -// self.co -// .yield_(Yield::Pending(Readyness::Channel(channel, Interest::Recv))) -// .await; -// } else { -// return Some(Ok(buf)); -// } -// } -// ReadOutcome::Item(message) => { -// debug!(%message, "recv"); -// buf.push(message); -// if buf.len() == N { -// return Some(Ok(buf)); -// } -// } -// }, -// } -// } -// } diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index c2e980cf7f..a9bd430b70 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -375,8 +375,7 @@ mod tests { ) .await?; - debug!("init constructed"); - println!("init took {:?}", start.elapsed()); + info!("init took {:?}", 
start.elapsed()); let start = Instant::now(); let (session_alfie, session_betty) = tokio::join!( @@ -479,8 +478,7 @@ mod tests { ) .await?; - debug!("init constructed"); - println!("init took {:?}", start.elapsed()); + info!("init took {:?}", start.elapsed()); let start = Instant::now(); let (done_tx, done_rx) = tokio::sync::oneshot::channel(); diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index b6e8284f58..f76e222489 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -13,7 +13,6 @@ mod reconciler; mod resource; mod run; mod state; -mod util; pub use self::channels::Channels; pub use self::error::Error; diff --git a/iroh-willow/src/session/util.rs b/iroh-willow/src/session/util.rs deleted file mode 100644 index 43e154eb0a..0000000000 --- a/iroh-willow/src/session/util.rs +++ /dev/null @@ -1,33 +0,0 @@ -// use crate::{ -// proto::{grouping::ThreeDRange, keys::NamespaceId, sync::AreaOfInterestHandle}, -// store::{Store, SyncConfig}, -// session::Error, -// }; - -// pub struct SplitRange { -// snapshot: Snapshot, -// args: SplitRangeArgs, -// config: SyncConfig, -// } -// -// pub struct SplitRangeArgs { -// namespace: NamespaceId, -// range: ThreeDRange, -// our_handle: AreaOfInterestHandle, -// their_handle: AreaOfInterestHandle, -// } -// -// pub enum Yield { -// Done, -// OutboxFull, -// } -// -// fn run(mut state: SplitRange) -> Result<(), Error> { -// let SplitRange { -// snapshot: store, -// args, -// config, -// } = &mut state; -// let iter = store.split_range(args.namespace, &args.range, &config)?; -// Ok(()) -// } From 4256f46650169dedca756617d71e3c8c92f91603 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Mon, 3 Jun 2024 14:36:32 +0200 Subject: [PATCH 056/198] fixup after rebase --- iroh-willow/src/net.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index a9bd430b70..f662d64bd1 100644 --- a/iroh-willow/src/net.rs +++ 
b/iroh-willow/src/net.rs @@ -2,7 +2,7 @@ use anyhow::ensure; use futures_concurrency::future::TryJoin; use futures_util::future::TryFutureExt; use iroh_base::{hash::Hash, key::NodeId}; -use iroh_net::magic_endpoint::{Connection, RecvStream, SendStream}; +use iroh_net::endpoint::{Connection, RecvStream, SendStream}; use tokio::{ io::{AsyncReadExt, AsyncWriteExt}, task::JoinSet, @@ -37,7 +37,7 @@ pub async fn run( our_role: Role, init: SessionInit, ) -> anyhow::Result { - let peer = iroh_net::magic_endpoint::get_remote_node_id(&conn)?; + let peer = iroh_net::endpoint::get_remote_node_id(&conn)?; Span::current().record("peer", tracing::field::display(peer.fmt_short())); debug!(?our_role, "connected"); @@ -305,7 +305,7 @@ mod tests { use futures_lite::StreamExt; use iroh_base::key::SecretKey; use iroh_blobs::store::Store as PayloadStore; - use iroh_net::{MagicEndpoint, NodeAddr, NodeId}; + use iroh_net::{Endpoint, NodeAddr, NodeId}; use rand::SeedableRng; use rand_core::CryptoRngCore; use tracing::{debug, info}; @@ -553,8 +553,8 @@ mod tests { pub async fn create_endpoint( rng: &mut rand_chacha::ChaCha12Rng, - ) -> anyhow::Result<(MagicEndpoint, NodeId, NodeAddr)> { - let ep = MagicEndpoint::builder() + ) -> anyhow::Result<(Endpoint, NodeId, NodeAddr)> { + let ep = Endpoint::builder() .secret_key(SecretKey::generate_with_rng(rng)) .alpns(vec![ALPN.to_vec()]) .bind(0) From a5803b85a7261b1659cfc39456f77d80bfe08cb9 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Mon, 3 Jun 2024 14:59:19 +0200 Subject: [PATCH 057/198] revert iroh-base change --- iroh-base/src/base32.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/iroh-base/src/base32.rs b/iroh-base/src/base32.rs index 61ec333508..a87153a166 100644 --- a/iroh-base/src/base32.rs +++ b/iroh-base/src/base32.rs @@ -18,7 +18,7 @@ pub fn fmt_append(bytes: impl AsRef<[u8]>, out: &mut String) { /// Convert to a base32 string limited to the first 10 bytes pub fn fmt_short(bytes: impl AsRef<[u8]>) 
-> String { - let len = bytes.as_ref().len().min(5); + let len = bytes.as_ref().len().min(10); let mut text = data_encoding::BASE32_NOPAD.encode(&bytes.as_ref()[..len]); text.make_ascii_lowercase(); text From c34324e0108bd00288a9723e83a1d7106720ad0b Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Mon, 3 Jun 2024 22:28:43 +0200 Subject: [PATCH 058/198] refactor: store traits and generics --- iroh-willow/src/actor.rs | 53 ++-- iroh-willow/src/net.rs | 34 +-- iroh-willow/src/session/data.rs | 45 +-- iroh-willow/src/session/error.rs | 4 +- iroh-willow/src/session/reconciler.rs | 32 +- iroh-willow/src/session/run.rs | 40 ++- iroh-willow/src/session/state.rs | 6 +- iroh-willow/src/store.rs | 412 +++++++------------------- iroh-willow/src/store/broadcaster.rs | 134 --------- iroh-willow/src/store/memory.rs | 233 +++++++++++++++ iroh-willow/src/store/traits.rs | 117 ++++++++ 11 files changed, 532 insertions(+), 578 deletions(-) delete mode 100644 iroh-willow/src/store/broadcaster.rs create mode 100644 iroh-willow/src/store/memory.rs create mode 100644 iroh-willow/src/store/traits.rs diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/actor.rs index 4937054975..b5fc7f524b 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -3,7 +3,6 @@ use std::{collections::HashMap, sync::Arc, thread::JoinHandle}; use futures_lite::{future::Boxed as BoxFuture, stream::Stream, StreamExt}; use futures_util::future::{self, FutureExt}; use iroh_base::key::NodeId; -use iroh_blobs::store::Store as PayloadStore; use tokio::sync::oneshot; use tokio_util::sync::CancellationToken; use tracing::{debug, error, error_span, trace, warn, Instrument}; @@ -18,8 +17,8 @@ use crate::{ }, session::{Channels, Error, Role, Session, SessionInit}, store::{ - broadcaster::{Broadcaster, Origin}, - EntryStore, KeyStore, ReadonlyStore, Shared, + traits::{EntryReader, SecretStorage, Storage}, + Origin, Store, }, util::task::{JoinMap, TaskKey}, }; @@ -35,10 +34,8 @@ pub struct 
ActorHandle { } impl ActorHandle { - pub fn spawn( - store: S, - key_store: K, - payload_store: P, + pub fn spawn( + create_store: impl 'static + Send + FnOnce() -> S, me: NodeId, ) -> ActorHandle { let (tx, rx) = flume::bounded(INBOX_CAP); @@ -48,11 +45,10 @@ impl ActorHandle { let span = error_span!("willow_thread", me=%me.fmt_short()); let _guard = span.enter(); - let store = Broadcaster::new(Shared::new(store)); - let actor = StorageThread { + let store = (create_store)(); + let store = Store::new(store); + let actor = Actor { store, - key_store: Shared::new(key_store), - payload_store, sessions: Default::default(), inbox_rx: rx, next_session_id: 0, @@ -228,17 +224,15 @@ struct ActiveSession { } #[derive(Debug)] -pub struct StorageThread { +pub struct Actor { inbox_rx: flume::Receiver, - store: Broadcaster, - key_store: Shared, - payload_store: P, + store: Store, next_session_id: u64, sessions: HashMap, session_tasks: JoinMap>, } -impl StorageThread { +impl Actor { pub fn run(self) -> anyhow::Result<()> { let rt = tokio::runtime::Builder::new_current_thread() .build() @@ -297,14 +291,11 @@ impl StorageThread { let id = self.next_session_id(); let session = Session::new(id, init.mode, our_role, send, initial_transmission); - let store: Broadcaster = self.store.clone(); - let key_store = self.key_store.clone(); - let payload_store = self.payload_store.clone(); - + let store = self.store.clone(); let finish = CancellationToken::new(); let future = session - .run(store, key_store, payload_store, recv, init, finish.clone()) + .run(store, recv, init, finish.clone()) .instrument(error_span!("session", peer = %peer.fmt_short())); let task_key = self.session_tasks.spawn_local(id, future); @@ -332,7 +323,7 @@ impl StorageThread { range, reply, } => { - let snapshot = self.store.snapshot(); + let snapshot = self.store.entries().snapshot(); match snapshot { Ok(snapshot) => { iter_to_channel(reply, Ok(snapshot.get_entries(namespace, &range))) @@ -341,7 +332,7 @@ impl 
StorageThread { } } ToActor::IngestEntry { entry, reply } => { - let res = self.store.ingest_entry(&entry, Origin::Local); + let res = self.store.entries().ingest_entry(&entry, Origin::Local); send_reply(reply, res) } ToActor::InsertEntry { @@ -350,17 +341,19 @@ impl StorageThread { reply, } => send_reply_with(reply, self, |slf| { let user_id = capability.receiver().id(); - let secret_key = slf - .key_store + let user_secret = slf + .store + .secrets() .get_user(&user_id) .ok_or(Error::MissingUserKey(user_id))?; - let authorised_entry = entry.attach_authorisation(capability, &secret_key)?; + let authorised_entry = entry.attach_authorisation(capability, &user_secret)?; slf.store + .entries() .ingest_entry(&authorised_entry, Origin::Local) .map_err(Error::Store) }), ToActor::InsertSecret { secret, reply } => { - let res = self.key_store.insert(secret); + let res = self.store.secrets().insert(secret); send_reply(reply, res.map_err(anyhow::Error::from)) } } @@ -384,10 +377,10 @@ fn send_reply(sender: oneshot::Sender, value: T) -> Result<(), SendReplyEr sender.send(value).map_err(send_reply_error) } -fn send_reply_with( +fn send_reply_with( sender: oneshot::Sender>, - this: &mut StorageThread, - f: impl FnOnce(&mut StorageThread) -> Result, + this: &mut Actor, + f: impl FnOnce(&mut Actor) -> Result, ) -> Result<(), SendReplyError> { sender.send(f(this)).map_err(send_reply_error) } diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index f662d64bd1..38a0f9b7e7 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -321,7 +321,7 @@ mod tests { willow::{Entry, InvalidPath, Path, WriteCapability}, }, session::{Role, SessionInit, SessionMode}, - store::{MemoryKeyStore, MemoryStore}, + store::memory, }; const ALPN: &[u8] = b"iroh-willow/0"; @@ -349,8 +349,8 @@ mod tests { let start = Instant::now(); let mut expected_entries = BTreeSet::new(); - let (handle_alfie, payloads_alfie) = create_stores(node_id_alfie); - let (handle_betty, payloads_betty) = 
create_stores(node_id_betty); + let (handle_alfie, payloads_alfie) = create_willow(node_id_alfie); + let (handle_betty, payloads_betty) = create_willow(node_id_betty); let (init_alfie, _) = setup_and_insert( SessionMode::ReconcileOnce, @@ -435,25 +435,8 @@ mod tests { let start = Instant::now(); let mut expected_entries = BTreeSet::new(); - let store_alfie = MemoryStore::default(); - let keys_alfie = MemoryKeyStore::default(); - let payloads_alfie = iroh_blobs::store::mem::Store::default(); - let handle_alfie = ActorHandle::spawn( - store_alfie, - keys_alfie, - payloads_alfie.clone(), - node_id_alfie, - ); - - let store_betty = MemoryStore::default(); - let keys_betty = MemoryKeyStore::default(); - let payloads_betty = iroh_blobs::store::mem::Store::default(); - let handle_betty = ActorHandle::spawn( - store_betty, - keys_betty, - payloads_betty.clone(), - node_id_betty, - ); + let (handle_alfie, payloads_alfie) = create_willow(node_id_alfie); + let (handle_betty, payloads_betty) = create_willow(node_id_betty); let (init_alfie, cap_alfie) = setup_and_insert( SessionMode::Live, @@ -564,11 +547,10 @@ mod tests { Ok((ep, node_id, addr)) } - pub fn create_stores(me: NodeId) -> (ActorHandle, iroh_blobs::store::mem::Store) { - let store = MemoryStore::default(); - let keys = MemoryKeyStore::default(); + pub fn create_willow(me: NodeId) -> (ActorHandle, iroh_blobs::store::mem::Store) { let payloads = iroh_blobs::store::mem::Store::default(); - let handle = ActorHandle::spawn(store, keys, payloads.clone(), me); + let payloads_clone = payloads.clone(); + let handle = ActorHandle::spawn(move || memory::Store::new(payloads_clone), me); (handle, payloads) } diff --git a/iroh-willow/src/session/data.rs b/iroh-willow/src/session/data.rs index 168cbd6fd7..c6a6923223 100644 --- a/iroh-willow/src/session/data.rs +++ b/iroh-willow/src/session/data.rs @@ -1,6 +1,5 @@ use futures_lite::StreamExt; -use iroh_blobs::store::Store as PayloadStore; use tokio::sync::broadcast; use crate::{ 
@@ -9,10 +8,7 @@ use crate::{ willow::AuthorisedEntry, }, session::Error, - store::{ - broadcaster::{Broadcaster, Origin}, - EntryStore, - }, + store::{traits::Storage, Origin, Store}, }; use super::channels::MessageReceiver; @@ -20,22 +16,17 @@ use super::payload::{send_payload_chunked, CurrentPayload}; use super::Session; #[derive(derive_more::Debug)] -pub struct DataSender { +pub struct DataSender { session: Session, - store: Broadcaster, - payload_store: P, + store: Store, } -impl DataSender { - pub fn new(session: Session, store: Broadcaster, payload_store: P) -> Self { - Self { - session, - store, - payload_store, - } +impl DataSender { + pub fn new(session: Session, store: Store) -> Self { + Self { session, store } } pub async fn run(mut self) -> Result<(), Error> { - let mut stream = self.store.subscribe(*self.session.id()); + let mut stream = self.store.entries().subscribe(*self.session.id()); loop { match stream.recv().await { Ok(entry) => { @@ -75,7 +66,7 @@ impl DataSender { if send_payloads { send_payload_chunked( digest, - &self.payload_store, + self.store.payloads(), &self.session, chunk_size, |bytes| DataSendPayload { bytes }.into(), @@ -87,25 +78,18 @@ impl DataSender { } #[derive(derive_more::Debug)] -pub struct DataReceiver { +pub struct DataReceiver { session: Session, - store: Broadcaster, - payload_store: P, + store: Store, current_payload: CurrentPayload, recv: MessageReceiver, } -impl DataReceiver { - pub fn new( - session: Session, - store: Broadcaster, - payload_store: P, - recv: MessageReceiver, - ) -> Self { +impl DataReceiver { + pub fn new(session: Session, store: Store, recv: MessageReceiver) -> Self { Self { session, store, - payload_store, current_payload: Default::default(), recv, } @@ -121,7 +105,7 @@ impl DataReceiver { match message { DataMessage::SendEntry(message) => self.on_send_entry(message).await?, DataMessage::SendPayload(message) => self.on_send_payload(message).await?, - DataMessage::SetMetadata(_) => todo!(), + 
DataMessage::SetMetadata(_) => {} } Ok(()) } @@ -137,6 +121,7 @@ impl DataReceiver { ) .await?; self.store + .entries() .ingest_entry(&authorised_entry, Origin::Remote(*self.session.id()))?; self.current_payload .set(authorised_entry.into_entry(), None)?; @@ -145,7 +130,7 @@ impl DataReceiver { async fn on_send_payload(&mut self, message: DataSendPayload) -> Result<(), Error> { self.current_payload - .recv_chunk(self.payload_store.clone(), message.bytes) + .recv_chunk(self.store.payloads().clone(), message.bytes) .await?; if self.current_payload.is_complete() { self.current_payload.finalize().await?; diff --git a/iroh-willow/src/session/error.rs b/iroh-willow/src/session/error.rs index ef171416a5..aeda22b4fb 100644 --- a/iroh-willow/src/session/error.rs +++ b/iroh-willow/src/session/error.rs @@ -6,7 +6,7 @@ use crate::{ sync::ResourceHandle, willow::Unauthorised, }, - store::KeyStoreError, + store::traits::SecretStoreError, util::channel::{ReadError, WriteError}, }; @@ -21,7 +21,7 @@ pub enum Error { #[error("payload size does not match expected size")] PayloadSizeMismatch, #[error("local store failed: {0}")] - KeyStore(#[from] KeyStoreError), + KeyStore(#[from] SecretStoreError), #[error("failed to receive data: {0}")] Receive(#[from] ReadError), #[error("failed to send data: {0}")] diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index 8031bb7df1..282e164d33 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -1,5 +1,4 @@ use futures_lite::StreamExt; -use iroh_blobs::store::Store as PayloadStore; use tracing::{debug, trace}; use crate::{ @@ -14,38 +13,36 @@ use crate::{ }, }, session::{ - channels::MessageReceiver, payload::send_payload_chunked, payload::CurrentPayload, + channels::MessageReceiver, + payload::{send_payload_chunked, CurrentPayload}, AreaOfInterestIntersection, Error, Session, }, store::{ - broadcaster::{Broadcaster, Origin}, - EntryStore, ReadonlyStore, 
SplitAction, SyncConfig, + traits::{EntryReader, EntryStorage, SplitAction, SplitOpts, Storage}, + Origin, Store, }, util::channel::WriteError, }; #[derive(derive_more::Debug)] -pub struct Reconciler { +pub struct Reconciler { session: Session, - store: Broadcaster, + store: Store, recv: MessageReceiver, - snapshot: S::Snapshot, + snapshot: ::Snapshot, current_payload: CurrentPayload, - payload_store: P, } -impl Reconciler { +impl Reconciler { pub fn new( session: Session, - store: Broadcaster, - payload_store: P, + store: Store, recv: MessageReceiver, ) -> Result { - let snapshot = store.snapshot()?; + let snapshot = store.entries().snapshot()?; Ok(Self { recv, store, - payload_store, snapshot, session, current_payload: CurrentPayload::new(), @@ -64,7 +61,7 @@ impl Reconciler { } Some(intersection) = self.session.next_aoi_intersection() => { if self.session.mode().is_live() { - self.store.watch_area(*self.session.id(), intersection.namespace, intersection.intersection.clone()); + self.store.entries().watch_area(*self.session.id(), intersection.namespace, intersection.intersection.clone()); } if our_role.is_alfie() { self.initiate(intersection).await?; @@ -195,6 +192,7 @@ impl Reconciler { ) .await?; self.store + .entries() .ingest_entry(&authorised_entry, Origin::Remote(*self.session.id()))?; self.current_payload .set(authorised_entry.into_entry(), Some(message.entry.available))?; @@ -203,7 +201,7 @@ impl Reconciler { async fn on_send_payload(&mut self, message: ReconciliationSendPayload) -> Result<(), Error> { self.current_payload - .recv_chunk(self.payload_store.clone(), message.bytes) + .recv_chunk(self.store.payloads().clone(), message.bytes) .await?; Ok(()) } @@ -292,7 +290,7 @@ impl Reconciler { if send_payloads && send_payload_chunked( digest, - &self.payload_store, + self.store.payloads(), &self.session, chunk_size, |bytes| ReconciliationSendPayload { bytes }.into(), @@ -315,7 +313,7 @@ impl Reconciler { range_count: u64, ) -> Result<(), Error> { // TODO: 
expose this config - let config = SyncConfig::default(); + let config = SplitOpts::default(); // clone to avoid borrow checker trouble let snapshot = self.snapshot.clone(); let mut iter = snapshot.split_range(namespace, range, &config)?.peekable(); diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 7dd1ba3f98..1baadbe416 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -1,5 +1,4 @@ use futures_lite::StreamExt; -use iroh_blobs::store::Store as PayloadStore; use strum::IntoEnumIterator; use tokio_util::sync::CancellationToken; use tracing::{debug, error_span, trace}; @@ -7,7 +6,7 @@ use tracing::{debug, error_span, trace}; use crate::{ proto::sync::{ControlIssueGuarantee, LogicalChannel, Message, SetupBindAreaOfInterest}, session::{channels::LogicalChannelReceivers, Error, Scope, Session, SessionInit}, - store::{broadcaster::Broadcaster, EntryStore, KeyStore, Shared}, + store::{traits::Storage, Store}, util::channel::Receiver, }; @@ -21,11 +20,9 @@ use super::{ const INITIAL_GUARANTEES: u64 = u64::MAX; impl Session { - pub async fn run( + pub async fn run( self, - store: Broadcaster, - key_store: Shared, - payload_store: P, + store: Store, recv: ChannelReceivers, init: SessionInit, finish: CancellationToken, @@ -54,19 +51,15 @@ impl Session { if init.mode == SessionMode::Live { self.spawn(error_span!("dat:r"), { let store = store.clone(); - let payload_store = payload_store.clone(); move |session| async move { - DataReceiver::new(session, store, payload_store, data_recv) - .run() - .await?; + DataReceiver::new(session, store, data_recv).run().await?; Ok(()) } }); self.spawn(error_span!("dat:s"), { let store = store.clone(); - let payload_store = payload_store.clone(); move |session| async move { - DataSender::new(session, store, payload_store).run().await?; + DataSender::new(session, store).run().await?; Ok(()) } }); @@ -93,7 +86,7 @@ impl Session { let finish = finish.clone(); let store = 
store.clone(); move |session| async move { - let res = Reconciler::new(session, store, payload_store, reconciliation_recv)? + let res = Reconciler::new(session, store, reconciliation_recv)? .run() .await; finish.cancel(); @@ -104,8 +97,9 @@ impl Session { // Spawn a task to handle control messages self.spawn(error_span!("ctl"), { let finish = finish.clone(); + let store = store.clone(); move |session| async move { - let res = control_loop(session, key_store, control_recv, init).await; + let res = control_loop(session, store, control_recv, init).await; finish.cancel(); res } @@ -121,7 +115,7 @@ impl Session { // closed their senders as well. session.close_senders(); // Unsubscribe from the store. This stops the data send task. - store.unsubscribe(session.id()); + store.entries().unsubscribe(session.id()); Ok(()) }); @@ -144,9 +138,9 @@ impl Session { } } -async fn control_loop( +async fn control_loop( session: Session, - key_store: Shared, + store: Store, mut control_recv: Receiver, init: SessionInit, ) -> Result<(), Error> { @@ -172,9 +166,9 @@ async fn control_loop( session.on_commitment_reveal(msg)?; let init = init.take().ok_or(Error::InvalidMessageInCurrentState)?; // send setup messages, but in a separate task to not block incoming guarantees - let key_store = key_store.clone(); - session.spawn(error_span!("setup"), |session| { - setup(key_store, session, init) + let store = store.clone(); + session.spawn(error_span!("setup"), move |session| { + setup(store, session, init) }); } Message::ControlIssueGuarantee(msg) => { @@ -189,8 +183,8 @@ async fn control_loop( Ok(()) } -async fn setup( - key_store: Shared, +async fn setup( + store: Store, session: Session, init: SessionInit, ) -> Result<(), Error> { @@ -199,7 +193,7 @@ async fn setup( // TODO: implement private area intersection let intersection_handle = 0.into(); let (our_capability_handle, message) = session.bind_and_sign_capability( - &key_store, + store.secrets(), intersection_handle, capability.clone(), 
)?; diff --git a/iroh-willow/src/session/state.rs b/iroh-willow/src/session/state.rs index 8772ee8222..ad7acac3ba 100644 --- a/iroh-willow/src/session/state.rs +++ b/iroh-willow/src/session/state.rs @@ -25,7 +25,7 @@ use crate::{ }, willow::{AuthorisedEntry, Entry}, }, - store::{KeyStore, Shared}, + store::traits::SecretStorage, util::{channel::WriteError, queue::Queue, task::JoinMap}, }; @@ -175,9 +175,9 @@ impl Session { .await } - pub fn bind_and_sign_capability( + pub fn bind_and_sign_capability( &self, - key_store: &Shared, + key_store: &K, our_intersection_handle: IntersectionHandle, capability: ReadCapability, ) -> Result<(CapabilityHandle, Option), Error> { diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index f9876bea0e..5d18638b3f 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -1,368 +1,154 @@ -use std::{cell::RefCell, collections::HashMap, rc::Rc, sync::Arc}; - -use anyhow::Result; +use std::{ + collections::HashMap, + sync::{Arc, Mutex}, +}; +use tokio::sync::broadcast; use crate::{ actor::SessionId, proto::{ - grouping::{Range, RangeEnd, ThreeDRange}, - keys::{NamespaceSecretKey, NamespaceSignature, UserId, UserSecretKey, UserSignature}, - meadowcap, - sync::Fingerprint, - willow::{AuthorisedEntry, Entry, NamespaceId}, + grouping::Area, + willow::{AuthorisedEntry, NamespaceId}, }, }; -pub mod broadcaster; +use self::traits::{EntryStorage, Storage}; -#[derive(Debug, Clone, Copy)] -pub struct SyncConfig { - /// Up to how many values to send immediately, before sending only a fingerprint. - pub max_set_size: usize, - /// `k` in the protocol, how many splits to generate. 
at least 2 - pub split_factor: usize, -} +pub mod memory; +pub mod traits; -impl Default for SyncConfig { - fn default() -> Self { - SyncConfig { - max_set_size: 1, - split_factor: 2, - } - } -} - -#[derive(Debug, thiserror::Error)] -pub enum KeyStoreError { - #[error("store failed: {0}")] - Store(#[from] anyhow::Error), - #[error("missing secret key")] - MissingKey, -} +const BROADCAST_CAP: usize = 1024; -#[derive(Debug, Copy, Clone)] -pub enum KeyScope { - Namespace, - User, +#[derive(Debug, Clone, Copy)] +pub enum Origin { + Local, + Remote(SessionId), } -pub trait KeyStore: Send + 'static { - fn insert(&mut self, secret: meadowcap::SecretKey) -> Result<(), KeyStoreError>; - fn get_user(&self, id: &UserId) -> Option<&UserSecretKey>; - fn get_namespace(&self, id: &NamespaceId) -> Option<&NamespaceSecretKey>; - - fn sign_user(&self, id: &UserId, message: &[u8]) -> Result { - Ok(self - .get_user(id) - .ok_or(KeyStoreError::MissingKey)? - .sign(message)) - } - fn sign_namespace( - &self, - id: &NamespaceId, - message: &[u8], - ) -> Result { - Ok(self - .get_namespace(id) - .ok_or(KeyStoreError::MissingKey)? 
- .sign(message)) - } +#[derive(Debug, Clone)] +pub struct Store { + storage: S, + entries: EntryStore<::Entries>, } -pub trait EntryStore: ReadonlyStore + 'static { - type Snapshot: ReadonlyStore + Clone + Send; - - fn snapshot(&mut self) -> Result; - fn ingest_entry(&mut self, entry: &AuthorisedEntry) -> Result; +#[derive(Debug, Clone)] +pub struct EntryStore { + storage: ES, + broadcast: Arc>, } -pub trait ReadonlyStore: Send + 'static { - fn fingerprint(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result; - - fn split_range( - &self, - namespace: NamespaceId, - range: &ThreeDRange, - config: &SyncConfig, - ) -> Result>>; - - fn count(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result; - - fn get_entries_with_authorisation<'a>( - &'a self, - namespace: NamespaceId, - range: &ThreeDRange, - ) -> impl Iterator> + 'a; - - fn get_entries<'a>( - &'a self, - namespace: NamespaceId, - range: &ThreeDRange, - ) -> impl Iterator> + 'a { - self.get_entries_with_authorisation(namespace, range) - .map(|e| e.map(|e| e.into_entry())) +impl Store { + pub fn entries(&self) -> &EntryStore { + &self.entries } -} -#[derive(Debug)] -pub struct Shared(Rc>); - -impl Clone for Shared { - fn clone(&self) -> Self { - Self(Rc::clone(&self.0)) + pub fn secrets(&self) -> &S::Secrets { + self.storage.secrets() } -} -impl Shared { - pub fn new(inner: S) -> Self { - Self(Rc::new(RefCell::new(inner))) + pub fn payloads(&self) -> &S::Payloads { + self.storage.payloads() } } -impl Shared { - pub fn snapshot(&self) -> Result { - self.0.borrow_mut().snapshot() +impl EntryStore { + pub fn reader(&self) -> ES::Reader { + self.storage.reader() } - pub fn ingest_entry(&self, entry: &AuthorisedEntry) -> Result { - self.0.borrow_mut().ingest_entry(entry) - } - pub fn fingerprint(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result { - self.0.borrow().fingerprint(namespace, range) - } -} - -impl Shared { - pub fn insert(&mut self, secret: meadowcap::SecretKey) -> Result<(), 
KeyStoreError> { - self.0.borrow_mut().insert(secret) + pub fn snapshot(&self) -> anyhow::Result { + self.storage.snapshot() } - pub fn sign_user(&self, id: &UserId, message: &[u8]) -> Result { - self.0.borrow().sign_user(id, message) + pub fn ingest_entry(&self, entry: &AuthorisedEntry, origin: Origin) -> anyhow::Result { + if self.storage.ingest_entry(entry)? { + self.broadcast.lock().unwrap().broadcast(entry, origin); + Ok(true) + } else { + Ok(false) + } } - pub fn sign_namespace( - &self, - id: &NamespaceId, - message: &[u8], - ) -> Result { - self.0.borrow().sign_namespace(id, message) + pub fn subscribe(&self, session_id: SessionId) -> broadcast::Receiver { + self.broadcast.lock().unwrap().subscribe(session_id) } - pub fn get_user(&self, id: &UserId) -> Option { - self.0.borrow().get_user(id).cloned() + pub fn unsubscribe(&self, session_id: &SessionId) { + self.broadcast.lock().unwrap().unsubscribe(session_id) } - pub fn get_namespace(&self, id: &NamespaceId) -> Option { - self.0.borrow().get_namespace(id).cloned() + pub fn watch_area(&self, session: SessionId, namespace: NamespaceId, area: Area) { + self.broadcast + .lock() + .unwrap() + .watch_area(session, namespace, area); } } -#[derive(Debug, Default)] -pub struct MemoryKeyStore { - user: HashMap, - namespace: HashMap, -} - -impl KeyStore for MemoryKeyStore { - fn insert(&mut self, secret: meadowcap::SecretKey) -> Result<(), KeyStoreError> { - match secret { - meadowcap::SecretKey::User(secret) => { - self.user.insert(secret.id(), secret); - } - meadowcap::SecretKey::Namespace(secret) => { - self.namespace.insert(secret.id(), secret); - } - }; - Ok(()) - } - - fn get_user(&self, id: &UserId) -> Option<&UserSecretKey> { - self.user.get(id) +impl Store { + pub fn new(store: S) -> Self { + Self { + entries: EntryStore { + storage: store.entries().clone(), + broadcast: Default::default(), + }, + storage: store, + } } - fn get_namespace(&self, id: &NamespaceId) -> Option<&NamespaceSecretKey> { - 
self.namespace.get(id) + pub fn entry_broadcast(&self) -> &EntryStore { + &self.entries } } #[derive(Debug, Default)] -pub struct MemoryStore { - entries: HashMap>, -} - -impl ReadonlyStore for MemoryStore { - fn fingerprint(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result { - let mut fingerprint = Fingerprint::default(); - for entry in self.get_entries(namespace, range) { - let entry = entry?; - fingerprint.add_entry(&entry); - } - Ok(fingerprint) - } - - fn split_range( - &self, - namespace: NamespaceId, - range: &ThreeDRange, - config: &SyncConfig, - ) -> Result>> { - let count = self.get_entries(namespace, range).count(); - if count <= config.max_set_size { - return Ok( - vec![Ok((range.clone(), SplitAction::SendEntries(count as u64)))].into_iter(), - ); - } - let mut entries: Vec = self - .get_entries(namespace, range) - .filter_map(|e| e.ok()) - .collect(); - - entries.sort_by(|e1, e2| e1.as_set_sort_tuple().cmp(&e2.as_set_sort_tuple())); - - let split_index = count / 2; - let mid = entries.get(split_index).expect("not empty"); - let mut ranges = vec![]; - // split in two halves by subspace - if mid.subspace_id != range.subspaces.start { - ranges.push(ThreeDRange::new( - Range::new(range.subspaces.start, RangeEnd::Closed(mid.subspace_id)), - range.paths.clone(), - range.times, - )); - ranges.push(ThreeDRange::new( - Range::new(mid.subspace_id, range.subspaces.end), - range.paths.clone(), - range.times, - )); - } - // split by path - else if mid.path != range.paths.start { - ranges.push(ThreeDRange::new( - range.subspaces, - Range::new( - range.paths.start.clone(), - RangeEnd::Closed(mid.path.clone()), - ), - range.times, - )); - ranges.push(ThreeDRange::new( - range.subspaces, - Range::new(mid.path.clone(), range.paths.end.clone()), - range.times, - )); - // split by time - } else { - ranges.push(ThreeDRange::new( - range.subspaces, - range.paths.clone(), - Range::new(range.times.start, RangeEnd::Closed(mid.timestamp)), - )); - 
ranges.push(ThreeDRange::new( - range.subspaces, - range.paths.clone(), - Range::new(mid.timestamp, range.times.end), - )); - } - let mut out = vec![]; - for range in ranges { - let fingerprint = self.fingerprint(namespace, &range)?; - out.push(Ok((range, SplitAction::SendFingerprint(fingerprint)))); - } - Ok(out.into_iter()) - } - - fn count(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result { - Ok(self.get_entries(namespace, range).count() as u64) - } - - fn get_entries_with_authorisation<'a>( - &'a self, - namespace: NamespaceId, - range: &ThreeDRange, - ) -> impl Iterator> + 'a { - self.entries - .get(&namespace) - .into_iter() - .flatten() - .filter(|entry| range.includes_entry(entry.entry())) - .map(|e| Result::<_, anyhow::Error>::Ok(e.clone())) - .collect::>() - .into_iter() - } +struct BroadcastInner { + senders: HashMap>, + areas: HashMap>>, } -impl ReadonlyStore for Arc { - fn fingerprint(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result { - MemoryStore::fingerprint(self, namespace, range) - } - - fn split_range( - &self, - namespace: NamespaceId, - range: &ThreeDRange, - config: &SyncConfig, - ) -> Result>> { - MemoryStore::split_range(self, namespace, range, config) - } - - fn count(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result { - MemoryStore::count(self, namespace, range) +impl BroadcastInner { + fn subscribe(&mut self, session: SessionId) -> broadcast::Receiver { + self.senders + .entry(session) + .or_insert_with(|| broadcast::Sender::new(BROADCAST_CAP)) + .subscribe() } - fn get_entries_with_authorisation<'a>( - &'a self, - namespace: NamespaceId, - range: &ThreeDRange, - ) -> impl Iterator> + 'a { - MemoryStore::get_entries_with_authorisation(self, namespace, range) + fn unsubscribe(&mut self, session: &SessionId) { + self.senders.remove(session); + self.areas.retain(|_namespace, sessions| { + sessions.remove(session); + !sessions.is_empty() + }); } -} - -impl EntryStore for MemoryStore { - type Snapshot = 
Arc; - fn snapshot(&mut self) -> Result { - Ok(Arc::new(Self { - entries: self.entries.clone(), - })) + fn watch_area(&mut self, session: SessionId, namespace: NamespaceId, area: Area) { + self.areas + .entry(namespace) + .or_default() + .entry(session) + .or_default() + .push(area) } - fn ingest_entry(&mut self, entry: &AuthorisedEntry) -> Result { - let entries = self.entries.entry(entry.namespace_id()).or_default(); - let new = entry.entry(); - let mut to_remove = vec![]; - for (i, existing) in entries.iter().enumerate() { - let existing = existing.entry(); - if existing == new { - return Ok(false); - } - if existing.subspace_id == new.subspace_id - && existing.path.is_prefix_of(&new.path) - && existing.is_newer_than(new) - { - // we cannot insert the entry, a newer entry exists - return Ok(false); + fn broadcast(&mut self, entry: &AuthorisedEntry, origin: Origin) { + let Some(sessions) = self.areas.get_mut(&entry.namespace_id()) else { + return; + }; + for (session_id, areas) in sessions { + if let Origin::Remote(origin) = origin { + if origin == *session_id { + continue; + } } - if new.subspace_id == existing.subspace_id - && new.path.is_prefix_of(&existing.path) - && new.is_newer_than(existing) - { - to_remove.push(i); + if areas.iter().any(|area| area.includes_entry(entry.entry())) { + self.senders + .get(session_id) + .expect("session sender to exist") + .send(entry.clone()) + .ok(); } } - for i in to_remove { - entries.remove(i); - } - entries.push(entry.clone()); - Ok(true) } } - -pub type RangeSplit = (ThreeDRange, SplitAction); - -#[derive(Debug)] -pub enum SplitAction { - SendFingerprint(Fingerprint), - SendEntries(u64), -} diff --git a/iroh-willow/src/store/broadcaster.rs b/iroh-willow/src/store/broadcaster.rs deleted file mode 100644 index e4d9c839e2..0000000000 --- a/iroh-willow/src/store/broadcaster.rs +++ /dev/null @@ -1,134 +0,0 @@ -use std::{ - collections::HashMap, - sync::{Arc, Mutex}, -}; -use tokio::sync::broadcast; - -use crate::{ - 
proto::{ - grouping::Area, - willow::{AuthorisedEntry, NamespaceId}, - }, - store::{EntryStore, Shared}, -}; - -use super::SessionId; - -const BROADCAST_CAP: usize = 1024; - -#[derive(Debug, Clone, Copy)] -pub enum Origin { - Local, - Remote(SessionId), -} - -#[derive(Debug)] -pub struct Broadcaster { - store: Shared, - broadcast: Arc>, -} - -impl Clone for Broadcaster { - fn clone(&self) -> Self { - Broadcaster { - store: self.store.clone(), - broadcast: self.broadcast.clone(), - } - } -} - -impl std::ops::Deref for Broadcaster { - type Target = Shared; - fn deref(&self) -> &Self::Target { - &self.store - } -} - -impl Broadcaster { - pub fn new(store: Shared) -> Self { - Self { - store, - broadcast: Default::default(), - } - } - - pub fn ingest_entry( - &mut self, - entry: &AuthorisedEntry, - origin: Origin, - ) -> anyhow::Result { - if self.store.ingest_entry(entry)? { - self.broadcast.lock().unwrap().broadcast(entry, origin); - Ok(true) - } else { - Ok(false) - } - } - - pub fn subscribe(&self, session_id: SessionId) -> broadcast::Receiver { - self.broadcast.lock().unwrap().subscribe(session_id) - } - - pub fn unsubscribe(&self, session_id: &SessionId) { - self.broadcast.lock().unwrap().unsubscribe(session_id) - } - - pub fn watch_area(&mut self, session: SessionId, namespace: NamespaceId, area: Area) { - self.broadcast - .lock() - .unwrap() - .watch_area(session, namespace, area); - } -} - -#[derive(Debug, Default)] -struct BroadcasterInner { - senders: HashMap>, - areas: HashMap>>, -} - -impl BroadcasterInner { - fn subscribe(&mut self, session: SessionId) -> broadcast::Receiver { - self.senders - .entry(session) - .or_insert_with(|| broadcast::Sender::new(BROADCAST_CAP)) - .subscribe() - } - - fn unsubscribe(&mut self, session: &SessionId) { - self.senders.remove(session); - self.areas.retain(|_namespace, sessions| { - sessions.remove(session); - !sessions.is_empty() - }); - } - - fn watch_area(&mut self, session: SessionId, namespace: NamespaceId, area: 
Area) { - self.areas - .entry(namespace) - .or_default() - .entry(session) - .or_default() - .push(area) - } - - fn broadcast(&mut self, entry: &AuthorisedEntry, origin: Origin) { - let Some(sessions) = self.areas.get_mut(&entry.namespace_id()) else { - return; - }; - for (session_id, areas) in sessions { - if let Origin::Remote(origin) = origin { - if origin == *session_id { - continue; - } - } - if areas.iter().any(|area| area.includes_entry(entry.entry())) { - self.senders - .get(session_id) - .expect("session sender to exist") - .send(entry.clone()) - .ok(); - } - } - } -} diff --git a/iroh-willow/src/store/memory.rs b/iroh-willow/src/store/memory.rs new file mode 100644 index 0000000000..dc8db9e3ac --- /dev/null +++ b/iroh-willow/src/store/memory.rs @@ -0,0 +1,233 @@ +use std::cell::RefCell; +use std::collections::HashMap; +use std::rc::Rc; + +use anyhow::Result; + +use crate::{ + proto::{ + grouping::{Range, RangeEnd, ThreeDRange}, + keys::{NamespaceSecretKey, UserId, UserSecretKey}, + meadowcap, + sync::Fingerprint, + willow::{AuthorisedEntry, Entry, NamespaceId}, + }, + store::traits::{self, RangeSplit, SplitAction, SplitOpts}, +}; + +#[derive(Debug, Clone, Default)] +pub struct Store { + secrets: Rc>, + entries: Rc>, + payloads: iroh_blobs::store::mem::Store, +} + +impl Store { + pub fn new(payloads: iroh_blobs::store::mem::Store) -> Self { + Self { + payloads, + secrets: Default::default(), + entries: Default::default(), + } + } +} + +impl traits::Storage for Store { + type Entries = Rc>; + type Secrets = Rc>; + type Payloads = iroh_blobs::store::mem::Store; + + fn entries(&self) -> &Self::Entries { + &self.entries + } + + fn secrets(&self) -> &Self::Secrets { + &self.secrets + } + + fn payloads(&self) -> &Self::Payloads { + &self.payloads + } +} + +#[derive(Debug, Default)] +pub struct SecretStore { + user: HashMap, + namespace: HashMap, +} + +impl traits::SecretStorage for Rc> { + fn insert(&self, secret: meadowcap::SecretKey) -> Result<(), 
traits::SecretStoreError> { + let mut slf = self.borrow_mut(); + match secret { + meadowcap::SecretKey::User(secret) => { + slf.user.insert(secret.id(), secret); + } + meadowcap::SecretKey::Namespace(secret) => { + slf.namespace.insert(secret.id(), secret); + } + }; + Ok(()) + } + + fn get_user(&self, id: &UserId) -> Option { + self.borrow().user.get(id).cloned() + } + + fn get_namespace(&self, id: &NamespaceId) -> Option { + self.borrow().namespace.get(id).cloned() + } +} + +#[derive(Debug, Default)] +pub struct EntryStore { + entries: HashMap>, +} + +// impl + 'static> ReadonlyStore for T { +impl traits::EntryReader for Rc> { + fn fingerprint(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result { + let mut fingerprint = Fingerprint::default(); + for entry in self.get_entries(namespace, range) { + let entry = entry?; + fingerprint.add_entry(&entry); + } + Ok(fingerprint) + } + + fn split_range( + &self, + namespace: NamespaceId, + range: &ThreeDRange, + config: &SplitOpts, + ) -> Result>> { + let count = self.get_entries(namespace, range).count(); + if count <= config.max_set_size { + return Ok( + vec![Ok((range.clone(), SplitAction::SendEntries(count as u64)))].into_iter(), + ); + } + let mut entries: Vec = self + .get_entries(namespace, range) + .filter_map(|e| e.ok()) + .collect(); + + entries.sort_by(|e1, e2| e1.as_set_sort_tuple().cmp(&e2.as_set_sort_tuple())); + + let split_index = count / 2; + let mid = entries.get(split_index).expect("not empty"); + let mut ranges = vec![]; + // split in two halves by subspace + if mid.subspace_id != range.subspaces.start { + ranges.push(ThreeDRange::new( + Range::new(range.subspaces.start, RangeEnd::Closed(mid.subspace_id)), + range.paths.clone(), + range.times, + )); + ranges.push(ThreeDRange::new( + Range::new(mid.subspace_id, range.subspaces.end), + range.paths.clone(), + range.times, + )); + } + // split by path + else if mid.path != range.paths.start { + ranges.push(ThreeDRange::new( + range.subspaces, + 
Range::new( + range.paths.start.clone(), + RangeEnd::Closed(mid.path.clone()), + ), + range.times, + )); + ranges.push(ThreeDRange::new( + range.subspaces, + Range::new(mid.path.clone(), range.paths.end.clone()), + range.times, + )); + // split by time + } else { + ranges.push(ThreeDRange::new( + range.subspaces, + range.paths.clone(), + Range::new(range.times.start, RangeEnd::Closed(mid.timestamp)), + )); + ranges.push(ThreeDRange::new( + range.subspaces, + range.paths.clone(), + Range::new(mid.timestamp, range.times.end), + )); + } + let mut out = vec![]; + for range in ranges { + let fingerprint = self.fingerprint(namespace, &range)?; + out.push(Ok((range, SplitAction::SendFingerprint(fingerprint)))); + } + Ok(out.into_iter()) + } + + fn count(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result { + Ok(self.get_entries(namespace, range).count() as u64) + } + + fn get_entries_with_authorisation<'a>( + &'a self, + namespace: NamespaceId, + range: &ThreeDRange, + ) -> impl Iterator> + 'a { + let slf = self.borrow(); + slf.entries + .get(&namespace) + .into_iter() + .flatten() + .filter(|entry| range.includes_entry(entry.entry())) + .map(|e| Result::<_, anyhow::Error>::Ok(e.clone())) + .collect::>() + .into_iter() + } +} + +impl traits::EntryStorage for Rc> { + type Snapshot = Self; + type Reader = Self; + + fn reader(&self) -> Self::Reader { + self.clone() + } + + fn snapshot(&self) -> Result { + let entries = self.borrow().entries.clone(); + Ok(Rc::new(RefCell::new(EntryStore { entries }))) + } + + fn ingest_entry(&self, entry: &AuthorisedEntry) -> Result { + let mut slf = self.borrow_mut(); + let entries = slf.entries.entry(entry.namespace_id()).or_default(); + let new = entry.entry(); + let mut to_remove = vec![]; + for (i, existing) in entries.iter().enumerate() { + let existing = existing.entry(); + if existing == new { + return Ok(false); + } + if existing.subspace_id == new.subspace_id + && existing.path.is_prefix_of(&new.path) + && 
existing.is_newer_than(new) + { + // we cannot insert the entry, a newer entry exists + return Ok(false); + } + if new.subspace_id == existing.subspace_id + && new.path.is_prefix_of(&existing.path) + && new.is_newer_than(existing) + { + to_remove.push(i); + } + } + for i in to_remove { + entries.remove(i); + } + entries.push(entry.clone()); + Ok(true) + } +} diff --git a/iroh-willow/src/store/traits.rs b/iroh-willow/src/store/traits.rs new file mode 100644 index 0000000000..a1abd300e3 --- /dev/null +++ b/iroh-willow/src/store/traits.rs @@ -0,0 +1,117 @@ +use anyhow::Result; + +use crate::proto::{ + grouping::ThreeDRange, + keys::{NamespaceSecretKey, NamespaceSignature, UserId, UserSecretKey, UserSignature}, + meadowcap, + sync::Fingerprint, + willow::{AuthorisedEntry, Entry, NamespaceId}, +}; + +pub trait Storage: Clone + 'static { + type Entries: EntryStorage; + type Secrets: SecretStorage; + type Payloads: iroh_blobs::store::Store; + fn entries(&self) -> &Self::Entries; + fn secrets(&self) -> &Self::Secrets; + fn payloads(&self) -> &Self::Payloads; +} + +pub trait SecretStorage: std::fmt::Debug + 'static { + fn insert(&self, secret: meadowcap::SecretKey) -> Result<(), SecretStoreError>; + fn get_user(&self, id: &UserId) -> Option; + fn get_namespace(&self, id: &NamespaceId) -> Option; + + fn sign_user(&self, id: &UserId, message: &[u8]) -> Result { + Ok(self + .get_user(id) + .ok_or(SecretStoreError::MissingKey)? + .sign(message)) + } + fn sign_namespace( + &self, + id: &NamespaceId, + message: &[u8], + ) -> Result { + Ok(self + .get_namespace(id) + .ok_or(SecretStoreError::MissingKey)? 
+ .sign(message)) + } +} + +pub trait EntryStorage: EntryReader + Clone + std::fmt::Debug + 'static { + type Reader: EntryReader; + type Snapshot: EntryReader + Clone; + + fn reader(&self) -> Self::Reader; + fn snapshot(&self) -> Result; + fn ingest_entry(&self, entry: &AuthorisedEntry) -> Result; +} + +pub trait EntryReader: 'static { + fn fingerprint(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result; + + fn split_range( + &self, + namespace: NamespaceId, + range: &ThreeDRange, + config: &SplitOpts, + ) -> Result>>; + + fn count(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result; + + fn get_entries_with_authorisation<'a>( + &'a self, + namespace: NamespaceId, + range: &ThreeDRange, + ) -> impl Iterator> + 'a; + + fn get_entries( + &self, + namespace: NamespaceId, + range: &ThreeDRange, + ) -> impl Iterator> { + self.get_entries_with_authorisation(namespace, range) + .map(|e| e.map(|e| e.into_entry())) + } +} + +#[derive(Debug, thiserror::Error)] +pub enum SecretStoreError { + #[error("store failed: {0}")] + Store(#[from] anyhow::Error), + #[error("missing secret key")] + MissingKey, +} + +#[derive(Debug, Copy, Clone)] +pub enum KeyScope { + Namespace, + User, +} + +pub type RangeSplit = (ThreeDRange, SplitAction); + +#[derive(Debug)] +pub enum SplitAction { + SendFingerprint(Fingerprint), + SendEntries(u64), +} + +#[derive(Debug, Clone, Copy)] +pub struct SplitOpts { + /// Up to how many values to send immediately, before sending only a fingerprint. + pub max_set_size: usize, + /// `k` in the protocol, how many splits to generate. 
at least 2 + pub split_factor: usize, +} + +impl Default for SplitOpts { + fn default() -> Self { + SplitOpts { + max_set_size: 1, + split_factor: 2, + } + } +} From 6cb0017e222535da1c2ef03a27b38c403b16b61c Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Mon, 3 Jun 2024 22:30:08 +0200 Subject: [PATCH 059/198] cleanup and renames --- iroh-willow/src/actor.rs | 8 +++----- iroh-willow/src/session.rs | 2 ++ iroh-willow/src/session/data.rs | 11 +++++------ iroh-willow/src/session/payload.rs | 11 +++++++---- iroh-willow/src/session/reconciler.rs | 4 ++-- iroh-willow/src/session/state.rs | 3 +-- iroh-willow/src/store.rs | 6 +++--- 7 files changed, 23 insertions(+), 22 deletions(-) diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/actor.rs index b5fc7f524b..66730deef9 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -15,7 +15,7 @@ use crate::{ meadowcap, willow::{AuthorisedEntry, Entry, WriteCapability}, }, - session::{Channels, Error, Role, Session, SessionInit}, + session::{Channels, Error, Role, Session, SessionId, SessionInit}, store::{ traits::{EntryReader, SecretStorage, Storage}, Origin, Store, @@ -25,8 +25,6 @@ use crate::{ pub const INBOX_CAP: usize = 1024; -pub type SessionId = u64; - #[derive(Debug, Clone)] pub struct ActorHandle { tx: flume::Sender, @@ -332,7 +330,7 @@ impl Actor { } } ToActor::IngestEntry { entry, reply } => { - let res = self.store.entries().ingest_entry(&entry, Origin::Local); + let res = self.store.entries().ingest(&entry, Origin::Local); send_reply(reply, res) } ToActor::InsertEntry { @@ -349,7 +347,7 @@ impl Actor { let authorised_entry = entry.attach_authorisation(capability, &user_secret)?; slf.store .entries() - .ingest_entry(&authorised_entry, Origin::Local) + .ingest(&authorised_entry, Origin::Local) .map_err(Error::Store) }), ToActor::InsertSecret { secret, reply } => { diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index f76e222489..5496994576 100644 --- 
a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -18,6 +18,8 @@ pub use self::channels::Channels; pub use self::error::Error; pub use self::state::Session; +pub type SessionId = u64; + /// Data from the initial transmission /// /// This happens before the session is initialized. diff --git a/iroh-willow/src/session/data.rs b/iroh-willow/src/session/data.rs index c6a6923223..2b2575b4c3 100644 --- a/iroh-willow/src/session/data.rs +++ b/iroh-willow/src/session/data.rs @@ -7,7 +7,7 @@ use crate::{ sync::{DataMessage, DataSendEntry, DataSendPayload}, willow::AuthorisedEntry, }, - session::Error, + session::{payload::DEFAULT_CHUNK_SIZE, Error}, store::{traits::Storage, Origin, Store}, }; @@ -34,7 +34,7 @@ impl DataSender { } Err(broadcast::error::RecvError::Closed) => break, Err(broadcast::error::RecvError::Lagged(_count)) => { - // TODO + // TODO: Queue another reconciliation } } } @@ -62,13 +62,12 @@ impl DataSender { // TODO: only send payload if configured to do so and/or under size limit. 
let send_payloads = true; - let chunk_size = 1024 * 64; if send_payloads { send_payload_chunked( digest, self.store.payloads(), &self.session, - chunk_size, + DEFAULT_CHUNK_SIZE, |bytes| DataSendPayload { bytes }.into(), ) .await?; @@ -122,7 +121,7 @@ impl DataReceiver { .await?; self.store .entries() - .ingest_entry(&authorised_entry, Origin::Remote(*self.session.id()))?; + .ingest(&authorised_entry, Origin::Remote(*self.session.id()))?; self.current_payload .set(authorised_entry.into_entry(), None)?; Ok(()) @@ -130,7 +129,7 @@ impl DataReceiver { async fn on_send_payload(&mut self, message: DataSendPayload) -> Result<(), Error> { self.current_payload - .recv_chunk(self.store.payloads().clone(), message.bytes) + .recv_chunk(self.store.payloads(), message.bytes) .await?; if self.current_payload.is_complete() { self.current_payload.finalize().await?; diff --git a/iroh-willow/src/session/payload.rs b/iroh-willow/src/session/payload.rs index 12534eedeb..ab287980c2 100644 --- a/iroh-willow/src/session/payload.rs +++ b/iroh-willow/src/session/payload.rs @@ -14,6 +14,8 @@ use crate::proto::{ use super::{Error, Session}; +pub const DEFAULT_CHUNK_SIZE: usize = 1024 * 64; + pub async fn send_payload_chunked( digest: PayloadDigest, payload_store: &P, @@ -83,13 +85,15 @@ impl CurrentPayload { pub async fn recv_chunk( &mut self, - store: P, + store: &P, chunk: Bytes, ) -> anyhow::Result<()> { let state = self.0.as_mut().ok_or(Error::InvalidMessageInCurrentState)?; let len = chunk.len(); + let store = store.clone(); let writer = state.writer.get_or_insert_with(move || { let (tx, rx) = flume::bounded(1); + let store = store.clone(); let fut = async move { store .import_stream( @@ -99,11 +103,10 @@ impl CurrentPayload { ) .await }; - let writer = PayloadWriter { + PayloadWriter { fut: fut.boxed_local(), sender: tx, - }; - writer + } }); writer.sender.send_async(Ok(chunk)).await?; state.received_length += len as u64; diff --git a/iroh-willow/src/session/reconciler.rs 
b/iroh-willow/src/session/reconciler.rs index 282e164d33..1ecb7a9a26 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -193,7 +193,7 @@ impl Reconciler { .await?; self.store .entries() - .ingest_entry(&authorised_entry, Origin::Remote(*self.session.id()))?; + .ingest(&authorised_entry, Origin::Remote(*self.session.id()))?; self.current_payload .set(authorised_entry.into_entry(), Some(message.entry.available))?; Ok(()) @@ -201,7 +201,7 @@ impl Reconciler { async fn on_send_payload(&mut self, message: ReconciliationSendPayload) -> Result<(), Error> { self.current_payload - .recv_chunk(self.store.payloads().clone(), message.bytes) + .recv_chunk(self.store.payloads(), message.bytes) .await?; Ok(()) } diff --git a/iroh-willow/src/session/state.rs b/iroh-willow/src/session/state.rs index ad7acac3ba..d07e5c8a53 100644 --- a/iroh-willow/src/session/state.rs +++ b/iroh-willow/src/session/state.rs @@ -11,7 +11,6 @@ use futures_lite::Stream; use tracing::{Instrument, Span}; use crate::{ - actor::SessionId, net::InitialTransmission, proto::{ challenge::ChallengeState, @@ -32,7 +31,7 @@ use crate::{ use super::{ channels::ChannelSenders, resource::{ResourceMap, ResourceMaps}, - AreaOfInterestIntersection, Error, Role, Scope, SessionMode, + AreaOfInterestIntersection, Error, Role, Scope, SessionId, SessionMode, }; #[derive(Debug, Clone)] diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index 5d18638b3f..c8d6720ce1 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -5,11 +5,11 @@ use std::{ use tokio::sync::broadcast; use crate::{ - actor::SessionId, proto::{ grouping::Area, willow::{AuthorisedEntry, NamespaceId}, }, + session::SessionId, }; use self::traits::{EntryStorage, Storage}; @@ -28,7 +28,7 @@ pub enum Origin { #[derive(Debug, Clone)] pub struct Store { storage: S, - entries: EntryStore<::Entries>, + entries: EntryStore, } #[derive(Debug, Clone)] @@ -60,7 +60,7 @@ impl EntryStore { 
self.storage.snapshot() } - pub fn ingest_entry(&self, entry: &AuthorisedEntry, origin: Origin) -> anyhow::Result { + pub fn ingest(&self, entry: &AuthorisedEntry, origin: Origin) -> anyhow::Result { if self.storage.ingest_entry(entry)? { self.broadcast.lock().unwrap().broadcast(entry, origin); Ok(true) From 836e225db43a31f470ae1db82c74d1fdf5f9abe6 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 4 Jun 2024 11:04:13 +0200 Subject: [PATCH 060/198] more cleanup --- iroh-willow/src/net.rs | 37 ++++++++++++++--------------- iroh-willow/src/proto/grouping.rs | 35 ++++++++++----------------- iroh-willow/src/session/payload.rs | 4 ---- iroh-willow/src/session/resource.rs | 8 +++---- 4 files changed, 33 insertions(+), 51 deletions(-) diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 38a0f9b7e7..57f82cd4b4 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -160,7 +160,6 @@ async fn open_logical_channels( .iter_mut() .find_map(|(ch, streams)| (*ch == channel).then(|| streams.take())) .flatten() - .ok_or(MissingChannel(channel)) .map(|(send_stream, recv_stream)| { spawn_channel( join_set, @@ -172,6 +171,7 @@ async fn open_logical_channels( recv_stream, ) }) + .ok_or(MissingChannel(channel)) }; let rec = take_and_spawn_channel(LogicalChannel::Reconciliation)?; @@ -208,7 +208,7 @@ fn spawn_channel( recv_stream: RecvStream, ) -> (Sender, Receiver) { let (sender, outbound_reader) = outbound_channel(send_cap, guarantees); - let (inbound_writer, recveiver) = inbound_channel(recv_cap); + let (inbound_writer, receiver) = inbound_channel(recv_cap); let recv_fut = recv_loop(recv_stream, inbound_writer) .map_err(move |e| e.context(format!("receive loop for {ch:?} failed"))) @@ -222,7 +222,7 @@ fn spawn_channel( join_set.spawn(send_fut); - (sender, recveiver) + (sender, receiver) } async fn recv_loop(mut recv_stream: RecvStream, mut channel_writer: Writer) -> anyhow::Result<()> { @@ -308,7 +308,7 @@ mod tests { use 
iroh_net::{Endpoint, NodeAddr, NodeId}; use rand::SeedableRng; use rand_core::CryptoRngCore; - use tracing::{debug, info}; + use tracing::info; use crate::{ actor::ActorHandle, @@ -336,13 +336,6 @@ mod tests { let (ep_alfie, node_id_alfie, _) = create_endpoint(&mut rng).await?; let (ep_betty, node_id_betty, addr_betty) = create_endpoint(&mut rng).await?; - debug!("start connect"); - let (conn_alfie, conn_betty) = tokio::join!( - async move { ep_alfie.connect(addr_betty, ALPN).await.unwrap() }, - async move { ep_betty.accept().await.unwrap().await.unwrap() } - ); - info!("connected! now start reconciliation"); - let namespace_secret = NamespaceSecretKey::generate(&mut rng, NamespaceKind::Owned); let namespace_id = namespace_secret.id(); @@ -374,10 +367,16 @@ mod tests { |n| Path::new(&[b"betty", n.to_string().as_bytes()]), ) .await?; - info!("init took {:?}", start.elapsed()); + let start = Instant::now(); + let (conn_alfie, conn_betty) = tokio::join!( + async move { ep_alfie.connect(addr_betty, ALPN).await.unwrap() }, + async move { ep_betty.accept().await.unwrap().await.unwrap() } + ); + info!("connecting took {:?}", start.elapsed()); + let start = Instant::now(); let (session_alfie, session_betty) = tokio::join!( run( node_id_alfie, @@ -422,13 +421,6 @@ mod tests { let (ep_alfie, node_id_alfie, _) = create_endpoint(&mut rng).await?; let (ep_betty, node_id_betty, addr_betty) = create_endpoint(&mut rng).await?; - debug!("start connect"); - let (conn_alfie, conn_betty) = tokio::join!( - async move { ep_alfie.connect(addr_betty, ALPN).await.unwrap() }, - async move { ep_betty.accept().await.unwrap().await.unwrap() } - ); - info!("connected! 
now start reconciliation"); - let namespace_secret = NamespaceSecretKey::generate(&mut rng, NamespaceKind::Owned); let namespace_id = namespace_secret.id(); @@ -462,8 +454,15 @@ mod tests { .await?; info!("init took {:?}", start.elapsed()); + let start = Instant::now(); + let (conn_alfie, conn_betty) = tokio::join!( + async move { ep_alfie.connect(addr_betty, ALPN).await.unwrap() }, + async move { ep_betty.accept().await.unwrap().await.unwrap() } + ); + info!("connecting took {:?}", start.elapsed()); + let start = Instant::now(); let (done_tx, done_rx) = tokio::sync::oneshot::channel(); // alfie insert 3 enries after waiting a second diff --git a/iroh-willow/src/proto/grouping.rs b/iroh-willow/src/proto/grouping.rs index 1c8f20cc94..0cc308daf5 100644 --- a/iroh-willow/src/proto/grouping.rs +++ b/iroh-willow/src/proto/grouping.rs @@ -64,7 +64,7 @@ impl ThreeDRange { self.subspaces.is_empty() || self.paths.is_empty() || self.times.is_empty() } - /// Get the intersection between this and another range. + /// Returns the intersection between `self` and `other`. pub fn intersection(&self, other: &ThreeDRange) -> Option { let paths = self.paths.intersection(&other.paths)?; let times = self.times.intersection(&other.times)?; @@ -190,17 +190,6 @@ impl PartialOrd for RangeEnd { } } -// impl PartialOrd for RangeEnd { -// fn partial_cmp(&self, other: &T) -> Option { -// // match (self, other) { -// // (RangeEnd::Open, RangeEnd::Closed(_)) => Some(Ordering::Greater), -// // (RangeEnd::Closed(_), RangeEnd::Open) => Some(Ordering::Less), -// // (RangeEnd::Closed(a), RangeEnd::Closed(b)) => a.partial_cmp(b), -// // (RangeEnd::Open, RangeEnd::Open) => Some(Ordering::Equal), -// // } -// } -// } - impl RangeEnd { /// Returns `true` if the range end is open, or if `value` is strictly less than the range end. 
pub fn includes(&self, value: &T) -> bool { @@ -237,7 +226,7 @@ impl AreaOfInterest { #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash)] pub struct Area { /// To be included in this Area, an Entry’s subspace_id must be equal to the subspace_id, unless it is any. - pub subspace_id: SubspaceArea, + pub subspace: SubspaceArea, /// To be included in this Area, an Entry’s path must be prefixed by the path. pub path: Path, /// To be included in this Area, an Entry’s timestamp must be included in the times. @@ -245,9 +234,9 @@ pub struct Area { } impl Area { - pub const fn new(subspace_id: SubspaceArea, path: Path, times: Range) -> Self { + pub const fn new(subspace: SubspaceArea, path: Path, times: Range) -> Self { Self { - subspace_id, + subspace, path, times, } @@ -270,13 +259,13 @@ impl Area { } pub fn includes_entry(&self, entry: &Entry) -> bool { - self.subspace_id.includes_subspace(&entry.subspace_id) + self.subspace.includes_subspace(&entry.subspace_id) && self.path.is_prefix_of(&entry.path) && self.times.includes(&entry.timestamp) } pub fn includes_area(&self, other: &Area) -> bool { - self.subspace_id.includes(&other.subspace_id) + self.subspace.includes(&other.subspace) && self.path.is_prefix_of(&other.path) && self.times.includes_range(&other.times) } @@ -287,10 +276,10 @@ impl Area { RangeEnd::Open => true, RangeEnd::Closed(path) => self.path.is_prefix_of(path), }; - let subspace_start = self.subspace_id.includes_subspace(&range.subspaces.start); + let subspace_start = self.subspace.includes_subspace(&range.subspaces.start); let subspace_end = match range.subspaces.end { RangeEnd::Open => true, - RangeEnd::Closed(subspace) => self.subspace_id.includes_subspace(&subspace), + RangeEnd::Closed(subspace) => self.subspace.includes_subspace(&subspace), }; subspace_start && subspace_end @@ -300,11 +289,11 @@ impl Area { } pub fn into_range(&self) -> ThreeDRange { - let subspace_start = match self.subspace_id { + let subspace_start = match 
self.subspace { SubspaceArea::Any => SubspaceId::default(), SubspaceArea::Id(id) => id, }; - let subspace_end = match self.subspace_id { + let subspace_end = match self.subspace { SubspaceArea::Any => RangeEnd::Open, SubspaceArea::Id(id) => subspace_range_end(id), }; @@ -318,11 +307,11 @@ impl Area { } pub fn intersection(&self, other: &Area) -> Option { - let subspace_id = self.subspace_id.intersection(&other.subspace_id)?; + let subspace_id = self.subspace.intersection(&other.subspace)?; let path = self.path.intersection(&other.path)?; let times = self.times.intersection(&other.times)?; Some(Self { - subspace_id, + subspace: subspace_id, times, path, }) diff --git a/iroh-willow/src/session/payload.rs b/iroh-willow/src/session/payload.rs index ab287980c2..655757f01f 100644 --- a/iroh-willow/src/session/payload.rs +++ b/iroh-willow/src/session/payload.rs @@ -1,6 +1,5 @@ use bytes::Bytes; use futures_lite::{future::BoxedLocal, FutureExt}; -// use iroh_blobs::{store::Store as PayloadStore, util::progress::IgnoreProgressSender, TempTag}; use iroh_blobs::{ store::{bao_tree::io::fsm::AsyncSliceReader, MapEntry, Store as PayloadStore}, util::progress::IgnoreProgressSender, @@ -110,9 +109,6 @@ impl CurrentPayload { }); writer.sender.send_async(Ok(chunk)).await?; state.received_length += len as u64; - // if state.received_length >= state.expected_length { - // self.finalize().await?; - // } Ok(()) } diff --git a/iroh-willow/src/session/resource.rs b/iroh-willow/src/session/resource.rs index 1507bd9894..63e027b6f7 100644 --- a/iroh-willow/src/session/resource.rs +++ b/iroh-willow/src/session/resource.rs @@ -27,13 +27,11 @@ impl ResourceMaps { } } - pub fn get( - &self, - selector: F, - handle: H, - ) -> Result + pub fn get(&self, selector: F, handle: H) -> Result where + H: IsHandle, F: for<'a> Fn(&'a Self) -> &'a ResourceMap, + R: Eq + PartialEq + Clone, { let res = selector(self); res.try_get(&handle).cloned() From a49f836d1a4949f266597942aa23500d133bc057 Mon Sep 17 
00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Fri, 14 Jun 2024 20:11:20 +0200 Subject: [PATCH 061/198] cleanups and renames --- iroh-willow/src/actor.rs | 21 +++++++++++---------- iroh-willow/src/net.rs | 19 ++++++------------- iroh-willow/src/session.rs | 7 +++++++ iroh-willow/src/session/run.rs | 12 ++++++------ iroh-willow/src/session/state.rs | 2 +- 5 files changed, 31 insertions(+), 30 deletions(-) diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/actor.rs index 66730deef9..4708e367a5 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -8,14 +8,13 @@ use tokio_util::sync::CancellationToken; use tracing::{debug, error, error_span, trace, warn, Instrument}; use crate::{ - net::InitialTransmission, proto::{ grouping::ThreeDRange, keys::NamespaceId, meadowcap, willow::{AuthorisedEntry, Entry, WriteCapability}, }, - session::{Channels, Error, Role, Session, SessionId, SessionInit}, + session::{Channels, Error, InitialTransmission, Role, Session, SessionId, SessionInit}, store::{ traits::{EntryReader, SecretStorage, Storage}, Origin, Store, @@ -144,7 +143,7 @@ impl Drop for ActorHandle { // this means we're dropping the last reference if let Some(handle) = Arc::get_mut(&mut self.join_handle) { self.tx.send(ToActor::Shutdown { reply: None }).ok(); - let handle = handle.take().expect("this can only run once"); + let handle = handle.take().expect("may only run once"); if let Err(err) = handle.join() { warn!(?err, "Failed to join sync actor"); } @@ -155,7 +154,7 @@ impl Drop for ActorHandle { #[derive(Debug)] pub struct SessionHandle { on_finish: future::Shared>>>, - finish: CancellationToken, + cancel_token: CancellationToken, } impl SessionHandle { @@ -171,8 +170,8 @@ impl SessionHandle { /// After calling this, no further protocol messages will be sent from this node. /// Previously queued messages will still be sent out. The session will only be closed /// once the other peer closes their senders as well. 
- pub fn finish(&self) { - self.finish.cancel(); + pub fn close(&self) { + self.cancel_token.cancel(); } } @@ -185,7 +184,6 @@ pub enum ToActor { #[debug(skip)] channels: Channels, init: SessionInit, - // on_finish: oneshot::Sender>, reply: oneshot::Sender>, }, GetEntries { @@ -290,10 +288,10 @@ impl Actor { let session = Session::new(id, init.mode, our_role, send, initial_transmission); let store = self.store.clone(); - let finish = CancellationToken::new(); + let cancel_token = CancellationToken::new(); let future = session - .run(store, recv, init, finish.clone()) + .run(store, recv, init, cancel_token.clone()) .instrument(error_span!("session", peer = %peer.fmt_short())); let task_key = self.session_tasks.spawn_local(id, future); @@ -313,7 +311,10 @@ impl Actor { }) .boxed() .shared(); - let handle = SessionHandle { on_finish, finish }; + let handle = SessionHandle { + on_finish, + cancel_token, + }; send_reply(reply, Ok(handle)) } ToActor::GetEntries { diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 57f82cd4b4..c168c5b642 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -12,7 +12,7 @@ use tracing::{debug, error_span, field::Empty, instrument, trace, warn, Instrume use crate::{ actor::{self, ActorHandle}, proto::sync::{ - AccessChallenge, ChallengeHash, Channel, LogicalChannel, Message, CHALLENGE_HASH_LENGTH, + AccessChallenge, Channel, LogicalChannel, Message, CHALLENGE_HASH_LENGTH, MAX_PAYLOAD_SIZE_POWER, }, session::{ @@ -20,7 +20,7 @@ use crate::{ ChannelReceivers, ChannelSenders, Channels, LogicalChannelReceivers, LogicalChannelSenders, }, - Role, SessionInit, + InitialTransmission, Role, SessionInit, }, util::channel::{ inbound_channel, outbound_channel, Guarantees, Reader, Receiver, Sender, Writer, @@ -90,13 +90,13 @@ pub struct SessionHandle { } impl SessionHandle { - /// Finish the session gracefully. + /// Close the session gracefully. 
/// /// After calling this, no further protocol messages will be sent from this node. /// Previously queued messages will still be sent out. The session will only be closed /// once the other peer closes their senders as well. - pub fn finish(&self) { - self.handle.finish() + pub fn close(&self) { + self.handle.close() } /// Wait for the session to finish. @@ -271,13 +271,6 @@ async fn exchange_commitments( }) } -#[derive(Debug)] -pub struct InitialTransmission { - pub our_nonce: AccessChallenge, - pub received_commitment: ChallengeHash, - pub their_max_payload_size: u64, -} - async fn join_all(join_set: &mut JoinSet>) -> anyhow::Result<()> { let mut final_result = Ok(()); let mut joined = 0; @@ -513,7 +506,7 @@ mod tests { let live_entries = done_rx.await?; expected_entries.extend(live_entries); - session_alfie.finish(); + session_alfie.close(); let (res_alfie, res_betty) = tokio::join!(session_alfie.join(), session_betty.join()); info!(time=?start.elapsed(), "reconciliation finished"); diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 5496994576..12b3c23ea3 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -66,6 +66,13 @@ impl SessionMode { } } +#[derive(Debug, Default, Clone)] +pub enum Interests { + #[default] + All, + Some(HashSet), +} + /// Options to initialize a session with. 
#[derive(Debug)] pub struct SessionInit { diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 1baadbe416..3f67053359 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -25,7 +25,7 @@ impl Session { store: Store, recv: ChannelReceivers, init: SessionInit, - finish: CancellationToken, + cancel_token: CancellationToken, ) -> Result<(), Error> { let ChannelReceivers { control_recv, @@ -83,24 +83,24 @@ impl Session { // Spawn a task to handle reconciliation messages self.spawn(error_span!("rec"), { - let finish = finish.clone(); + let cancel_token = cancel_token.clone(); let store = store.clone(); move |session| async move { let res = Reconciler::new(session, store, reconciliation_recv)? .run() .await; - finish.cancel(); + cancel_token.cancel(); res } }); // Spawn a task to handle control messages self.spawn(error_span!("ctl"), { - let finish = finish.clone(); + let cancel_token = cancel_token.clone(); let store = store.clone(); move |session| async move { let res = control_loop(session, store, control_recv, init).await; - finish.cancel(); + cancel_token.cancel(); res } }); @@ -110,7 +110,7 @@ impl Session { // Wait until the session is cancelled: // * either because SessionMode is ReconcileOnce and reconciliation finished // * or because the session was cancelled from the outside session handle - finish.cancelled().await; + cancel_token.cancelled().await; // Then close all senders. This will make all other tasks terminate once the remote // closed their senders as well. 
session.close_senders(); diff --git a/iroh-willow/src/session/state.rs b/iroh-willow/src/session/state.rs index d07e5c8a53..76767e5f23 100644 --- a/iroh-willow/src/session/state.rs +++ b/iroh-willow/src/session/state.rs @@ -11,7 +11,6 @@ use futures_lite::Stream; use tracing::{Instrument, Span}; use crate::{ - net::InitialTransmission, proto::{ challenge::ChallengeState, grouping::ThreeDRange, @@ -24,6 +23,7 @@ use crate::{ }, willow::{AuthorisedEntry, Entry}, }, + session::InitialTransmission, store::traits::SecretStorage, util::{channel::WriteError, queue::Queue, task::JoinMap}, }; From 6f681675d3d7e78f08dc79bfacfd033c77334b18 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Fri, 14 Jun 2024 20:11:42 +0200 Subject: [PATCH 062/198] add types for subspace capabilities --- iroh-willow/src/proto/meadowcap.rs | 56 ++++++++++++++++++++++++++++++ iroh-willow/src/proto/sync.rs | 5 +++ 2 files changed, 61 insertions(+) diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index 40eebce2c3..b0b51e2109 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -334,3 +334,59 @@ impl OwnedCapability { signable } } + +#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash, derive_more::From)] +/// A capability that certifies read access to arbitrary SubspaceIds at some unspecified Path. +pub struct McSubspaceCapability { + /// The namespace for which this grants access. + pub namespace_key: NamespacePublicKey, + + /// The user to whom this grants access. + pub user_key: UserPublicKey, + + /// Authorisation of the user_key by the namespace_key. + pub initial_authorisation: NamespaceSignature, + + /// Successive authorisations of new UserPublicKeys. 
+ pub delegations: Vec<(UserPublicKey, UserSignature)>, +} + +impl McSubspaceCapability { + pub fn receiver(&self) -> &UserPublicKey { + &self.user_key + } + + pub fn granted_namespace(&self) -> &NamespacePublicKey { + &self.namespace_key + } + + pub fn validate(&self) -> Result<(), InvalidCapability> { + match self.is_valid() { + true => Ok(()), + false => Err(InvalidCapability), + } + } + + pub fn is_valid(&self) -> bool { + if self.delegations.is_empty() { + let signable = Self::signable(&self.user_key); + self.namespace_key + .verify(&signable, &self.initial_authorisation) + .is_ok() + } else { + // TODO: support delegations + false + } + } + + fn signable(user_key: &UserPublicKey) -> [u8; PUBLIC_KEY_LENGTH + 1] { + let mut signable = [0u8; PUBLIC_KEY_LENGTH + 1]; + // A McSubspaceCapability with zero delegations is valid if initial_authorisation + // is a NamespaceSignature issued by the namespace_key over the byte 0x02, + // followed by the user_key (encoded via encode_user_pk). + // via https://willowprotocol.org/specs/pai/index.html#subspace_cap_valid + signable[0] = 0x02; + signable[1..].copy_from_slice(user_key.as_bytes()); + signable + } +} diff --git a/iroh-willow/src/proto/sync.rs b/iroh-willow/src/proto/sync.rs index 034d4b9095..f4417a1a18 100644 --- a/iroh-willow/src/proto/sync.rs +++ b/iroh-willow/src/proto/sync.rs @@ -42,9 +42,14 @@ pub type DynamicToken = meadowcap::UserSignature; /// We describe the details in a capability-system-agnostic way here. /// To use Meadowcap for this approach, simply choose the type of valid McCapabilities with access mode read as the read capabilities. pub type ReadCapability = meadowcap::McCapability; +pub type SubspaceCapability = meadowcap::McSubspaceCapability; pub type SyncSignature = meadowcap::UserSignature; pub type Receiver = meadowcap::UserPublicKey; +/// Represents an authorisation to read an area of data in a Namespace. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ReadAuthorisation(ReadCapability, Option); + /// The different resource handles employed by the WGPS. #[derive(Debug, Serialize, Deserialize, strum::Display)] pub enum HandleType { From 0698436d8c2d03e35c4784d70a71d4d2c97225f4 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 18 Jun 2024 00:20:00 +0200 Subject: [PATCH 063/198] feat/refactor: implement auth and delegations --- iroh-blobs/src/store/traits.rs | 2 +- iroh-willow/src/actor.rs | 183 +++++++++--------- iroh-willow/src/engine.rs | 46 +++++ iroh-willow/src/form.rs | 182 ++++++++++++++++++ iroh-willow/src/lib.rs | 2 + iroh-willow/src/net.rs | 6 +- iroh-willow/src/proto/grouping.rs | 158 +++++++++++++++- iroh-willow/src/proto/keys.rs | 7 +- iroh-willow/src/proto/meadowcap.rs | 286 +++++++++++++++++++++++++--- iroh-willow/src/proto/sync.rs | 26 ++- iroh-willow/src/proto/willow.rs | 69 ++++++- iroh-willow/src/store.rs | 240 ++++++++++++------------ iroh-willow/src/store/auth.rs | 289 +++++++++++++++++++++++++++++ iroh-willow/src/store/entry.rs | 153 +++++++++++++++ iroh-willow/src/store/traits.rs | 9 +- iroh-willow/src/util/codec.rs | 38 ++++ iroh-willow/src/util/task.rs | 9 +- 17 files changed, 1456 insertions(+), 249 deletions(-) create mode 100644 iroh-willow/src/engine.rs create mode 100644 iroh-willow/src/form.rs create mode 100644 iroh-willow/src/store/auth.rs create mode 100644 iroh-willow/src/store/entry.rs diff --git a/iroh-blobs/src/store/traits.rs b/iroh-blobs/src/store/traits.rs index e0ec3e6b39..2a91d1c0f3 100644 --- a/iroh-blobs/src/store/traits.rs +++ b/iroh-blobs/src/store/traits.rs @@ -295,7 +295,7 @@ pub trait ReadableStore: Map { } /// The mutable part of a Bao store. -pub trait Store: ReadableStore + MapMut { +pub trait Store: ReadableStore + MapMut + std::fmt::Debug { /// This trait method imports a file from a local path. /// /// `data` is the path to the file. 
diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/actor.rs index 4708e367a5..d7fa11e8a3 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -1,18 +1,20 @@ use std::{collections::HashMap, sync::Arc, thread::JoinHandle}; +use anyhow::Result; use futures_lite::{future::Boxed as BoxFuture, stream::Stream, StreamExt}; use futures_util::future::{self, FutureExt}; use iroh_base::key::NodeId; -use tokio::sync::oneshot; +use tokio::{sync::oneshot, task::JoinSet}; use tokio_util::sync::CancellationToken; use tracing::{debug, error, error_span, trace, warn, Instrument}; use crate::{ + form::{AuthForm, EntryForm, EntryOrForm}, proto::{ grouping::ThreeDRange, keys::NamespaceId, meadowcap, - willow::{AuthorisedEntry, Entry, WriteCapability}, + willow::{AuthorisedEntry, Entry}, }, session::{Channels, Error, InitialTransmission, Role, Session, SessionId, SessionInit}, store::{ @@ -50,6 +52,7 @@ impl ActorHandle { inbox_rx: rx, next_session_id: 0, session_tasks: Default::default(), + tasks: Default::default(), }; if let Err(error) = actor.run() { error!(?error, "storage thread failed"); @@ -59,29 +62,30 @@ impl ActorHandle { let join_handle = Arc::new(Some(join_handle)); ActorHandle { tx, join_handle } } - pub async fn send(&self, action: ToActor) -> anyhow::Result<()> { + pub async fn send(&self, action: ToActor) -> Result<()> { self.tx.send_async(action).await?; Ok(()) } - pub fn send_blocking(&self, action: ToActor) -> anyhow::Result<()> { + pub fn send_blocking(&self, action: ToActor) -> Result<()> { self.tx.send(action)?; Ok(()) } - pub async fn ingest_entry(&self, entry: AuthorisedEntry) -> anyhow::Result<()> { + pub async fn ingest_entry(&self, authorised_entry: AuthorisedEntry) -> Result<()> { let (reply, reply_rx) = oneshot::channel(); - self.send(ToActor::IngestEntry { entry, reply }).await?; + self.send(ToActor::IngestEntry { + authorised_entry, + origin: Origin::Local, + reply, + }) + .await?; reply_rx.await??; Ok(()) } - pub async fn 
insert_entry( - &self, - entry: Entry, - capability: WriteCapability, - ) -> anyhow::Result<()> { + pub async fn insert_entry(&self, entry: Entry, auth: impl Into) -> Result<()> { let (reply, reply_rx) = oneshot::channel(); self.send(ToActor::InsertEntry { - entry, - capability, + entry: EntryOrForm::Entry(entry), + auth: auth.into(), reply, }) .await?; @@ -89,10 +93,23 @@ impl ActorHandle { Ok(()) } - pub async fn insert_secret( + pub async fn insert_form( &self, - secret: impl Into, - ) -> anyhow::Result<()> { + form: EntryForm, + authorisation: impl Into, + ) -> Result { + let (reply, reply_rx) = oneshot::channel(); + self.send(ToActor::InsertEntry { + entry: EntryOrForm::Form(form), + auth: authorisation.into(), + reply, + }) + .await?; + let inserted = reply_rx.await??; + Ok(inserted) + } + + pub async fn insert_secret(&self, secret: impl Into) -> Result<()> { let secret = secret.into(); let (reply, reply_rx) = oneshot::channel(); self.send(ToActor::InsertSecret { secret, reply }).await?; @@ -104,7 +121,7 @@ impl ActorHandle { &self, namespace: NamespaceId, range: ThreeDRange, - ) -> anyhow::Result>> { + ) -> Result>> { let (tx, rx) = flume::bounded(1024); self.send(ToActor::GetEntries { namespace, @@ -122,7 +139,7 @@ impl ActorHandle { initial_transmission: InitialTransmission, channels: Channels, init: SessionInit, - ) -> anyhow::Result { + ) -> Result { let (reply, reply_rx) = oneshot::channel(); self.send(ToActor::InitSession { our_role, @@ -158,6 +175,23 @@ pub struct SessionHandle { } impl SessionHandle { + fn new( + cancel_token: CancellationToken, + on_finish: oneshot::Receiver>, + ) -> Self { + let on_finish = on_finish + .map(|r| match r { + Ok(Ok(())) => Ok(()), + Ok(Err(err)) => Err(Arc::new(err)), + Err(_) => Err(Arc::new(Error::ActorFailed)), + }) + .boxed() + .shared(); + SessionHandle { + on_finish, + cancel_token, + } + } /// Wait for the session to finish. /// /// Returns an error if the session failed to complete. 
@@ -184,26 +218,27 @@ pub enum ToActor { #[debug(skip)] channels: Channels, init: SessionInit, - reply: oneshot::Sender>, + reply: oneshot::Sender>, }, GetEntries { namespace: NamespaceId, range: ThreeDRange, #[debug(skip)] - reply: flume::Sender>, + reply: flume::Sender>, }, IngestEntry { - entry: AuthorisedEntry, - reply: oneshot::Sender>, + authorised_entry: AuthorisedEntry, + origin: Origin, + reply: oneshot::Sender>, }, InsertEntry { - entry: Entry, - capability: WriteCapability, + entry: EntryOrForm, + auth: AuthForm, reply: oneshot::Sender>, }, InsertSecret { secret: meadowcap::SecretKey, - reply: oneshot::Sender>, + reply: oneshot::Sender>, }, Shutdown { #[debug(skip)] @@ -226,29 +261,35 @@ pub struct Actor { next_session_id: u64, sessions: HashMap, session_tasks: JoinMap>, + tasks: JoinSet<()>, } impl Actor { - pub fn run(self) -> anyhow::Result<()> { + pub fn run(self) -> Result<()> { let rt = tokio::runtime::Builder::new_current_thread() .build() .expect("failed to start current-thread runtime for willow actor"); let local_set = tokio::task::LocalSet::new(); local_set.block_on(&rt, async move { self.run_async().await }) } - async fn run_async(mut self) -> anyhow::Result<()> { + async fn run_async(mut self) -> Result<()> { loop { tokio::select! { msg = self.inbox_rx.recv_async() => match msg { Err(_) => break, Ok(ToActor::Shutdown { reply }) => { + tokio::join!( + self.tasks.shutdown(), + self.session_tasks.shutdown() + ); + drop(self); if let Some(reply) = reply { reply.send(()).ok(); } break; } Ok(msg) => { - if self.handle_message(msg).is_err() { + if self.handle_message(msg).await.is_err() { warn!("failed to send reply: receiver dropped"); } } @@ -271,7 +312,7 @@ impl Actor { id } - fn handle_message(&mut self, message: ToActor) -> Result<(), SendReplyError> { + async fn handle_message(&mut self, message: ToActor) -> Result<(), SendReplyError> { trace!(%message, "tick: handle_message"); match message { ToActor::Shutdown { .. 
} => unreachable!("handled in run"), @@ -296,25 +337,13 @@ impl Actor { let task_key = self.session_tasks.spawn_local(id, future); let (on_finish_tx, on_finish_rx) = oneshot::channel(); - let active_session = ActiveSession { on_finish: on_finish_tx, task_key, peer, }; self.sessions.insert(id, active_session); - let on_finish = on_finish_rx - .map(|r| match r { - Ok(Ok(())) => Ok(()), - Ok(Err(err)) => Err(Arc::new(err)), - Err(_) => Err(Arc::new(Error::ActorFailed)), - }) - .boxed() - .shared(); - let handle = SessionHandle { - on_finish, - cancel_token, - }; + let handle = SessionHandle::new(cancel_token, on_finish_rx); send_reply(reply, Ok(handle)) } ToActor::GetEntries { @@ -324,33 +353,33 @@ impl Actor { } => { let snapshot = self.store.entries().snapshot(); match snapshot { + Err(err) => reply.send(Err(err)).map_err(send_reply_error), Ok(snapshot) => { - iter_to_channel(reply, Ok(snapshot.get_entries(namespace, &range))) + self.tasks.spawn_local(async move { + let iter = snapshot.get_entries(namespace, &range); + for entry in iter { + if reply.send_async(entry).await.is_err() { + break; + } + } + }); + Ok(()) } - Err(err) => reply.send(Err(err)).map_err(send_reply_error), } } - ToActor::IngestEntry { entry, reply } => { - let res = self.store.entries().ingest(&entry, Origin::Local); + ToActor::IngestEntry { + authorised_entry, + origin, + reply, + } => { + let res = self.store.entries().ingest(&authorised_entry, origin); + send_reply(reply, res) + } + ToActor::InsertEntry { entry, auth, reply } => { + let res = self.store.insert_entry(entry, auth).await; + let res = res.map_err(Into::into); send_reply(reply, res) } - ToActor::InsertEntry { - entry, - capability, - reply, - } => send_reply_with(reply, self, |slf| { - let user_id = capability.receiver().id(); - let user_secret = slf - .store - .secrets() - .get_user(&user_id) - .ok_or(Error::MissingUserKey(user_id))?; - let authorised_entry = entry.attach_authorisation(capability, &user_secret)?; - slf.store - 
.entries() - .ingest(&authorised_entry, Origin::Local) - .map_err(Error::Store) - }), ToActor::InsertSecret { secret, reply } => { let res = self.store.secrets().insert(secret); send_reply(reply, res.map_err(anyhow::Error::from)) @@ -376,28 +405,14 @@ fn send_reply(sender: oneshot::Sender, value: T) -> Result<(), SendReplyEr sender.send(value).map_err(send_reply_error) } -fn send_reply_with( - sender: oneshot::Sender>, - this: &mut Actor, - f: impl FnOnce(&mut Actor) -> Result, -) -> Result<(), SendReplyError> { - sender.send(f(this)).map_err(send_reply_error) -} +// fn send_reply_with( +// sender: oneshot::Sender>, +// this: &mut Actor, +// f: impl FnOnce(&mut Actor) -> Result, +// ) -> Result<(), SendReplyError> { +// sender.send(f(this)).map_err(send_reply_error) +// } fn send_reply_error(_err: T) -> SendReplyError { SendReplyError } -fn iter_to_channel( - channel: flume::Sender>, - iter: anyhow::Result>>, -) -> Result<(), SendReplyError> { - match iter { - Err(err) => channel.send(Err(err)).map_err(send_reply_error)?, - Ok(iter) => { - for item in iter { - channel.send(item).map_err(send_reply_error)?; - } - } - } - Ok(()) -} diff --git a/iroh-willow/src/engine.rs b/iroh-willow/src/engine.rs new file mode 100644 index 0000000000..e4c704ff15 --- /dev/null +++ b/iroh-willow/src/engine.rs @@ -0,0 +1,46 @@ +use anyhow::Result; +use iroh_blobs::protocol::ALPN; +use iroh_net::{endpoint::Connection, Endpoint, NodeId}; + +use crate::{ + actor::ActorHandle, + net, + session::{Role, SessionInit}, + store::memory, +}; + +#[derive(Debug, Clone)] +pub struct Engine { + endpoint: Endpoint, + handle: ActorHandle, +} + +impl Engine { + pub fn new(endpoint: Endpoint, handle: ActorHandle) -> Self { + Self { endpoint, handle } + } + + pub fn memory(endpoint: Endpoint) -> Self { + let me = endpoint.node_id(); + let payloads = iroh_blobs::store::mem::Store::default(); + let handle = ActorHandle::spawn(move || memory::Store::new(payloads), me); + Self::new(endpoint, handle) + } + + 
pub async fn handle_connection(&self, conn: Connection, init: SessionInit) -> Result<()> { + let our_role = Role::Betty; + let handle = self.handle.clone(); + let mut session = net::run(self.endpoint.node_id(), handle, conn, our_role, init).await?; + session.join().await?; + Ok(()) + } + + pub async fn sync_with_peer(&self, peer: NodeId, init: SessionInit) -> Result<()> { + let our_role = Role::Alfie; + let conn = self.endpoint.connect_by_node_id(&peer, ALPN).await?; + let handle = self.handle.clone(); + let mut session = net::run(self.endpoint.node_id(), handle, conn, our_role, init).await?; + session.join().await?; + Ok(()) + } +} diff --git a/iroh-willow/src/form.rs b/iroh-willow/src/form.rs new file mode 100644 index 0000000000..032935f9f5 --- /dev/null +++ b/iroh-willow/src/form.rs @@ -0,0 +1,182 @@ +//! Types for forms for entries + +use std::{io, path::PathBuf}; + +use bytes::Bytes; +use futures_lite::Stream; +use iroh_base::hash::Hash; +use iroh_blobs::{ + store::{ImportMode, MapEntry}, + util::progress::IgnoreProgressSender, + BlobFormat, +}; +use serde::{Deserialize, Serialize}; +use tokio::io::AsyncRead; + +use crate::{ + proto::{ + keys::UserId, + willow::{ + AuthorisedEntry, Entry, NamespaceId, Path, SubspaceId, Timestamp, WriteCapability, + }, + }, + session::Error, + store::{ + traits::{SecretStorage as _, Storage}, + Store, + }, + util::time::system_time_now, +}; + +/// Sources where payload data can come from. 
+#[derive(derive_more::Debug)] +pub enum PayloadForm { + Hash(HashForm), + #[debug("Bytes({})", _0.len())] + Bytes(Bytes), + File(PathBuf, ImportMode), + #[debug("Stream")] + Stream(Box> + Send + Sync + Unpin>), + #[debug("Reader")] + Reader(Box), +} + +impl PayloadForm { + pub async fn submit( + self, + store: &S, + ) -> anyhow::Result<(Hash, u64)> { + let (hash, len) = match self { + PayloadForm::Hash(HashForm::Exact(digest, len)) => (digest, len), + PayloadForm::Hash(HashForm::Find(digest)) => { + let entry = store.get(&digest).await?; + let entry = entry.ok_or_else(|| anyhow::anyhow!("hash not found"))?; + (digest, entry.size().value()) + } + PayloadForm::Bytes(bytes) => { + let len = bytes.len(); + let temp_tag = store.import_bytes(bytes, BlobFormat::Raw).await?; + (*temp_tag.hash(), len as u64) + } + PayloadForm::File(path, mode) => { + let progress = IgnoreProgressSender::default(); + let (temp_tag, len) = store + .import_file(path, mode, BlobFormat::Raw, progress) + .await?; + (*temp_tag.hash(), len) + } + PayloadForm::Stream(stream) => { + let progress = IgnoreProgressSender::default(); + let (temp_tag, len) = store + .import_stream(stream, BlobFormat::Raw, progress) + .await?; + (*temp_tag.hash(), len) + } + PayloadForm::Reader(reader) => { + let progress = IgnoreProgressSender::default(); + let (temp_tag, len) = store + .import_reader(reader, BlobFormat::Raw, progress) + .await?; + (*temp_tag.hash(), len) + } + }; + Ok((hash, len)) + } +} + +#[derive(Debug)] +pub enum EntryOrForm { + Entry(Entry), + Form(EntryForm), +} + +#[derive(Debug)] +pub struct EntryForm { + pub namespace_id: NamespaceId, + pub subspace_id: SubspaceForm, + pub path: Path, + pub timestamp: TimestampForm, + pub payload: PayloadForm, +} + +impl EntryForm { + pub async fn into_entry( + self, + store: &Store, + user_id: UserId, // auth: AuthForm, + ) -> anyhow::Result { + let timestamp = match self.timestamp { + TimestampForm::Now => system_time_now(), + TimestampForm::Exact(timestamp)
=> timestamp, + }; + let subspace_id = match self.subspace_id { + SubspaceForm::User => user_id, + SubspaceForm::Exact(subspace) => subspace, + }; + let (payload_digest, payload_length) = self.payload.submit(store.payloads()).await?; + let entry = Entry { + namespace_id: self.namespace_id, + subspace_id, + path: self.path, + timestamp, + payload_length, + payload_digest, + }; + Ok(entry) + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum HashForm { + Find(Hash), + Exact(Hash, u64), +} + +#[derive(Debug, Clone, Serialize, Deserialize, derive_more::From)] +pub enum AuthForm { + Find(UserId), + // TODO: WriteCapabilityHash + Exact(WriteCapability), +} + +impl AuthForm { + pub fn user_id(&self) -> UserId { + match self { + AuthForm::Find(user) => *user, + AuthForm::Exact(cap) => cap.receiver().id(), + } + } + pub fn into_write_cap(self, _store: &Store) -> Result { + match self { + AuthForm::Find(_) => todo!(), + AuthForm::Exact(cap) => Ok(cap), + } + } + + pub fn resolve_and_attach( + self, + store: &Store, + entry: Entry, + ) -> Result { + let cap = self.into_write_cap(store)?; + let user_id = cap.receiver().id(); + let secret_key = store + .secrets() + .get_user(&user_id) + .ok_or(Error::MissingUserKey(user_id))?; + let entry = entry.attach_authorisation(cap, &secret_key)?; + Ok(entry) + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum SubspaceForm { + User, + Exact(SubspaceId), +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum TimestampForm { + Now, + Exact(Timestamp), +} diff --git a/iroh-willow/src/lib.rs b/iroh-willow/src/lib.rs index ad2fc4f743..423e49de97 100644 --- a/iroh-willow/src/lib.rs +++ b/iroh-willow/src/lib.rs @@ -3,6 +3,8 @@ #![allow(missing_docs)] pub mod actor; +pub mod engine; +pub mod form; pub mod net; pub mod proto; pub mod session; diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index c168c5b642..378f8fd9dd 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -560,7 
+560,7 @@ mod tests { #[allow(clippy::too_many_arguments)] async fn insert( - store: &ActorHandle, + actor: &ActorHandle, payload_store: &P, namespace_id: NamespaceId, write_cap: WriteCapability, @@ -585,7 +585,8 @@ mod tests { payload_len, ); track_entries.extend([entry.clone()]); - store.insert_entry(entry, write_cap.clone()).await?; + actor.insert_entry(entry, write_cap.clone()).await?; + drop(temp_tag); } Ok(()) } @@ -625,7 +626,6 @@ mod tests { async fn setup_capabilities( rng: &mut impl CryptoRngCore, - store: &ActorHandle, namespace_secret: &NamespaceSecretKey, ) -> anyhow::Result<(ReadCapability, WriteCapability)> { diff --git a/iroh-willow/src/proto/grouping.rs b/iroh-willow/src/proto/grouping.rs index 0cc308daf5..09475f3853 100644 --- a/iroh-willow/src/proto/grouping.rs +++ b/iroh-willow/src/proto/grouping.rs @@ -1,8 +1,13 @@ -use std::cmp::Ordering; +use std::{cmp::Ordering, io}; use bytes::Bytes; use serde::{Deserialize, Serialize}; +use crate::{ + proto::willow::encodings::RelativePath, + util::codec::{compact_width, CompactWidth, Encoder}, +}; + use super::{ keys::NamespaceId, willow::{Entry, Path, SubspaceId, Timestamp}, @@ -179,6 +184,27 @@ pub enum RangeEnd { Open, } +impl RangeEnd { + /// Returns `true` if this range is closed. + pub fn is_closed(&self) -> bool { + matches!(self, RangeEnd::Closed(_)) + } + + /// Returns `true` if this range is open. 
+ pub fn is_open(&self) -> bool { + matches!(self, RangeEnd::Open) + } +} + +impl RangeEnd { + pub fn or_max(self, max: T) -> T { + match self { + Self::Closed(value) => value, + Self::Open => max, + } + } +} + impl PartialOrd for RangeEnd { fn partial_cmp(&self, other: &Self) -> Option { match (self, other) { @@ -259,9 +285,18 @@ impl Area { } pub fn includes_entry(&self, entry: &Entry) -> bool { - self.subspace.includes_subspace(&entry.subspace_id) - && self.path.is_prefix_of(&entry.path) - && self.times.includes(&entry.timestamp) + self.includes_point(&entry.subspace_id, &entry.path, &entry.timestamp) + } + + pub fn includes_point( + &self, + subspace_id: &SubspaceId, + path: &Path, + timestamp: &Timestamp, + ) -> bool { + self.subspace.includes_subspace(subspace_id) + && self.path.is_prefix_of(path) + && self.times.includes(timestamp) } pub fn includes_area(&self, other: &Area) -> bool { @@ -343,7 +378,7 @@ pub fn path_range_end(path: &Path) -> RangeEnd { RangeEnd::Open } else { out.reverse(); - RangeEnd::Closed(Path::from_bytes_unchecked(out)) + RangeEnd::Closed(Path::new_unchecked(out)) } } } @@ -415,3 +450,116 @@ impl SubspaceArea { } } } + +#[derive(thiserror::Error, Debug)] +#[error("area is not included in outer area")] +pub struct NotIncluded; + +#[derive(Debug, Clone)] +pub struct AreaInArea<'a> { + a: &'a Area, + out: &'a Area, +} + +impl<'a> AreaInArea<'a> { + pub fn new(inner: &'a Area, outer: &'a Area) -> Result { + if outer.includes_area(inner) { + Ok(Self { + a: inner, + out: outer, + }) + } else { + Err(NotIncluded) + } + } + fn start_diff(&self) -> u64 { + let a = self.a.times; + let out = self.out.times; + Ord::min( + a.start.saturating_sub(out.start), + out.end.or_max(Timestamp::MAX) - a.start, + ) + } + + fn end_diff(&self) -> u64 { + let a = self.a.times; + let out = self.out.times; + Ord::min( + a.end.or_max(Timestamp::MAX).saturating_sub(out.start), + out.end + .or_max(Timestamp::MAX) + .saturating_sub(a.end.or_max(Timestamp::MAX)), + ) + 
} +} + +impl<'a> Encoder for AreaInArea<'a> { + fn encoded_len(&self) -> usize { + let subspace_is_same = self.a.subspace == self.out.subspace; + let mut len = 1; + if !subspace_is_same { + len += SubspaceId::LENGTH; + } + let relative_path = RelativePath::new(&self.a.path, &self.out.path); + len += relative_path.encoded_len(); + len += CompactWidth(self.start_diff()).encoded_len(); + if self.a.times.end.is_closed() { + len += CompactWidth(self.end_diff()).encoded_len(); + } + len + } + + fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { + let mut bits = 0u8; + let subspace_is_same = self.a.subspace == self.out.subspace; + if !subspace_is_same { + bits |= 0b0000_0001; + } + if self.a.times.is_open() { + bits |= 0b0000_0010; + } + let start_diff = self.start_diff(); + let end_diff = self.start_diff(); + if start_diff == self.a.times.start.saturating_sub(self.out.times.start) { + bits |= 0b0000_0100; + } + if end_diff + == self + .a + .times + .end + .or_max(Timestamp::MAX) + .saturating_sub(self.a.times.start) + { + bits |= 0b0000_1000; + } + if let 4 | 8 = compact_width(start_diff) { + bits |= 0b0001_0000; + } + if let 2 | 8 = compact_width(start_diff) { + bits |= 0b0010_0000; + } + if let 4 | 8 = compact_width(end_diff) { + bits |= 0b0100_0000; + } + if let 2 | 8 = compact_width(end_diff) { + bits |= 0b1000_0000; + } + out.write_all(&[bits])?; + match self.a.subspace { + SubspaceArea::Any => { + debug_assert!(subspace_is_same, "outers subspace must be any"); + } + SubspaceArea::Id(subspace_id) => { + out.write_all(subspace_id.as_bytes())?; + } + } + let relative_path = RelativePath::new(&self.a.path, &self.out.path); + relative_path.encode_into(out)?; + CompactWidth(start_diff).encode_into(out)?; + if self.a.times.end.is_closed() { + CompactWidth(end_diff).encode_into(out)?; + } + Ok(()) + } +} diff --git a/iroh-willow/src/proto/keys.rs b/iroh-willow/src/proto/keys.rs index a036470aeb..e88cc21d6e 100644 --- a/iroh-willow/src/proto/keys.rs +++ 
b/iroh-willow/src/proto/keys.rs @@ -21,6 +21,9 @@ pub const SIGNATURE_LENGTH: usize = ed25519_dalek::SIGNATURE_LENGTH; macro_rules! bytestring { ($ty:ty, $n:ident) => { impl $ty { + /// Length of the byte encoding of [`Self`]. + pub const LENGTH: usize = $n; + /// Convert to a base32 string limited to the first 10 bytes for a friendly string /// representation of the key. pub fn fmt_short(&self) -> String { @@ -74,7 +77,7 @@ impl NamespaceSecretKey { loop { let signing_key = SigningKey::generate(rng); let secret_key = NamespaceSecretKey(signing_key); - if secret_key.public_key().namespace_type() == typ { + if secret_key.public_key().kind() == typ { break secret_key; } } @@ -123,7 +126,7 @@ impl NamespacePublicKey { is_communal(self.as_bytes()) } - pub fn namespace_type(&self) -> NamespaceKind { + pub fn kind(&self) -> NamespaceKind { if self.is_communal() { NamespaceKind::Communal } else { diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index b0b51e2109..13f996fc37 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -1,10 +1,13 @@ +use std::cmp::Ordering; +use std::io::Write; + use serde::{Deserialize, Serialize}; -use crate::util::codec::Encoder; +use crate::{proto::grouping::NotIncluded, util::codec::Encoder}; use super::{ - grouping::Area, - keys::{self, NamespaceSecretKey, UserSecretKey, PUBLIC_KEY_LENGTH}, + grouping::{Area, AreaInArea}, + keys::{self, NamespaceSecretKey, UserSecretKey, PUBLIC_KEY_LENGTH, SIGNATURE_LENGTH}, willow::{AuthorisedEntry, Entry, Unauthorised}, }; @@ -63,6 +66,7 @@ pub fn attach_authorisation( secret_key: &UserSecretKey, ) -> Result { if capability.access_mode() != AccessMode::Write + || capability.granted_namespace().id() != entry.namespace_id || !capability.granted_area().includes_entry(&entry) || capability.receiver() != &secret_key.public_key() { @@ -150,6 +154,29 @@ pub enum McCapability { } impl McCapability { + pub fn new_owned( + namespace_secret: 
NamespaceSecretKey, + user_key: UserPublicKey, + access_mode: AccessMode, + ) -> Self { + McCapability::Owned(OwnedCapability::new( + &namespace_secret, + user_key, + access_mode, + )) + } + + pub fn new_communal( + namespace_key: NamespacePublicKey, + user_key: UserPublicKey, + access_mode: AccessMode, + ) -> Self { + McCapability::Communal(CommunalCapability::new( + namespace_key, + user_key, + access_mode, + )) + } pub fn access_mode(&self) -> AccessMode { match self { Self::Communal(cap) => cap.access_mode, @@ -197,6 +224,35 @@ impl McCapability { false => Err(InvalidCapability), } } + + pub fn delegations(&self) -> &[Delegation] { + match self { + Self::Communal(cap) => &cap.delegations, + Self::Owned(cap) => &cap.delegations, + } + } + + /// Returns `true` if `self` has less delegations or covers a larger area than `other`. + pub fn is_wider_than(&self, other: &Self) -> bool { + match self.delegations().len().cmp(&other.delegations().len()) { + Ordering::Less => true, + Ordering::Greater => false, + Ordering::Equal => self.granted_area().includes_area(&other.granted_area()), + } + } + + pub fn delegate( + &self, + user_secret: &UserSecretKey, + new_user: UserPublicKey, + new_area: Area, + ) -> anyhow::Result { + let cap = match self { + Self::Communal(cap) => Self::Communal(cap.delegate(user_secret, new_user, new_area)?), + Self::Owned(cap) => Self::Owned(cap.delegate(user_secret, new_user, new_area)?), + }; + Ok(cap) + } } impl Encoder for McCapability { @@ -230,13 +286,27 @@ pub struct CommunalCapability { /// Remember that we assume SubspaceId and UserPublicKey to be the same types. user_key: UserPublicKey, /// Successive authorisations of new UserPublicKeys, each restricted to a particular Area. 
- delegations: Vec<(Area, UserPublicKey, UserSignature)>, + delegations: Vec, } impl CommunalCapability { + pub fn new( + namespace_key: NamespacePublicKey, + user_key: UserPublicKey, + access_mode: AccessMode, + ) -> Self { + Self { + access_mode, + namespace_key, + user_key, + delegations: Default::default(), + } + } pub fn receiver(&self) -> &UserPublicKey { - // TODO: support delegations - &self.user_key + match self.delegations.last() { + None => &self.user_key, + Some((_, user_key, _)) => user_key, + } } pub fn granted_namespace(&self) -> &NamespacePublicKey { @@ -244,21 +314,94 @@ impl CommunalCapability { } pub fn granted_area(&self) -> Area { - // TODO: support delegations - Area::subspace(self.user_key.into()) + match self.delegations.last() { + None => Area::subspace(self.user_key.into()), + Some((area, _, _)) => area.clone(), + } } pub fn is_valid(&self) -> bool { + self.validate().is_ok() + } + + pub fn validate(&self) -> anyhow::Result<()> { if self.delegations.is_empty() { // communal capabilities without delegations are always valid - true + Ok(()) } else { - // TODO: support delegations - false + let mut prev = None; + let mut prev_receiver = self.receiver(); + for delegation in self.delegations.iter() { + let (new_area, new_user, new_signature) = &delegation; + let signable = self.handover(prev, new_area, new_user)?; + prev_receiver.verify(&signable, new_signature)?; + prev = Some((new_area, new_signature)); + prev_receiver = new_user; + } + Ok(()) + } + } + + pub fn delegate( + &self, + user_secret: &UserSecretKey, + new_user: UserPublicKey, + new_area: Area, + ) -> anyhow::Result { + if user_secret.public_key() != *self.receiver() { + anyhow::bail!("Secret key does not match receiver of current capability"); + } + let prev = self + .delegations + .last() + .map(|(area, _user_key, sig)| (area, sig)); + let handover = self.handover(prev, &new_area, &new_user)?; + let signature = user_secret.sign(&handover); + let delegation = (new_area, new_user, 
signature); + let mut cap = self.clone(); + cap.delegations.push(delegation); + Ok(cap) + } + + fn handover( + &self, + prev: Option<(&Area, &UserSignature)>, + new_area: &Area, + new_user: &UserPublicKey, + ) -> anyhow::Result> { + match prev { + None => self.initial_handover(new_area, new_user), + Some((prev_area, prev_signature)) => { + let handover = Handover::new(prev_area, prev_signature, new_area, new_user)?; + handover.encode() + } } } + + fn initial_handover( + &self, + new_area: &Area, + new_user: &UserPublicKey, + ) -> anyhow::Result> { + let prev_area = self.granted_area(); + let area_in_area = AreaInArea::new(new_area, &prev_area)?; + let len = + 1 + NamespacePublicKey::LENGTH + area_in_area.encoded_len() + UserPublicKey::LENGTH; + let mut out = std::io::Cursor::new(vec![0u8; len]); + let init = match self.access_mode { + AccessMode::Read => 0x00, + AccessMode::Write => 0x01, + }; + out.write_all(&[init])?; + out.write_all(&self.namespace_key.to_bytes())?; + area_in_area.encode_into(&mut out)?; + out.write_all(&new_user.to_bytes())?; + todo!() + } } +pub type Delegation = (Area, UserPublicKey, UserSignature); + /// A capability that authorizes reads or writes in owned namespaces. #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash)] pub struct OwnedCapability { @@ -271,7 +414,7 @@ pub struct OwnedCapability { /// Authorisation of the user_key by the namespace_key., initial_authorisation: NamespaceSignature, /// Successive authorisations of new UserPublicKeys, each restricted to a particular Area. 
- delegations: Vec<(Area, UserPublicKey, UserSignature)>, + delegations: Vec, } impl OwnedCapability { @@ -293,8 +436,10 @@ impl OwnedCapability { } pub fn receiver(&self) -> &UserPublicKey { - // TODO: support delegations - &self.user_key + match self.delegations.last() { + None => &self.user_key, + Some((_, user_key, _)) => user_key, + } } pub fn granted_namespace(&self) -> &NamespacePublicKey { @@ -302,20 +447,40 @@ impl OwnedCapability { } pub fn granted_area(&self) -> Area { - // TODO: support delegations - Area::full() + match self.delegations.last() { + None => Area::full(), + Some((area, _, _)) => area.clone(), + } } pub fn is_valid(&self) -> bool { + self.validate().is_ok() + } + + pub fn validate(&self) -> anyhow::Result<()> { + // verify root authorisation + let signable = Self::signable(self.access_mode, &self.user_key); + self.namespace_key + .verify(&signable, &self.initial_authorisation)?; + + // no delegations: done if self.delegations.is_empty() { - let signable = Self::signable(self.access_mode, &self.user_key); - self.namespace_key - .verify(&signable, &self.initial_authorisation) - .is_ok() - } else { - // TODO: support delegations - false + return Ok(()); + } + + let mut prev = ( + &self.granted_area(), + self.receiver(), + PrevSignature::Namespace(&self.initial_authorisation), + ); + for delegation in self.delegations.iter() { + let (new_area, new_user, new_signature) = delegation; + let handover = Handover::new(prev.0, prev.2, new_area, new_user)?; + let signable = handover.encode()?; + prev.1.verify(&signable, new_signature)?; + prev = (new_area, new_user, new_signature.into()); } + Ok(()) } fn signable(access_mode: AccessMode, user_key: &UserPublicKey) -> [u8; PUBLIC_KEY_LENGTH + 1] { @@ -333,6 +498,81 @@ impl OwnedCapability { signable[1..].copy_from_slice(user_key.as_bytes()); signable } + + pub fn delegate( + &self, + secret_key: &UserSecretKey, + new_user: UserPublicKey, + new_area: Area, + ) -> anyhow::Result { + if 
secret_key.public_key() != *self.receiver() { + anyhow::bail!("Secret key does not match receiver of current capability"); + } + let prev_signature = match self.delegations.last() { + None => PrevSignature::Namespace(&self.initial_authorisation), + Some((_, _, prev_signature)) => PrevSignature::User(prev_signature), + }; + let prev_area = self.granted_area(); + let handover = Handover::new(&prev_area, prev_signature, &new_area, &new_user)?; + let signable = handover.encode()?; + let signature = secret_key.sign(&signable); + let delegation = (new_area, new_user, signature); + let mut cap = self.clone(); + cap.delegations.push(delegation); + Ok(cap) + } +} + +#[derive(Debug, derive_more::From)] +enum PrevSignature<'a> { + User(&'a UserSignature), + Namespace(&'a NamespaceSignature), +} + +impl<'a> PrevSignature<'a> { + fn to_bytes(&self) -> [u8; SIGNATURE_LENGTH] { + match self { + Self::User(sig) => sig.to_bytes(), + Self::Namespace(sig) => sig.to_bytes(), + } + } +} + +#[derive(Debug)] +struct Handover<'a> { + // prev_area: &'a Area, + // new_area: &'a Area, + prev_signature: PrevSignature<'a>, + new_user: &'a UserPublicKey, + area_in_area: AreaInArea<'a>, +} + +impl<'a> Handover<'a> { + fn new( + prev_area: &'a Area, + prev_signature: impl Into>, + new_area: &'a Area, + new_user: &'a UserPublicKey, + ) -> Result { + let area_in_area = AreaInArea::new(new_area, prev_area)?; + Ok(Self { + area_in_area, + prev_signature: prev_signature.into(), + new_user, + }) + } +} + +impl<'a> Encoder for Handover<'a> { + fn encoded_len(&self) -> usize { + self.area_in_area.encoded_len() + NamespaceSignature::LENGTH + UserId::LENGTH + } + fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { + self.area_in_area.encode_into(out)?; + out.write_all(&self.prev_signature.to_bytes())?; + out.write_all(&self.new_user.to_bytes())?; + Ok(()) + } } #[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash, derive_more::From)] diff --git a/iroh-willow/src/proto/sync.rs 
b/iroh-willow/src/proto/sync.rs index f4417a1a18..1110cdf3dd 100644 --- a/iroh-willow/src/proto/sync.rs +++ b/iroh-willow/src/proto/sync.rs @@ -42,13 +42,35 @@ pub type DynamicToken = meadowcap::UserSignature; /// We describe the details in a capability-system-agnostic way here. /// To use Meadowcap for this approach, simply choose the type of valid McCapabilities with access mode read as the read capabilities. pub type ReadCapability = meadowcap::McCapability; + +/// Whenever a peer is granted a complete read capability of non-empty path, +/// it should also be granted a corresponding subspace capability. +/// Each subspace capability must have a single receiver (a public key of some signature scheme), +/// and a single granted namespace (a NamespaceId). +/// The receiver can authenticate itself by signing a collaboratively selected nonce. pub type SubspaceCapability = meadowcap::McSubspaceCapability; + pub type SyncSignature = meadowcap::UserSignature; + pub type Receiver = meadowcap::UserPublicKey; /// Represents an authorisation to read an area of data in a Namespace. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ReadAuthorisation(ReadCapability, Option); +#[derive(Debug, Clone, Serialize, Deserialize, Hash)] +pub struct ReadAuthorisation(pub ReadCapability, pub Option); + +impl ReadAuthorisation { + pub fn new(read_cap: ReadCapability, subspace_cap: Option) -> Self { + Self(read_cap, subspace_cap) + } + + pub fn read_cap(&self) -> &ReadCapability { + &self.0 + } + + pub fn subspace_cap(&self) -> Option<&SubspaceCapability> { + self.1.as_ref() + } +} /// The different resource handles employed by the WGPS. 
#[derive(Debug, Serialize, Deserialize, strum::Display)] diff --git a/iroh-willow/src/proto/willow.rs b/iroh-willow/src/proto/willow.rs index 8c9ff2311e..a53b4a9153 100644 --- a/iroh-willow/src/proto/willow.rs +++ b/iroh-willow/src/proto/willow.rs @@ -65,6 +65,11 @@ pub enum InvalidPath { TooManyComponents, } +/// A [`Path`] is a sequence of at most [`MAX_COMPONENT_COUNT`] many bytestrings, +/// each of at most [`MAX_COMPONENT_LENGTH`] bytes, and whose total number of bytes +/// is at most [`MAX_PATH_LENGTH`]. +/// +/// The bytestrings that make up a [`Path`] are called its [`Component`]s. #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash)] pub struct Path(Arc<[Component]>); @@ -80,10 +85,10 @@ impl Path { .iter() .map(|c| Bytes::copy_from_slice(c)) .collect(); - Ok(Self::from_bytes_unchecked(components)) + Ok(Self::new_unchecked(components)) } - pub fn from_bytes_unchecked(components: Vec) -> Self { + pub fn new_unchecked(components: Vec) -> Self { let path: Arc<[Component]> = components.into(); Path(path) } @@ -125,6 +130,22 @@ impl Path { None } } + + pub fn common_prefix(&self, other: &Path) -> &[Component] { + &self[..self.common_prefix_len(other)] + } + + pub fn common_prefix_len(&self, other: &Path) -> usize { + self.iter() + .zip(other.iter()) + .take_while(|(a, b)| a == b) + .count() + } + + pub fn remove_prefix(&self, count: usize) -> Path { + let start = count.min(self.len()); + Self::new_unchecked(self[start..].to_vec()) + } } impl std::ops::Deref for Path { @@ -224,7 +245,7 @@ impl Entry { pub fn attach_authorisation( self, - capability: McCapability, + capability: WriteCapability, secret_key: &UserSecretKey, ) -> Result { attach_authorisation(self, capability, secret_key) @@ -347,7 +368,10 @@ pub mod encodings { use bytes::Bytes; - use crate::{proto::keys::PUBLIC_KEY_LENGTH, util::codec::Encoder}; + use crate::{ + proto::willow::{NamespaceId, SubspaceId}, + util::codec::Encoder, + }; use super::{Entry, Path, DIGEST_LENGTH}; @@ -356,10 
+380,10 @@ pub mod encodings { /// UPathLengthPower denotes the type of numbers between zero (inclusive) and 256path_length_power (exclusive). /// /// The value `2` means that we can encode paths up to 64KiB long. - const PATH_LENGTH_POWER: usize = 2; - const PATH_COUNT_POWER: usize = PATH_LENGTH_POWER; - type UPathLengthPower = u16; - type UPathCountPower = u16; + pub const PATH_LENGTH_POWER: usize = 2; + pub const PATH_COUNT_POWER: usize = PATH_LENGTH_POWER; + pub type UPathLengthPower = u16; + pub type UPathCountPower = u16; impl Encoder for Path { fn encoded_len(&self) -> usize { @@ -394,7 +418,34 @@ pub mod encodings { fn encoded_len(&self) -> usize { let path_len = self.path.encoded_len(); - PUBLIC_KEY_LENGTH + PUBLIC_KEY_LENGTH + path_len + 8 + 8 + DIGEST_LENGTH + NamespaceId::LENGTH + SubspaceId::LENGTH + path_len + 8 + 8 + DIGEST_LENGTH + } + } + + #[derive(Debug, Clone)] + pub struct RelativePath<'a> { + pub path: &'a Path, + pub reference: &'a Path, + } + impl<'a> RelativePath<'a> { + pub fn new(path: &'a Path, reference: &'a Path) -> Self { + Self { path, reference } + } + } + + impl<'a> Encoder for RelativePath<'a> { + fn encoded_len(&self) -> usize { + let common_prefix_len = self.path.common_prefix_len(self.reference) as UPathCountPower; + let remaining_path = self.path.remove_prefix(common_prefix_len as usize); + PATH_COUNT_POWER + remaining_path.encoded_len() + } + + fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { + let common_prefix_len = self.path.common_prefix_len(self.reference) as UPathCountPower; + out.write_all(&common_prefix_len.to_be_bytes())?; + let remaining_path = self.path.remove_prefix(common_prefix_len as usize); + remaining_path.encode_into(out)?; + Ok(()) } } } diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index c8d6720ce1..aeb15d4c8c 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -1,154 +1,160 @@ -use std::{ - collections::HashMap, - sync::{Arc, Mutex}, -}; -use 
tokio::sync::broadcast; +use anyhow::{anyhow, Result}; +use rand_core::CryptoRngCore; use crate::{ + form::{AuthForm, EntryOrForm}, proto::{ grouping::Area, - willow::{AuthorisedEntry, NamespaceId}, + keys::{NamespaceId, NamespaceKind, NamespaceSecretKey, UserId}, + meadowcap::AccessMode, + }, + session::Error, + store::{ + auth::{AuthError, CapSelector, CapabilityPack}, + traits::SecretStorage, }, - session::SessionId, }; -use self::traits::{EntryStorage, Storage}; +use self::{auth::AuthStore, traits::Storage}; + +pub use self::entry::{Origin, WatchableEntryStore}; +pub mod auth; +pub mod entry; pub mod memory; pub mod traits; -const BROADCAST_CAP: usize = 1024; - -#[derive(Debug, Clone, Copy)] -pub enum Origin { - Local, - Remote(SessionId), -} - #[derive(Debug, Clone)] pub struct Store { - storage: S, - entries: EntryStore, -} - -#[derive(Debug, Clone)] -pub struct EntryStore { - storage: ES, - broadcast: Arc>, + entries: WatchableEntryStore, + secrets: S::Secrets, + payloads: S::Payloads, + auth: AuthStore, } impl Store { - pub fn entries(&self) -> &EntryStore { + pub fn new(storage: S) -> Self { + Self { + entries: WatchableEntryStore::new(storage.entries().clone()), + secrets: storage.secrets().clone(), + payloads: storage.payloads().clone(), + auth: Default::default(), + } + } + + pub fn entries(&self) -> &WatchableEntryStore { &self.entries } pub fn secrets(&self) -> &S::Secrets { - self.storage.secrets() + &self.secrets } pub fn payloads(&self) -> &S::Payloads { - self.storage.payloads() - } -} - -impl EntryStore { - pub fn reader(&self) -> ES::Reader { - self.storage.reader() + &self.payloads } - pub fn snapshot(&self) -> anyhow::Result { - self.storage.snapshot() + pub fn auth(&self) -> &AuthStore { + &self.auth } - pub fn ingest(&self, entry: &AuthorisedEntry, origin: Origin) -> anyhow::Result { - if self.storage.ingest_entry(entry)? 
{ - self.broadcast.lock().unwrap().broadcast(entry, origin); - Ok(true) - } else { - Ok(false) - } - } - - pub fn subscribe(&self, session_id: SessionId) -> broadcast::Receiver { - self.broadcast.lock().unwrap().subscribe(session_id) - } - - pub fn unsubscribe(&self, session_id: &SessionId) { - self.broadcast.lock().unwrap().unsubscribe(session_id) + pub async fn insert_entry(&self, entry: EntryOrForm, auth: AuthForm) -> Result { + let user_id = auth.user_id(); + let entry = match entry { + EntryOrForm::Entry(entry) => Ok(entry), + EntryOrForm::Form(form) => form.into_entry(self, user_id).await, + }?; + let capability = match auth { + AuthForm::Exact(cap) => cap, + AuthForm::Find(user_id) => { + let selector = CapSelector::for_entry(&entry, user_id); + self.auth() + .get_write(selector)? + .ok_or_else(|| anyhow!("no write capability available"))? + } + }; + let secret_key = self + .secrets() + .get_user(&user_id) + .ok_or(Error::MissingUserKey(user_id))?; + let authorised_entry = entry.attach_authorisation(capability, &secret_key)?; + self.entries().ingest(&authorised_entry, Origin::Local) } - pub fn watch_area(&self, session: SessionId, namespace: NamespaceId, area: Area) { - self.broadcast - .lock() - .unwrap() - .watch_area(session, namespace, area); + pub fn mint_namespace( + &self, + rng: &mut impl CryptoRngCore, + kind: NamespaceKind, + owner: UserId, + ) -> Result { + let namespace_secret = NamespaceSecretKey::generate(rng, kind); + let namespace_id = namespace_secret.id(); + self.secrets().insert_namespace(namespace_secret)?; + self.mint_capabilities(namespace_id, owner)?; + Ok(namespace_id) } -} -impl Store { - pub fn new(store: S) -> Self { - Self { - entries: EntryStore { - storage: store.entries().clone(), - broadcast: Default::default(), - }, - storage: store, + pub fn delegate_capability( + &self, + namespace_id: NamespaceId, + prev_user: UserId, + access_mode: AccessMode, + new_user: UserId, + new_area: Area, + ) -> anyhow::Result> { + match 
access_mode { + AccessMode::Write => { + let write_cap = self.auth.delegate( + &self.secrets, + namespace_id, + prev_user, + AccessMode::Write, + new_user, + new_area, + )?; + Ok(vec![write_cap]) + } + AccessMode::Read => { + let write_cap = self.auth.delegate( + &self.secrets, + namespace_id, + prev_user, + AccessMode::Write, + new_user, + new_area.clone(), + )?; + let read_cap = self.auth.delegate( + &self.secrets, + namespace_id, + prev_user, + AccessMode::Read, + new_user, + new_area, + )?; + Ok(vec![write_cap, read_cap]) + } } } - pub fn entry_broadcast(&self) -> &EntryStore { - &self.entries + fn mint_capabilities( + &self, + namespace_id: NamespaceId, + user_id: UserId, + ) -> Result<(), AuthError> { + self.auth + .mint(&self.secrets, namespace_id, user_id, AccessMode::Read)?; + self.auth + .mint(&self.secrets, namespace_id, user_id, AccessMode::Write)?; + Ok(()) } -} -#[derive(Debug, Default)] -struct BroadcastInner { - senders: HashMap>, - areas: HashMap>>, -} - -impl BroadcastInner { - fn subscribe(&mut self, session: SessionId) -> broadcast::Receiver { - self.senders - .entry(session) - .or_insert_with(|| broadcast::Sender::new(BROADCAST_CAP)) - .subscribe() - } - - fn unsubscribe(&mut self, session: &SessionId) { - self.senders.remove(session); - self.areas.retain(|_namespace, sessions| { - sessions.remove(session); - !sessions.is_empty() - }); - } - - fn watch_area(&mut self, session: SessionId, namespace: NamespaceId, area: Area) { - self.areas - .entry(namespace) - .or_default() - .entry(session) - .or_default() - .push(area) - } - - fn broadcast(&mut self, entry: &AuthorisedEntry, origin: Origin) { - let Some(sessions) = self.areas.get_mut(&entry.namespace_id()) else { - return; - }; - for (session_id, areas) in sessions { - if let Origin::Remote(origin) = origin { - if origin == *session_id { - continue; - } - } - if areas.iter().any(|area| area.includes_entry(entry.entry())) { - self.senders - .get(session_id) - .expect("session sender to exist") 
- .send(entry.clone()) - .ok(); - } - } - } + // pub fn delegate( + // &self, + // namespace_id: NamespaceId, + // access_mode: AccessMode, + // from: UserId, + // area: Area, + // store: bool, + // ) -> Option { + // } } diff --git a/iroh-willow/src/store/auth.rs b/iroh-willow/src/store/auth.rs new file mode 100644 index 0000000000..a3ba164053 --- /dev/null +++ b/iroh-willow/src/store/auth.rs @@ -0,0 +1,289 @@ +use std::{ + collections::HashMap, + sync::{Arc, RwLock}, +}; + +use anyhow::Result; + +use crate::{ + proto::{ + grouping::Area, + keys::{NamespaceId, NamespaceKind, UserId, UserPublicKey}, + meadowcap::{AccessMode, McCapability}, + sync::ReadAuthorisation, + willow::{Entry, Path, SubspaceId, Timestamp, WriteCapability}, + }, + store::traits::{SecretStorage, SecretStoreError}, +}; + +#[derive(Debug)] +pub struct CapSelector { + user_id: UserId, + namespace_id: NamespaceId, + granted_area: AreaSelector, +} + +#[derive(Debug)] +pub enum AreaSelector { + Area(Area), + Point { + subspace_id: SubspaceId, + path: Path, + timestamp: Timestamp, + }, +} + +impl AreaSelector { + pub fn included_in(&self, other: &Area) -> bool { + match self { + AreaSelector::Area(area) => other.includes_area(area), + AreaSelector::Point { + subspace_id, + path, + timestamp, + } => other.includes_point(subspace_id, path, timestamp), + } + } +} + +impl CapSelector { + pub fn for_entry(entry: &Entry, user_id: UserId) -> Self { + let granted_area = AreaSelector::Point { + path: entry.path.clone(), + timestamp: entry.timestamp, + subspace_id: entry.subspace_id, + }; + Self { + namespace_id: entry.namespace_id, + user_id, + granted_area, + } + } +} + +#[derive(Debug)] +pub enum CapabilityPack { + Read(ReadAuthorisation), + Write(WriteCapability), +} + +// #[derive(Debug)] +// pub enum CapabilityRoot { +// Owned(NamespaceSecretKey), +// Communal(NamespacePublicKey), +// } +// +// impl CapabilityRoot { +// pub fn kind(&self) -> NamespaceKind { +// match self { +// CapabilityRoot::Owned(_) 
=> NamespaceKind::Owned, +// CapabilityRoot::Communal(_) => NamespaceKind::Communal, +// } +// } +// fn for_namespace( +// namespace: NamespacePublicKey, +// secrets: S, +// ) -> Result { +// match namespace.kind() { +// NamespaceKind::Communal => Ok(CapabilityRoot::Communal(namespace)), +// NamespaceKind::Owned => { +// let secret = secrets +// .get_namespace(&namespace.id()) +// .ok_or(AuthError::MissingNamespaceSecret)?; +// Ok(CapabilityRoot::Owned(secret)) +// } +// } +// } +// } + +#[derive(Debug, Eq, PartialEq, Clone, Copy, Hash)] +pub struct CapabilityHash(iroh_base::hash::Hash); + +#[derive(Debug, Default, Clone)] +pub struct AuthStore { + inner: Arc>, +} +impl AuthStore { + pub fn get_write(&self, selector: CapSelector) -> Result, AuthError> { + Ok(self.inner.read().unwrap().get_write_authorisation(selector)) + } + + pub fn get_read(&self, selector: CapSelector) -> Result, AuthError> { + Ok(self.inner.read().unwrap().get_read_authorisation(selector)) + } + + pub fn insert(&self, cap: CapabilityPack) { + self.inner.write().unwrap().insert_capability(cap); + } + + pub fn mint( + &self, + secrets: &S, + namespace_id: NamespaceId, + user_id: UserId, + access_mode: AccessMode, + ) -> Result<(), AuthError> { + let namespace_key = namespace_id + .into_public_key() + .map_err(|_| AuthError::InvalidNamespaceId)?; + let user_key: UserPublicKey = user_id + .into_public_key() + .map_err(|_| AuthError::InvalidUserId)?; + let cap = match namespace_key.kind() { + NamespaceKind::Owned => { + let namespace_secret = secrets + .get_namespace(&namespace_id) + .ok_or(AuthError::MissingNamespaceSecret)?; + McCapability::new_owned(namespace_secret, user_key, access_mode) + } + NamespaceKind::Communal => { + McCapability::new_communal(namespace_key, user_key, access_mode) + } + }; + let pack = match access_mode { + AccessMode::Read => CapabilityPack::Read(ReadAuthorisation::new(cap, None)), + AccessMode::Write => CapabilityPack::Write(cap), + }; + self.insert(pack); + Ok(()) + } 
+ + pub fn delegate( + &self, + secrets: &S, + namespace_id: NamespaceId, + prev_user: UserId, + access_mode: AccessMode, + new_user: UserId, + new_area: Area, + ) -> Result { + let new_user_key = new_user + .into_public_key() + .map_err(|_| AuthError::InvalidUserId)?; + let selector = CapSelector { + user_id: prev_user, + namespace_id, + granted_area: AreaSelector::Area(new_area.clone()), + }; + let pack = match access_mode { + AccessMode::Write => { + let cap = self + .get_write(selector)? + .ok_or(AuthError::NoCapabilityFound)?; + let user_secret = secrets + .get_user(&cap.receiver().id()) + .ok_or(AuthError::MissingUserSecret)?; + let new_cap = cap.delegate(&user_secret, new_user_key, new_area)?; + CapabilityPack::Write(new_cap) + } + AccessMode::Read => { + let auth = self + .get_read(selector)? + .ok_or(AuthError::NoCapabilityFound)?; + let ReadAuthorisation(read_cap, _subspace_cap) = auth; + let user_secret = secrets + .get_user(&read_cap.receiver().id()) + .ok_or(AuthError::MissingUserSecret)?; + let new_read_cap = read_cap.delegate(&user_secret, new_user_key, new_area)?; + // TODO: Subspace capability + CapabilityPack::Read(ReadAuthorisation::new(new_read_cap, None)) + } + }; + Ok(pack) + } +} +#[derive(Debug, Default)] +pub struct Inner { + write_caps: HashMap>, + read_caps: HashMap>, +} + +impl Inner { + fn get_write_authorisation(&self, selector: CapSelector) -> Option { + let candidates = self + .write_caps + .get(&selector.namespace_id) + .into_iter() + .flatten() + .filter(|cap| { + cap.receiver().id() == selector.user_id + && selector.granted_area.included_in(&cap.granted_area()) + }); + + // Select the best candidate, by sorting for + // * smallest number of delegations + // * widest area + let best = candidates.reduce(|prev, next| match next.is_wider_than(prev) { + true => next, + false => prev, + }); + best.cloned() + } + + fn get_read_authorisation(&self, selector: CapSelector) -> Option { + let candidates = self + .read_caps + 
.get(&selector.namespace_id) + .into_iter() + .flatten() + .filter(|auth| { + let cap = &auth.0; + cap.receiver().id() == selector.user_id + && selector.granted_area.included_in(&cap.granted_area()) + }); + + // Select the best candidate, by sorting for + // * smallest number of delegations + // * widest area + let best = candidates.reduce(|prev, next| match next.0.is_wider_than(&prev.0) { + true => next, + false => prev, + }); + best.cloned() + } + + fn insert_capability(&mut self, cap: CapabilityPack) { + match cap { + CapabilityPack::Read(_) => todo!(), + CapabilityPack::Write(_) => todo!(), + } + } +} + +// fn mint_capability( +// namespace_secret: &NamespaceSecretKey, +// user_public_key: UserPublicKey, +// ) -> (ReadCapability, WriteCapability) { +// let read_capability = McCapability::Owned(OwnedCapability::new( +// namespace_secret, +// user_public_key, +// AccessMode::Read, +// )); +// let write_capability = McCapability::Owned(OwnedCapability::new( +// namespace_secret, +// user_public_key, +// AccessMode::Write, +// )); +// (read_capability, write_capability) +// } + +#[derive(thiserror::Error, Debug)] +pub enum AuthError { + #[error("invalid user id")] + InvalidUserId, + #[error("invalid namespace id")] + InvalidNamespaceId, + #[error("missing user secret")] + MissingUserSecret, + #[error("missing namespace secret")] + MissingNamespaceSecret, + #[error("wrong root token for namespace kind")] + WrongRootToken, + #[error("secret store error: {0}")] + SecretStore(#[from] SecretStoreError), + #[error("no capability found")] + NoCapabilityFound, + // TODO: remove + #[error("other: {0}")] + Other(#[from] anyhow::Error), +} diff --git a/iroh-willow/src/store/entry.rs b/iroh-willow/src/store/entry.rs new file mode 100644 index 0000000000..7403745bb6 --- /dev/null +++ b/iroh-willow/src/store/entry.rs @@ -0,0 +1,153 @@ +use std::{ + collections::HashMap, + sync::{Arc, Mutex}, +}; +use tokio::sync::broadcast; + +use crate::{ + proto::{ + grouping::Area, + 
willow::{AuthorisedEntry, NamespaceId}, + }, + session::SessionId, +}; + +use super::traits::EntryStorage; + +const BROADCAST_CAP: usize = 1024; + +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub enum Origin { + /// The entry is inserted locally. + Local, + /// The entry is synced in a sync session. + Remote(SessionId), +} + +#[derive(Debug, Clone)] +pub struct WatchableEntryStore { + storage: ES, + broadcast: Arc>, +} + +impl WatchableEntryStore { + pub(super) fn new(storage: ES) -> Self { + Self { + storage, + broadcast: Default::default(), + } + } + + /// Returns a store reader. + pub fn reader(&self) -> ES::Reader { + self.storage.reader() + } + + /// Returns a store snapshot. + pub fn snapshot(&self) -> anyhow::Result { + self.storage.snapshot() + } + + /// Ingest a new entry. + /// + /// Returns `true` if the entry was stored, and `false` if the entry already exists or is + /// obsoleted by an existing entry. + pub fn ingest(&self, entry: &AuthorisedEntry, origin: Origin) -> anyhow::Result { + if self.storage.ingest_entry(entry)? { + self.broadcast.lock().unwrap().broadcast(entry, origin); + Ok(true) + } else { + Ok(false) + } + } + + /// Setup a new subscription, identified by `session_id`. + /// + /// The subscription will initially be empty. To actually receive newly ingested entries, + /// add areas to watch with [`Self::watch_area`]. + /// + /// Returns a [`broadcast::Receiver`]. + pub fn subscribe(&self, session_id: SessionId) -> broadcast::Receiver { + self.broadcast + .lock() + .unwrap() + .subscribe(session_id, BROADCAST_CAP) + } + + /// Remove a subscription. + pub fn unsubscribe(&self, session_id: &SessionId) { + self.broadcast.lock().unwrap().unsubscribe(session_id) + } + + /// Add an area to the list of watched areas for a subscription. + /// + /// The subscription has to be setup with [`Self::subscribe`] to actually receive new entries + /// that fall within the area. 
+ pub fn watch_area(&self, session: SessionId, namespace: NamespaceId, area: Area) { + self.broadcast + .lock() + .unwrap() + .watch_area(session, namespace, area); + } +} + +#[derive(Debug, Default)] +struct Broadcaster { + senders: HashMap>, + watched_areas: HashMap>>, +} + +impl Broadcaster { + fn subscribe( + &mut self, + session: SessionId, + cap: usize, + ) -> broadcast::Receiver { + self.senders + .entry(session) + .or_insert_with(|| broadcast::Sender::new(cap)) + .subscribe() + } + + fn unsubscribe(&mut self, session: &SessionId) { + self.senders.remove(session); + self.watched_areas.retain(|_namespace, sessions| { + sessions.remove(session); + !sessions.is_empty() + }); + } + + fn watch_area(&mut self, session: SessionId, namespace: NamespaceId, area: Area) { + self.watched_areas + .entry(namespace) + .or_default() + .entry(session) + .or_default() + .push(area) + } + + fn broadcast(&mut self, entry: &AuthorisedEntry, origin: Origin) { + let Some(sessions) = self.watched_areas.get_mut(&entry.namespace_id()) else { + return; + }; + let mut dropped_receivers = vec![]; + for (session_id, areas) in sessions { + // Do not broadcast back into sessions where the entry came from. + if origin == Origin::Remote(*session_id) { + continue; + } + // Check if the session is watching an area where the entry falls into. + if areas.iter().any(|area| area.includes_entry(entry.entry())) { + if let Some(sender) = self.senders.get(session_id) { + // Send the entry and mark senders with dropped receivers for removal. 
+ if let Err(_err) = sender.send(entry.clone()) { + dropped_receivers.push(*session_id); + } + } + } + } + for session_id in dropped_receivers { + self.unsubscribe(&session_id); + } + } +} diff --git a/iroh-willow/src/store/traits.rs b/iroh-willow/src/store/traits.rs index a1abd300e3..9aa6da4282 100644 --- a/iroh-willow/src/store/traits.rs +++ b/iroh-willow/src/store/traits.rs @@ -17,11 +17,18 @@ pub trait Storage: Clone + 'static { fn payloads(&self) -> &Self::Payloads; } -pub trait SecretStorage: std::fmt::Debug + 'static { +pub trait SecretStorage: std::fmt::Debug + Clone + 'static { fn insert(&self, secret: meadowcap::SecretKey) -> Result<(), SecretStoreError>; fn get_user(&self, id: &UserId) -> Option; fn get_namespace(&self, id: &NamespaceId) -> Option; + fn insert_user(&self, secret: UserSecretKey) -> Result<(), SecretStoreError> { + self.insert(meadowcap::SecretKey::User(secret)) + } + fn insert_namespace(&self, secret: NamespaceSecretKey) -> Result<(), SecretStoreError> { + self.insert(meadowcap::SecretKey::Namespace(secret)) + } + fn sign_user(&self, id: &UserId, message: &[u8]) -> Result { Ok(self .get_user(id) diff --git a/iroh-willow/src/util/codec.rs b/iroh-willow/src/util/codec.rs index fbb9edb7ef..7e8da7f5b4 100644 --- a/iroh-willow/src/util/codec.rs +++ b/iroh-willow/src/util/codec.rs @@ -37,3 +37,41 @@ pub enum DecodeOutcome { consumed: usize, }, } + +pub fn compact_width(value: u64) -> u8 { + if value < 256 { + 1 + } else if value < 256u64.pow(2) { + 2 + } else if value < 256u64.pow(4) { + 4 + } else { + 8 + } +} + +#[derive(Debug, Clone, Copy)] +pub struct CompactWidth(pub u64); + +impl CompactWidth { + fn len(self) -> u8 { + compact_width(self.0) + } +} + +impl Encoder for CompactWidth { + fn encoded_len(&self) -> usize { + self.len() as usize + } + + fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { + match self.len() { + 1 => out.write_all(&(self.0 as u8).to_be_bytes())?, + 2 => out.write_all(&(self.0 as u16).to_be_bytes())?, + 4 => 
out.write_all(&(self.0 as u32).to_be_bytes())?, + 8 => out.write_all(&self.0.to_be_bytes())?, + _ => unreachable!("len is always one of the above"), + }; + Ok(()) + } +} diff --git a/iroh-willow/src/util/task.rs b/iroh-willow/src/util/task.rs index dbeaabc146..77dea91243 100644 --- a/iroh-willow/src/util/task.rs +++ b/iroh-willow/src/util/task.rs @@ -8,7 +8,7 @@ use std::{ }; use futures_concurrency::future::{future_group, FutureGroup}; -use futures_lite::Stream; +use futures_lite::{Stream, StreamExt}; use tokio::task::AbortHandle; use tokio::task::JoinError; @@ -41,7 +41,7 @@ impl Default for JoinMap { } } -impl JoinMap { +impl JoinMap { /// Create a new [`TaskMap`]. pub fn new() -> Self { Self::default() @@ -95,6 +95,11 @@ impl JoinMap { handle.abort(); } } + + pub async fn shutdown(&mut self) { + self.abort_all(); + while self.next().await.is_some() {} + } } impl JoinMap { From 4c995dd7fd8c4d38d25999e3483cd5a17e0d8381 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Fri, 21 Jun 2024 01:01:56 +0200 Subject: [PATCH 064/198] wip: auth and delegations --- Cargo.lock | 1 + iroh-willow/Cargo.toml | 1 + iroh-willow/src/actor.rs | 98 ++++++- iroh-willow/src/form.rs | 35 +-- iroh-willow/src/net.rs | 99 +++++-- iroh-willow/src/proto/grouping.rs | 96 +++++- iroh-willow/src/proto/meadowcap.rs | 195 +++++++++---- iroh-willow/src/proto/sync.rs | 8 +- iroh-willow/src/session.rs | 54 +++- iroh-willow/src/session/error.rs | 2 + iroh-willow/src/session/run.rs | 53 ++-- iroh-willow/src/session/state.rs | 1 + iroh-willow/src/store.rs | 95 ++---- iroh-willow/src/store/auth.rs | 455 ++++++++++++++++++----------- iroh-willow/src/store/traits.rs | 23 +- 15 files changed, 824 insertions(+), 392 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 077eea0704..9c08baf508 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2930,6 +2930,7 @@ dependencies = [ "futures-lite 2.3.0", "futures-util", "genawaiter", + "hex", "iroh-base", "iroh-blobs", "iroh-metrics", diff --git 
a/iroh-willow/Cargo.toml b/iroh-willow/Cargo.toml index 929fb0ff26..2bcc05261b 100644 --- a/iroh-willow/Cargo.toml +++ b/iroh-willow/Cargo.toml @@ -39,6 +39,7 @@ tokio = { version = "1", features = ["sync"] } tokio-util = { version = "0.7", features = ["io-util", "io"] } tracing = "0.1" zerocopy = { version = "0.8.0-alpha.9", features = ["derive"] } +hex = "0.4.3" [dev-dependencies] iroh-test = { path = "../iroh-test" } diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/actor.rs index d7fa11e8a3..d1fb668962 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -12,12 +12,13 @@ use crate::{ form::{AuthForm, EntryForm, EntryOrForm}, proto::{ grouping::ThreeDRange, - keys::NamespaceId, - meadowcap, + keys::{NamespaceId, NamespaceKind, UserId, UserSecretKey}, + meadowcap::{self, AccessMode}, willow::{AuthorisedEntry, Entry}, }, session::{Channels, Error, InitialTransmission, Role, Session, SessionId, SessionInit}, store::{ + auth::{CapSelector, CapabilityPack, DelegateTo}, traits::{EntryReader, SecretStorage, Storage}, Origin, Store, }, @@ -33,6 +34,10 @@ pub struct ActorHandle { } impl ActorHandle { + pub fn spawn_memory(payloads: iroh_blobs::store::mem::Store, me: NodeId) -> Self { + Self::spawn(move || crate::store::memory::Store::new(payloads), me) + } + pub fn spawn( create_store: impl 'static + Send + FnOnce() -> S, me: NodeId, @@ -93,11 +98,11 @@ impl ActorHandle { Ok(()) } - pub async fn insert_form( + pub async fn insert_form( &self, form: EntryForm, authorisation: impl Into, - ) -> Result { + ) -> Result<(Entry, bool)> { let (reply, reply_rx) = oneshot::channel(); self.send(ToActor::InsertEntry { entry: EntryOrForm::Form(form), @@ -150,7 +155,46 @@ impl ActorHandle { reply, }) .await?; + reply_rx.await? + } + pub async fn create_namespace( + &self, + kind: NamespaceKind, + owner: UserId, + ) -> Result { + let (reply, reply_rx) = oneshot::channel(); + self.send(ToActor::CreateNamespace { kind, owner, reply }) + .await?; + reply_rx.await? 
+ } + + pub async fn create_user(&self) -> Result { + let (reply, reply_rx) = oneshot::channel(); + self.send(ToActor::CreateUser { reply }).await?; + reply_rx.await? + } + + pub async fn delegate_caps( + &self, + from: CapSelector, + access_mode: AccessMode, + to: DelegateTo, + ) -> Result> { + let (reply, reply_rx) = oneshot::channel(); + self.send(ToActor::DelegateCaps { + from, + access_mode, + to, + store: false, + reply, + }) + .await?; + reply_rx.await? + } + pub async fn import_caps(&self, caps: Vec) -> Result<()> { + let (reply, reply_rx) = oneshot::channel(); + self.send(ToActor::ImportCaps { caps, reply }).await?; reply_rx.await? } } @@ -234,12 +278,31 @@ pub enum ToActor { InsertEntry { entry: EntryOrForm, auth: AuthForm, - reply: oneshot::Sender>, + reply: oneshot::Sender>, }, InsertSecret { secret: meadowcap::SecretKey, reply: oneshot::Sender>, }, + CreateNamespace { + kind: NamespaceKind, + owner: UserId, + reply: oneshot::Sender>, + }, + CreateUser { + reply: oneshot::Sender>, + }, + ImportCaps { + caps: Vec, + reply: oneshot::Sender>, + }, + DelegateCaps { + from: CapSelector, + access_mode: AccessMode, + to: DelegateTo, + store: bool, + reply: oneshot::Sender>>, + }, Shutdown { #[debug(skip)] reply: Option>, @@ -384,6 +447,31 @@ impl Actor { let res = self.store.secrets().insert(secret); send_reply(reply, res.map_err(anyhow::Error::from)) } + ToActor::CreateNamespace { kind, owner, reply } => { + let res = self + .store + .create_namespace(&mut rand::thread_rng(), kind, owner); + send_reply(reply, res.map_err(anyhow::Error::from)) + } + ToActor::CreateUser { reply } => { + let secret = UserSecretKey::generate(&mut rand::thread_rng()); + let res = self.store.secrets().insert_user(secret); + send_reply(reply, res.map_err(anyhow::Error::from)) + } + ToActor::ImportCaps { caps, reply } => { + let res = self.store.import_caps(caps); + send_reply(reply, res.map_err(anyhow::Error::from)) + } + ToActor::DelegateCaps { + from, + access_mode, + to, + store, 
+ reply, + } => { + let res = self.store.delegate_cap(from, access_mode, to, store); + send_reply(reply, res.map_err(anyhow::Error::from)) + } } } diff --git a/iroh-willow/src/form.rs b/iroh-willow/src/form.rs index 032935f9f5..6c59857222 100644 --- a/iroh-willow/src/form.rs +++ b/iroh-willow/src/form.rs @@ -16,15 +16,9 @@ use tokio::io::AsyncRead; use crate::{ proto::{ keys::UserId, - willow::{ - AuthorisedEntry, Entry, NamespaceId, Path, SubspaceId, Timestamp, WriteCapability, - }, - }, - session::Error, - store::{ - traits::{SecretStorage as _, Storage}, - Store, + willow::{Entry, NamespaceId, Path, SubspaceId, Timestamp, WriteCapability}, }, + store::{traits::Storage, Store}, util::time::system_time_now, }; @@ -134,7 +128,7 @@ pub enum HashForm { #[derive(Debug, Clone, Serialize, Deserialize, derive_more::From)] pub enum AuthForm { - Find(UserId), + Any(UserId), // TODO: WriteCapabilityHash Exact(WriteCapability), } @@ -142,31 +136,10 @@ pub enum AuthForm { impl AuthForm { pub fn user_id(&self) -> UserId { match self { - AuthForm::Find(user) => *user, + AuthForm::Any(user) => *user, AuthForm::Exact(cap) => cap.receiver().id(), } } - pub fn into_write_cap(self, _store: &Store) -> Result { - match self { - AuthForm::Find(_) => todo!(), - AuthForm::Exact(cap) => Ok(cap), - } - } - - pub fn resolve_and_attach( - self, - store: &Store, - entry: Entry, - ) -> Result { - let cap = self.into_write_cap(store)?; - let user_id = cap.receiver().id(); - let secret_key = store - .secrets() - .get_user(&user_id) - .ok_or(Error::MissingUserKey(user_id))?; - let entry = entry.attach_authorisation(cap, &secret_key)?; - Ok(entry) - } } #[derive(Debug, Clone, Serialize, Deserialize)] diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 378f8fd9dd..32c2450e18 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -305,16 +305,23 @@ mod tests { use crate::{ actor::ActorHandle, + form::{AuthForm, EntryForm, PayloadForm, SubspaceForm, TimestampForm}, 
net::run, proto::{ grouping::{AreaOfInterest, ThreeDRange}, - keys::{NamespaceId, NamespaceKind, NamespaceSecretKey, UserPublicKey, UserSecretKey}, + keys::{ + NamespaceId, NamespaceKind, NamespaceSecretKey, UserId, UserPublicKey, + UserSecretKey, + }, meadowcap::{AccessMode, McCapability, OwnedCapability}, sync::ReadCapability, willow::{Entry, InvalidPath, Path, WriteCapability}, }, - session::{Role, SessionInit, SessionMode}, - store::memory, + session::{Interests, Role, SessionInit, SessionMode}, + store::{ + auth::{CapSelector, DelegateTo}, + memory, + }, }; const ALPN: &[u8] = b"iroh-willow/0"; @@ -329,37 +336,60 @@ mod tests { let (ep_alfie, node_id_alfie, _) = create_endpoint(&mut rng).await?; let (ep_betty, node_id_betty, addr_betty) = create_endpoint(&mut rng).await?; - let namespace_secret = NamespaceSecretKey::generate(&mut rng, NamespaceKind::Owned); - let namespace_id = namespace_secret.id(); - let start = Instant::now(); let mut expected_entries = BTreeSet::new(); - let (handle_alfie, payloads_alfie) = create_willow(node_id_alfie); - let (handle_betty, payloads_betty) = create_willow(node_id_betty); + let handle_alfie = ActorHandle::spawn_memory(Default::default(), node_id_alfie); + let handle_betty = ActorHandle::spawn_memory(Default::default(), node_id_betty); - let (init_alfie, _) = setup_and_insert( - SessionMode::ReconcileOnce, - &mut rng, + let user_alfie = handle_alfie.create_user().await?; + let user_betty = handle_betty.create_user().await?; + + let namespace_id = handle_alfie + .create_namespace(NamespaceKind::Owned, user_alfie) + .await?; + + let cap_for_betty = handle_alfie + .delegate_caps( + CapSelector::widest(namespace_id), + AccessMode::Write, + DelegateTo::new(user_betty, None), + ) + .await?; + + handle_betty.import_caps(cap_for_betty).await?; + + insert2( &handle_alfie, - &payloads_alfie, - &namespace_secret, + namespace_id, + user_alfie, n_alfie, - &mut expected_entries, |n| Path::new(&[b"alfie", n.to_string().as_bytes()]), + |n| 
format!("alfie{n}"), + &mut expected_entries, ) .await?; - let (init_betty, _) = setup_and_insert( - SessionMode::ReconcileOnce, - &mut rng, + + insert2( &handle_betty, - &payloads_betty, - &namespace_secret, + namespace_id, + user_betty, n_betty, - &mut expected_entries, |n| Path::new(&[b"betty", n.to_string().as_bytes()]), + |n| format!("betty{n}"), + &mut expected_entries, ) .await?; + + let init_alfie = SessionInit { + interests: Interests::All, + mode: SessionMode::ReconcileOnce, + }; + let init_betty = SessionInit { + interests: Interests::All, + mode: SessionMode::ReconcileOnce, + }; + info!("init took {:?}", start.elapsed()); let start = Instant::now(); @@ -558,6 +588,32 @@ mod tests { entries } + async fn insert2( + handle: &ActorHandle, + namespace_id: NamespaceId, + user_id: UserId, + count: usize, + path_fn: impl Fn(usize) -> Result, + content_fn: impl Fn(usize) -> String, + track_entries: &mut impl Extend, + ) -> anyhow::Result<()> { + for i in 0..count { + let payload = content_fn(i).as_bytes().to_vec(); + let path = path_fn(i).expect("invalid path"); + let entry = EntryForm { + namespace_id, + subspace_id: SubspaceForm::User, + path, + timestamp: TimestampForm::Now, + payload: PayloadForm::Bytes(payload.into()), + }; + let (entry, inserted) = handle.insert_form(entry, AuthForm::Any(user_id)).await?; + assert!(inserted); + track_entries.extend([entry]); + } + Ok(()) + } + #[allow(clippy::too_many_arguments)] async fn insert( actor: &ActorHandle, @@ -620,7 +676,8 @@ mod tests { track_entries, ) .await?; - let init = SessionInit::with_interest(mode, read_cap, AreaOfInterest::full()); + let init = + SessionInit::with_explicit_interest(mode, read_cap.into(), AreaOfInterest::full()); Ok((init, write_cap)) } diff --git a/iroh-willow/src/proto/grouping.rs b/iroh-willow/src/proto/grouping.rs index 09475f3853..b662145851 100644 --- a/iroh-willow/src/proto/grouping.rs +++ b/iroh-willow/src/proto/grouping.rs @@ -82,6 +82,16 @@ impl ThreeDRange { } } +// pub 
trait Successor: Sized { +// fn successor(&self) -> Option; +// } +// +// impl Successor for Timestamp { +// fn successor(&self) -> Option { +// self.checked_add(1) +// } +// } + /// Ranges are simple, one-dimensional ways of grouping Entries. /// /// They can express groupings such as “last week’s Entries”. A range is either a closed range or an open range. @@ -97,6 +107,22 @@ pub struct Range { pub end: RangeEnd, } +impl Ord for Range { + fn cmp(&self, other: &Self) -> Ordering { + match self.start.cmp(&other.start) { + Ordering::Less => Ordering::Less, + Ordering::Equal => Ordering::Greater, + Ordering::Greater => self.end.cmp(&other.end), + } + } +} + +impl PartialOrd for Range { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + impl From<(T, RangeEnd)> for Range { fn from((start, end): (T, RangeEnd)) -> Self { Range { start, end } @@ -207,11 +233,17 @@ impl RangeEnd { impl PartialOrd for RangeEnd { fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for RangeEnd { + fn cmp(&self, other: &Self) -> Ordering { match (self, other) { - (RangeEnd::Open, RangeEnd::Closed(_)) => Some(Ordering::Greater), - (RangeEnd::Closed(_), RangeEnd::Open) => Some(Ordering::Less), - (RangeEnd::Closed(a), RangeEnd::Closed(b)) => a.partial_cmp(b), - (RangeEnd::Open, RangeEnd::Open) => Some(Ordering::Equal), + (RangeEnd::Open, RangeEnd::Closed(_)) => Ordering::Greater, + (RangeEnd::Closed(_), RangeEnd::Open) => Ordering::Less, + (RangeEnd::Closed(a), RangeEnd::Closed(b)) => a.cmp(b), + (RangeEnd::Open, RangeEnd::Open) => Ordering::Equal, } } } @@ -227,7 +259,7 @@ impl RangeEnd { } /// A grouping of Entries that are among the newest in some store. -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Hash)] +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Hash, Ord, PartialOrd)] pub struct AreaOfInterest { /// To be included in this AreaOfInterest, an Entry must be included in the area. 
pub area: Area, @@ -238,6 +270,13 @@ pub struct AreaOfInterest { } impl AreaOfInterest { + pub fn new(area: Area) -> Self { + Self { + area, + max_count: 0, + max_size: 0, + } + } /// Create a new [`AreaOfInterest`] that covers everything. pub fn full() -> Self { Self { @@ -249,7 +288,7 @@ impl AreaOfInterest { } /// A grouping of Entries. -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash)] +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)] pub struct Area { /// To be included in this Area, an Entry’s subspace_id must be equal to the subspace_id, unless it is any. pub subspace: SubspaceArea, @@ -285,20 +324,19 @@ impl Area { } pub fn includes_entry(&self, entry: &Entry) -> bool { - self.includes_point(&entry.subspace_id, &entry.path, &entry.timestamp) + self.includes(&entry.subspace_id, &entry.path, &entry.timestamp) } - pub fn includes_point( - &self, - subspace_id: &SubspaceId, - path: &Path, - timestamp: &Timestamp, - ) -> bool { + pub fn includes(&self, subspace_id: &SubspaceId, path: &Path, timestamp: &Timestamp) -> bool { self.subspace.includes_subspace(subspace_id) && self.path.is_prefix_of(path) && self.times.includes(timestamp) } + pub fn includes_point(&self, point: &Point) -> bool { + self.includes(&point.subspace_id, &point.path, &point.timestamp) + } + pub fn includes_area(&self, other: &Area) -> bool { self.subspace.includes(&other.subspace) && self.path.is_prefix_of(&other.path) @@ -419,7 +457,7 @@ impl Range { }; } -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash)] +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)] pub enum SubspaceArea { Any, Id(SubspaceId), @@ -451,6 +489,36 @@ impl SubspaceArea { } } +/// A single point in the 3D range space. +/// +/// I.e. an entry. 
+#[derive(Debug, Clone)] +pub struct Point { + pub path: Path, + pub timestamp: Timestamp, + pub subspace_id: SubspaceId, +} + +impl Point { + pub fn new(subspace_id: SubspaceId, path: Path, timestamp: Timestamp) -> Self { + Self { + subspace_id, + path, + timestamp, + } + } + pub fn from_entry(entry: &Entry) -> Self { + Self { + path: entry.path.clone(), + timestamp: entry.timestamp, + subspace_id: entry.subspace_id, + } + } + + // pub fn into_area(&self) -> Area { + // } +} + #[derive(thiserror::Error, Debug)] #[error("area is not included in outer area")] pub struct NotIncluded; diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index 13f996fc37..17f64dd8fb 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -1,4 +1,3 @@ -use std::cmp::Ordering; use std::io::Write; use serde::{Deserialize, Serialize}; @@ -24,18 +23,6 @@ pub enum SecretKey { Namespace(NamespaceSecretKey), } -// #[derive(Debug, derive_more::From)] -// pub enum PublicKey { -// User(UserPublicKey), -// Namespace(NamespacePublicKey), -// } -// -// #[derive(Debug, derive_more::From)] -// pub enum PublicKeyId { -// User(UserId), -// Namespace(NamespaceId), -// } - pub fn is_authorised_write(entry: &Entry, token: &MeadowcapAuthorisationToken) -> bool { let (capability, signature) = token.as_parts(); @@ -218,10 +205,11 @@ impl McCapability { Self::Owned(cap) => cap.is_valid(), } } - pub fn validate(&self) -> Result<(), InvalidCapability> { - match self.is_valid() { - true => Ok(()), - false => Err(InvalidCapability), + // pub fn validate(&self) -> Result<(), InvalidCapability> { + pub fn validate(&self) -> anyhow::Result<()> { + match self { + Self::Communal(cap) => cap.validate(), + Self::Owned(cap) => cap.validate(), } } @@ -232,13 +220,18 @@ impl McCapability { } } - /// Returns `true` if `self` has less delegations or covers a larger area than `other`. 
+ /// Returns `true` if `self` covers a larger area than `other`, + /// or if covers the same area and has less delegations. pub fn is_wider_than(&self, other: &Self) -> bool { - match self.delegations().len().cmp(&other.delegations().len()) { - Ordering::Less => true, - Ordering::Greater => false, - Ordering::Equal => self.granted_area().includes_area(&other.granted_area()), - } + (self.granted_area().includes_area(&other.granted_area())) + || (self.granted_area() == other.granted_area() + && self.delegations().len() < other.delegations().len()) + + // match self.delegations().len().cmp(&other.delegations().len()) { + // Ordering::Less => true, + // Ordering::Greater => false, + // Ordering::Equal => self.granted_area().includes_area(&other.granted_area()), + // } } pub fn delegate( @@ -330,7 +323,7 @@ impl CommunalCapability { Ok(()) } else { let mut prev = None; - let mut prev_receiver = self.receiver(); + let mut prev_receiver = &self.user_key; for delegation in self.delegations.iter() { let (new_area, new_user, new_signature) = &delegation; let signable = self.handover(prev, new_area, new_user)?; @@ -371,10 +364,13 @@ impl CommunalCapability { ) -> anyhow::Result> { match prev { None => self.initial_handover(new_area, new_user), - Some((prev_area, prev_signature)) => { - let handover = Handover::new(prev_area, prev_signature, new_area, new_user)?; - handover.encode() - } + Some((prev_area, prev_signature)) => Handover::new( + prev_area, + PrevSignature::User(prev_signature), + new_area, + new_user, + )? 
+ .encode(), } } @@ -383,7 +379,7 @@ impl CommunalCapability { new_area: &Area, new_user: &UserPublicKey, ) -> anyhow::Result> { - let prev_area = self.granted_area(); + let prev_area = Area::subspace(self.user_key.into()); let area_in_area = AreaInArea::new(new_area, &prev_area)?; let len = 1 + NamespacePublicKey::LENGTH + area_in_area.encoded_len() + UserPublicKey::LENGTH; @@ -396,7 +392,7 @@ impl CommunalCapability { out.write_all(&self.namespace_key.to_bytes())?; area_in_area.encode_into(&mut out)?; out.write_all(&new_user.to_bytes())?; - todo!() + Ok(out.into_inner()) } } @@ -424,8 +420,8 @@ impl OwnedCapability { access_mode: AccessMode, ) -> Self { let namespace_key = namespace_secret_key.public_key(); - let signable = Self::signable(access_mode, &user_key); - let initial_authorisation = namespace_secret_key.sign(&signable); + let handover = Self::initial_handover(access_mode, &user_key); + let initial_authorisation = namespace_secret_key.sign(&handover); Self { access_mode, namespace_key, @@ -459,31 +455,36 @@ impl OwnedCapability { pub fn validate(&self) -> anyhow::Result<()> { // verify root authorisation - let signable = Self::signable(self.access_mode, &self.user_key); + let handover = Self::initial_handover(self.access_mode, &self.user_key); self.namespace_key - .verify(&signable, &self.initial_authorisation)?; + .verify(&handover, &self.initial_authorisation)?; // no delegations: done if self.delegations.is_empty() { return Ok(()); } + let initial_area = Area::full(); let mut prev = ( - &self.granted_area(), - self.receiver(), + &initial_area, + &self.user_key, PrevSignature::Namespace(&self.initial_authorisation), ); for delegation in self.delegations.iter() { + let (prev_area, prev_user, prev_signature) = prev; let (new_area, new_user, new_signature) = delegation; - let handover = Handover::new(prev.0, prev.2, new_area, new_user)?; - let signable = handover.encode()?; - prev.1.verify(&signable, new_signature)?; - prev = (new_area, new_user, 
new_signature.into()); + let handover = + Handover::new(prev_area, prev_signature, new_area, new_user)?.encode()?; + prev_user.verify(&handover, new_signature)?; + prev = (new_area, new_user, PrevSignature::User(new_signature)); } Ok(()) } - fn signable(access_mode: AccessMode, user_key: &UserPublicKey) -> [u8; PUBLIC_KEY_LENGTH + 1] { + fn initial_handover( + access_mode: AccessMode, + user_key: &UserPublicKey, + ) -> [u8; PUBLIC_KEY_LENGTH + 1] { let mut signable = [0u8; PUBLIC_KEY_LENGTH + 1]; // https://willowprotocol.org/specs/meadowcap/index.html#owned_cap_valid // An OwnedCapability with zero delegations is valid if initial_authorisation @@ -523,7 +524,7 @@ impl OwnedCapability { } } -#[derive(Debug, derive_more::From)] +#[derive(Debug)] enum PrevSignature<'a> { User(&'a UserSignature), Namespace(&'a NamespaceSignature), @@ -540,8 +541,6 @@ impl<'a> PrevSignature<'a> { #[derive(Debug)] struct Handover<'a> { - // prev_area: &'a Area, - // new_area: &'a Area, prev_signature: PrevSignature<'a>, new_user: &'a UserPublicKey, area_in_area: AreaInArea<'a>, @@ -550,14 +549,14 @@ struct Handover<'a> { impl<'a> Handover<'a> { fn new( prev_area: &'a Area, - prev_signature: impl Into>, + prev_signature: PrevSignature<'a>, new_area: &'a Area, new_user: &'a UserPublicKey, ) -> Result { let area_in_area = AreaInArea::new(new_area, prev_area)?; Ok(Self { area_in_area, - prev_signature: prev_signature.into(), + prev_signature, new_user, }) } @@ -600,26 +599,64 @@ impl McSubspaceCapability { &self.namespace_key } - pub fn validate(&self) -> Result<(), InvalidCapability> { - match self.is_valid() { - true => Ok(()), - false => Err(InvalidCapability), + pub fn validate(&self) -> anyhow::Result<()> { + let signable = Self::initial_handover(&self.user_key); + self.namespace_key + .verify(&signable, &self.initial_authorisation)?; + + if self.delegations.is_empty() { + return Ok(()); } + + let mut prev = ( + &self.user_key, + PrevSignature::Namespace(&self.initial_authorisation), + 
); + for delegation in &self.delegations { + let (prev_user, prev_signature) = prev; + let (new_user, new_signature) = delegation; + let handover = Self::handover(prev_signature, new_user); + prev_user.verify(&handover, new_signature)?; + prev = (new_user, PrevSignature::User(new_signature)); + } + Ok(()) } pub fn is_valid(&self) -> bool { - if self.delegations.is_empty() { - let signable = Self::signable(&self.user_key); - self.namespace_key - .verify(&signable, &self.initial_authorisation) - .is_ok() - } else { - // TODO: support delegations - false + self.validate().is_ok() + } + + pub fn delegate( + &self, + secret_key: &UserSecretKey, + new_user: UserPublicKey, + ) -> anyhow::Result { + if secret_key.public_key() != *self.receiver() { + anyhow::bail!("Secret key does not match receiver of current capability"); } + let prev_signature = match self.delegations.last() { + None => PrevSignature::Namespace(&self.initial_authorisation), + Some((_, prev_signature)) => PrevSignature::User(prev_signature), + }; + let handover = Self::handover(prev_signature, &new_user); + let signature = secret_key.sign(&handover); + let delegation = (new_user, signature); + let mut cap = self.clone(); + cap.delegations.push(delegation); + Ok(cap) + } + + fn handover( + prev_signature: PrevSignature, + new_user: &UserPublicKey, + ) -> [u8; PUBLIC_KEY_LENGTH + SIGNATURE_LENGTH] { + let mut out = [0u8; PUBLIC_KEY_LENGTH + SIGNATURE_LENGTH]; + out[..SIGNATURE_LENGTH].copy_from_slice(&prev_signature.to_bytes()); + out[SIGNATURE_LENGTH..].copy_from_slice(new_user.as_bytes()); + out } - fn signable(user_key: &UserPublicKey) -> [u8; PUBLIC_KEY_LENGTH + 1] { + fn initial_handover(user_key: &UserPublicKey) -> [u8; PUBLIC_KEY_LENGTH + 1] { let mut signable = [0u8; PUBLIC_KEY_LENGTH + 1]; // A McSubspaceCapability with zero delegations is valid if initial_authorisation // is a NamespaceSignature issued by the namespace_key over the byte 0x02, @@ -630,3 +667,43 @@ impl McSubspaceCapability { 
signable } } + +#[cfg(test)] +mod tests { + use rand_core::SeedableRng; + + use crate::proto::{ + grouping::Area, + keys::{NamespaceKind, NamespaceSecretKey, UserSecretKey}, + }; + + use super::{AccessMode, McCapability}; + + #[test] + fn delegate_owned() { + let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(1); + let namespace_secret = NamespaceSecretKey::generate(&mut rng, NamespaceKind::Owned); + let alfie_secret = UserSecretKey::generate(&mut rng); + let betty_secret = UserSecretKey::generate(&mut rng); + let alfie_public = alfie_secret.public_key(); + let betty_public = betty_secret.public_key(); + let cap = McCapability::new_owned(namespace_secret, alfie_public, AccessMode::Write); + cap.validate().expect("cap to be valid"); + let cap_betty = cap + .delegate(&alfie_secret, betty_public, Area::full()) + .expect("not to fail"); + cap_betty.validate().expect("cap to be valid"); + let conny_secret = UserSecretKey::generate(&mut rng); + let conny_public = conny_secret.public_key(); + let cap_conny = cap_betty + .delegate( + &betty_secret, + conny_public, + Area::subspace(conny_public.id()), + ) + .expect("not to fail"); + cap_conny.validate().expect("cap to be valid"); + assert_eq!(cap_conny.granted_area(), Area::subspace(conny_public.id())); + assert_eq!(cap_conny.receiver(), &conny_public); + } +} diff --git a/iroh-willow/src/proto/sync.rs b/iroh-willow/src/proto/sync.rs index 1110cdf3dd..2568ee65d5 100644 --- a/iroh-willow/src/proto/sync.rs +++ b/iroh-willow/src/proto/sync.rs @@ -55,9 +55,15 @@ pub type SyncSignature = meadowcap::UserSignature; pub type Receiver = meadowcap::UserPublicKey; /// Represents an authorisation to read an area of data in a Namespace. 
-#[derive(Debug, Clone, Serialize, Deserialize, Hash)] +#[derive(Debug, Clone, Serialize, Deserialize, Hash, Eq, PartialEq)] pub struct ReadAuthorisation(pub ReadCapability, pub Option); +impl From for ReadAuthorisation { + fn from(value: ReadCapability) -> Self { + Self(value, None) + } +} + impl ReadAuthorisation { pub fn new(read_cap: ReadCapability, subspace_cap: Option) -> Self { Self(read_cap, subspace_cap) diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 12b3c23ea3..0c11bb4e61 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -1,9 +1,11 @@ -use std::collections::{HashMap, HashSet}; +use std::collections::{BTreeMap, BTreeSet, HashMap}; -use crate::proto::grouping::Area; use crate::proto::keys::NamespaceId; -use crate::proto::sync::{AccessChallenge, AreaOfInterestHandle, ChallengeHash}; -use crate::proto::{grouping::AreaOfInterest, sync::ReadCapability}; +use crate::proto::sync::{AccessChallenge, AreaOfInterestHandle, ChallengeHash, ReadAuthorisation}; +use crate::{ + proto::grouping::{Area, AreaOfInterest}, + store::auth::CapSelector, +}; pub mod channels; mod data; @@ -56,12 +58,14 @@ impl Role { #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum SessionMode { + /// Run a single, full reconciliation, and then quit. ReconcileOnce, + /// Run reconciliations and data mode, until intentionally closed. Live, } impl SessionMode { - fn is_live(&self) -> bool { + pub fn is_live(&self) -> bool { *self == Self::Live } } @@ -70,14 +74,29 @@ impl SessionMode { pub enum Interests { #[default] All, - Some(HashSet), + Some(BTreeMap>), + Explicit(HashMap>), +} + +#[derive(Debug, Default, Clone)] +pub enum Interests2 { + #[default] + All, + Some(Vec<(CapSelector, AreaOfInterestSelector)>), +} + +#[derive(Debug, Default, Clone)] +pub enum AreaOfInterestSelector { + #[default] + Widest, + Exact(BTreeSet), } /// Options to initialize a session with. 
#[derive(Debug)] pub struct SessionInit { /// List of interests we wish to synchronize, together with our capabilities to read them. - pub interests: HashMap>, + pub interests: Interests, pub mode: SessionMode, } @@ -85,12 +104,29 @@ impl SessionInit { /// Returns a [`SessionInit`] with a single interest. pub fn with_interest( mode: SessionMode, - capability: ReadCapability, + namespace: NamespaceId, + area_of_interest: AreaOfInterest, + ) -> Self { + Self { + mode, + interests: Interests::Some(BTreeMap::from_iter([( + namespace, + BTreeSet::from_iter([area_of_interest]), + )])), + } + } + + pub fn with_explicit_interest( + mode: SessionMode, + authorisation: ReadAuthorisation, area_of_interest: AreaOfInterest, ) -> Self { Self { mode, - interests: HashMap::from_iter([(capability, HashSet::from_iter([area_of_interest]))]), + interests: Interests::Explicit(HashMap::from_iter([( + authorisation, + BTreeSet::from_iter([area_of_interest]), + )])), } } } diff --git a/iroh-willow/src/session/error.rs b/iroh-willow/src/session/error.rs index aeda22b4fb..d1b7234cdc 100644 --- a/iroh-willow/src/session/error.rs +++ b/iroh-willow/src/session/error.rs @@ -14,6 +14,8 @@ use crate::{ pub enum Error { #[error("local store failed: {0}")] Store(#[from] anyhow::Error), + #[error("authentication error: {0}")] + Auth(#[from] crate::store::auth::AuthError), #[error("payload store failed: {0}")] PayloadStore(std::io::Error), #[error("payload digest does not match expected digest")] diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 3f67053359..516056ba51 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -74,11 +74,13 @@ impl Session { }); // Spawn a task to handle incoming areas of interest. - self.spawn(error_span!("aoi"), move |session| async move { - while let Some(message) = aoi_recv.try_next().await? 
{ - session.on_bind_area_of_interest(message).await?; + self.spawn(error_span!("aoi"), { + move |session| async move { + while let Some(message) = aoi_recv.try_next().await? { + session.on_bind_area_of_interest(message).await?; + } + Ok(()) } - Ok(()) }); // Spawn a task to handle reconciliation messages @@ -106,17 +108,21 @@ impl Session { }); // Spawn a task to handle session termination. - self.spawn(error_span!("fin"), move |session| async move { - // Wait until the session is cancelled: - // * either because SessionMode is ReconcileOnce and reconciliation finished - // * or because the session was cancelled from the outside session handle - cancel_token.cancelled().await; - // Then close all senders. This will make all other tasks terminate once the remote - // closed their senders as well. - session.close_senders(); - // Unsubscribe from the store. This stops the data send task. - store.entries().unsubscribe(session.id()); - Ok(()) + self.spawn(error_span!("fin"), { + let cancel_token = cancel_token.clone(); + move |session| async move { + // Wait until the session is cancelled: + // * either because SessionMode is ReconcileOnce and reconciliation finished + // * or because the session was cancelled from the outside session handle + cancel_token.cancelled().await; + debug!("closing session"); + // Then close all senders. This will make all other tasks terminate once the remote + // closed their senders as well. + session.close_senders(); + // Unsubscribe from the store. This stops the data send task. + store.entries().unsubscribe(session.id()); + Ok(()) + } }); // Wait for all tasks to complete. 
@@ -125,9 +131,12 @@ impl Session { let mut final_result = Ok(()); while let Some((span, result)) = self.join_next_task().await { let _guard = span.enter(); - trace!(?result, remaining = self.remaining_tasks(), "task complete"); + // trace!(?result, remaining = self.remaining_tasks(), "task complete"); + debug!(?result, remaining = self.remaining_tasks(), "task complete"); if let Err(err) = result { tracing::warn!(?err, "task failed: {err}"); + cancel_token.cancel(); + // self.abort_all_tasks(); if final_result.is_ok() { final_result = Err(err); } @@ -188,14 +197,18 @@ async fn setup( session: Session, init: SessionInit, ) -> Result<(), Error> { - debug!(interests = init.interests.len(), "start setup"); - for (capability, aois) in init.interests.into_iter() { + // debug!(interests = init.interests.len(), "start setup"); + debug!(?init, "start setup"); + let interests = store.auth().resolve_interests(init.interests)?; + debug!(?interests, "found interests"); + for (authorisation, aois) in interests { // TODO: implement private area intersection let intersection_handle = 0.into(); + let read_cap = authorisation.read_cap(); let (our_capability_handle, message) = session.bind_and_sign_capability( store.secrets(), intersection_handle, - capability.clone(), + read_cap.clone(), )?; if let Some(message) = message { session.send(message).await?; @@ -207,7 +220,7 @@ async fn setup( authorisation: our_capability_handle, }; // TODO: We could skip the clone if we re-enabled sending by reference. 
- session.bind_area_of_interest(Scope::Ours, msg.clone(), &capability)?; + session.bind_area_of_interest(Scope::Ours, msg.clone(), read_cap)?; session.send(msg).await?; } } diff --git a/iroh-willow/src/session/state.rs b/iroh-willow/src/session/state.rs index 76767e5f23..b2388f5cfb 100644 --- a/iroh-willow/src/session/state.rs +++ b/iroh-willow/src/session/state.rs @@ -295,6 +295,7 @@ impl Session { pub fn on_setup_bind_read_capability(&self, msg: SetupBindReadCapability) -> Result<(), Error> { // TODO: verify intersection handle + tracing::debug!("setup bind cap {msg:?}"); msg.capability.validate()?; let mut state = self.state_mut(); state diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index aeb15d4c8c..e3fafa60d3 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -4,13 +4,13 @@ use rand_core::CryptoRngCore; use crate::{ form::{AuthForm, EntryOrForm}, proto::{ - grouping::Area, keys::{NamespaceId, NamespaceKind, NamespaceSecretKey, UserId}, meadowcap::AccessMode, + willow::Entry, }, session::Error, store::{ - auth::{AuthError, CapSelector, CapabilityPack}, + auth::{AuthError, CapSelector, CapabilityPack, DelegateTo, UserSelector}, traits::SecretStorage, }, }; @@ -58,7 +58,7 @@ impl Store { &self.auth } - pub async fn insert_entry(&self, entry: EntryOrForm, auth: AuthForm) -> Result { + pub async fn insert_entry(&self, entry: EntryOrForm, auth: AuthForm) -> Result<(Entry, bool)> { let user_id = auth.user_id(); let entry = match entry { EntryOrForm::Entry(entry) => Ok(entry), @@ -66,10 +66,10 @@ impl Store { }?; let capability = match auth { AuthForm::Exact(cap) => cap, - AuthForm::Find(user_id) => { - let selector = CapSelector::for_entry(&entry, user_id); + AuthForm::Any(user_id) => { + let selector = CapSelector::for_entry(&entry, UserSelector::Exact(user_id)); self.auth() - .get_write(selector)? + .get_write_cap(&selector)? .ok_or_else(|| anyhow!("no write capability available"))? 
} }; @@ -78,10 +78,11 @@ impl Store { .get_user(&user_id) .ok_or(Error::MissingUserKey(user_id))?; let authorised_entry = entry.attach_authorisation(capability, &secret_key)?; - self.entries().ingest(&authorised_entry, Origin::Local) + let inserted = self.entries().ingest(&authorised_entry, Origin::Local)?; + Ok((authorised_entry.into_entry(), inserted)) } - pub fn mint_namespace( + pub fn create_namespace( &self, rng: &mut impl CryptoRngCore, kind: NamespaceKind, @@ -90,71 +91,37 @@ impl Store { let namespace_secret = NamespaceSecretKey::generate(rng, kind); let namespace_id = namespace_secret.id(); self.secrets().insert_namespace(namespace_secret)?; - self.mint_capabilities(namespace_id, owner)?; + self.mint_caps(namespace_id, owner)?; Ok(namespace_id) } - pub fn delegate_capability( + pub fn delegate_cap( &self, - namespace_id: NamespaceId, - prev_user: UserId, + from: CapSelector, access_mode: AccessMode, - new_user: UserId, - new_area: Area, - ) -> anyhow::Result> { - match access_mode { - AccessMode::Write => { - let write_cap = self.auth.delegate( - &self.secrets, - namespace_id, - prev_user, - AccessMode::Write, - new_user, - new_area, - )?; - Ok(vec![write_cap]) - } - AccessMode::Read => { - let write_cap = self.auth.delegate( - &self.secrets, - namespace_id, - prev_user, - AccessMode::Write, - new_user, - new_area.clone(), - )?; - let read_cap = self.auth.delegate( - &self.secrets, - namespace_id, - prev_user, - AccessMode::Read, - new_user, - new_area, - )?; - Ok(vec![write_cap, read_cap]) + to: DelegateTo, + store: bool, + ) -> Result, AuthError> { + self.auth() + .delegate_full_caps(&self.secrets, from, access_mode, to, store) + } + + pub fn import_caps(&self, caps: Vec) -> Result<(), AuthError> { + // Only allow importing caps we can use. + // TODO: Is this what we want? 
+ for cap in &caps { + let user_id = cap.receiver(); + if !self.secrets().has_user(&user_id) { + return Err(AuthError::MissingUserSecret(user_id)); } } + self.auth().insert_caps(caps); + Ok(()) } - fn mint_capabilities( - &self, - namespace_id: NamespaceId, - user_id: UserId, - ) -> Result<(), AuthError> { - self.auth - .mint(&self.secrets, namespace_id, user_id, AccessMode::Read)?; - self.auth - .mint(&self.secrets, namespace_id, user_id, AccessMode::Write)?; + fn mint_caps(&self, namespace_id: NamespaceId, user_id: UserId) -> Result<(), AuthError> { + self.auth() + .create_full_caps(&self.secrets, namespace_id, user_id)?; Ok(()) } - - // pub fn delegate( - // &self, - // namespace_id: NamespaceId, - // access_mode: AccessMode, - // from: UserId, - // area: Area, - // store: bool, - // ) -> Option { - // } } diff --git a/iroh-willow/src/store/auth.rs b/iroh-willow/src/store/auth.rs index a3ba164053..3a7ec0295b 100644 --- a/iroh-willow/src/store/auth.rs +++ b/iroh-willow/src/store/auth.rs @@ -1,100 +1,121 @@ use std::{ - collections::HashMap, + collections::{BTreeSet, HashMap}, sync::{Arc, RwLock}, }; use anyhow::Result; +use serde::{Deserialize, Serialize}; +use tracing::debug; use crate::{ proto::{ - grouping::Area, - keys::{NamespaceId, NamespaceKind, UserId, UserPublicKey}, + grouping::{Area, AreaOfInterest, Point}, + keys::{NamespaceId, NamespaceKind, NamespacePublicKey, UserId, UserPublicKey}, meadowcap::{AccessMode, McCapability}, sync::ReadAuthorisation, - willow::{Entry, Path, SubspaceId, Timestamp, WriteCapability}, + willow::{Entry, WriteCapability}, }, + session::Interests, store::traits::{SecretStorage, SecretStoreError}, }; -#[derive(Debug)] +#[derive(Debug, Clone)] +pub struct DelegateTo { + pub user: UserId, + pub restrict_area: Option, +} + +impl DelegateTo { + pub fn new(user: UserId, restrict_area: Option) -> Self { + Self { + user, + restrict_area, + } + } +} + +#[derive(Debug, Clone)] pub struct CapSelector { - user_id: UserId, - namespace_id: 
NamespaceId, - granted_area: AreaSelector, + pub namespace_id: NamespaceId, + pub user: UserSelector, + pub area: AreaSelector, +} + +impl CapSelector { + pub fn matches(&self, cap: &McCapability) -> bool { + self.namespace_id == cap.granted_namespace().id() + && self.user.includes(&cap.receiver().id()) + && self.area.is_included_in(&cap.granted_area()) + } + + pub fn widest(namespace_id: NamespaceId) -> Self { + Self { + namespace_id, + user: UserSelector::Any, + area: AreaSelector::Widest, + } + } +} + +#[derive(Debug, Default, Clone, Copy, Eq, PartialEq, derive_more::From, Serialize, Deserialize)] +pub enum UserSelector { + #[default] + Any, + Exact(UserId), +} + +impl UserSelector { + fn includes(&self, user: &UserId) -> bool { + match self { + Self::Any => true, + Self::Exact(u) => u == user, + } + } } -#[derive(Debug)] +#[derive(Debug, Clone, Default)] pub enum AreaSelector { + #[default] + Widest, Area(Area), - Point { - subspace_id: SubspaceId, - path: Path, - timestamp: Timestamp, - }, + Point(Point), } impl AreaSelector { - pub fn included_in(&self, other: &Area) -> bool { + pub fn is_included_in(&self, other: &Area) -> bool { match self { + AreaSelector::Widest => true, AreaSelector::Area(area) => other.includes_area(area), - AreaSelector::Point { - subspace_id, - path, - timestamp, - } => other.includes_point(subspace_id, path, timestamp), + AreaSelector::Point(point) => other.includes_point(point), } } } impl CapSelector { - pub fn for_entry(entry: &Entry, user_id: UserId) -> Self { - let granted_area = AreaSelector::Point { - path: entry.path.clone(), - timestamp: entry.timestamp, - subspace_id: entry.subspace_id, - }; + pub fn for_entry(entry: &Entry, user_id: UserSelector) -> Self { + let granted_area = AreaSelector::Point(Point::from_entry(entry)); Self { namespace_id: entry.namespace_id, - user_id, - granted_area, + user: user_id, + area: granted_area, } } } -#[derive(Debug)] +#[derive(Debug, Serialize, Deserialize, Clone)] pub enum CapabilityPack 
{ Read(ReadAuthorisation), Write(WriteCapability), } -// #[derive(Debug)] -// pub enum CapabilityRoot { -// Owned(NamespaceSecretKey), -// Communal(NamespacePublicKey), -// } -// -// impl CapabilityRoot { -// pub fn kind(&self) -> NamespaceKind { -// match self { -// CapabilityRoot::Owned(_) => NamespaceKind::Owned, -// CapabilityRoot::Communal(_) => NamespaceKind::Communal, -// } -// } -// fn for_namespace( -// namespace: NamespacePublicKey, -// secrets: S, -// ) -> Result { -// match namespace.kind() { -// NamespaceKind::Communal => Ok(CapabilityRoot::Communal(namespace)), -// NamespaceKind::Owned => { -// let secret = secrets -// .get_namespace(&namespace.id()) -// .ok_or(AuthError::MissingNamespaceSecret)?; -// Ok(CapabilityRoot::Owned(secret)) -// } -// } -// } -// } +impl CapabilityPack { + pub fn receiver(&self) -> UserId { + match self { + CapabilityPack::Read(auth) => auth.read_cap().receiver().id(), + CapabilityPack::Write(cap) => cap.receiver().id(), + } + } +} #[derive(Debug, Eq, PartialEq, Clone, Copy, Hash)] pub struct CapabilityHash(iroh_base::hash::Hash); @@ -104,94 +125,208 @@ pub struct AuthStore { inner: Arc>, } impl AuthStore { - pub fn get_write(&self, selector: CapSelector) -> Result, AuthError> { - Ok(self.inner.read().unwrap().get_write_authorisation(selector)) + pub fn get_write_cap( + &self, + selector: &CapSelector, + ) -> Result, AuthError> { + Ok(self.inner.read().unwrap().get_write_cap(selector)) + } + + pub fn get_read_cap( + &self, + selector: &CapSelector, + ) -> Result, AuthError> { + let cap = self.inner.read().unwrap().get_read_cap(selector); + debug!(?selector, ?cap, "get read cap"); + Ok(cap) + } + + pub fn list_read_caps(&self) -> impl Iterator { + self.inner + .read() + .unwrap() + .read_caps + .values() + .flatten() + .cloned() + .collect::>() + .into_iter() } - pub fn get_read(&self, selector: CapSelector) -> Result, AuthError> { - Ok(self.inner.read().unwrap().get_read_authorisation(selector)) + pub fn insert_caps(&self, 
caps: impl IntoIterator) { + let mut inner = self.inner.write().unwrap(); + for cap in caps.into_iter() { + debug!(?cap, "insert cap"); + inner.insert_caps(cap); + } } - pub fn insert(&self, cap: CapabilityPack) { - self.inner.write().unwrap().insert_capability(cap); + pub fn resolve_interests( + &self, + interests: Interests, + ) -> Result>, AuthError> { + match interests { + Interests::All => { + let out = self + .list_read_caps() + .map(|auth| { + let area = auth.read_cap().granted_area(); + let aoi = AreaOfInterest::new(area); + (auth, BTreeSet::from_iter([aoi])) + }) + .collect::>(); + Ok(out) + } + Interests::Explicit(interests) => Ok(interests), + Interests::Some(interests) => { + let mut out: HashMap> = HashMap::new(); + for (namespace_id, aois) in interests { + for aoi in aois { + let selector = CapSelector { + namespace_id, + user: UserSelector::Any, + area: AreaSelector::Area(aoi.area.clone()), + }; + let cap = self.get_read_cap(&selector)?; + if let Some(cap) = cap { + let set = out.entry(cap).or_default(); + set.insert(aoi); + } + } + } + Ok(out) + } + } } - pub fn mint( + pub fn create_full_caps( &self, secrets: &S, namespace_id: NamespaceId, user_id: UserId, - access_mode: AccessMode, - ) -> Result<(), AuthError> { + ) -> Result<[CapabilityPack; 2], AuthError> { let namespace_key = namespace_id .into_public_key() - .map_err(|_| AuthError::InvalidNamespaceId)?; + .map_err(|_| AuthError::InvalidNamespaceId(namespace_id))?; let user_key: UserPublicKey = user_id .into_public_key() - .map_err(|_| AuthError::InvalidUserId)?; + .map_err(|_| AuthError::InvalidUserId(user_id))?; + let read_cap = self.create_read_cap(secrets, namespace_key, user_key)?; + let write_cap = self.create_write_cap(secrets, namespace_key, user_key)?; + let pack = [read_cap, write_cap]; + self.insert_caps(pack.clone()); + Ok(pack) + } + + pub fn create_read_cap( + &self, + secrets: &S, + namespace_key: NamespacePublicKey, + user_key: UserPublicKey, + ) -> Result { + let namespace_id = 
namespace_key.id(); let cap = match namespace_key.kind() { NamespaceKind::Owned => { let namespace_secret = secrets .get_namespace(&namespace_id) - .ok_or(AuthError::MissingNamespaceSecret)?; - McCapability::new_owned(namespace_secret, user_key, access_mode) + .ok_or(AuthError::MissingNamespaceSecret(namespace_id))?; + McCapability::new_owned(namespace_secret, user_key, AccessMode::Read) } NamespaceKind::Communal => { - McCapability::new_communal(namespace_key, user_key, access_mode) + McCapability::new_communal(namespace_key, user_key, AccessMode::Read) } }; - let pack = match access_mode { - AccessMode::Read => CapabilityPack::Read(ReadAuthorisation::new(cap, None)), - AccessMode::Write => CapabilityPack::Write(cap), - }; - self.insert(pack); - Ok(()) + // TODO: Subspace capability. + let pack = CapabilityPack::Read(ReadAuthorisation::new(cap, None)); + Ok(pack) } - pub fn delegate( + pub fn create_write_cap( &self, secrets: &S, - namespace_id: NamespaceId, - prev_user: UserId, - access_mode: AccessMode, - new_user: UserId, - new_area: Area, + namespace_key: NamespacePublicKey, + user_key: UserPublicKey, ) -> Result { - let new_user_key = new_user - .into_public_key() - .map_err(|_| AuthError::InvalidUserId)?; - let selector = CapSelector { - user_id: prev_user, - namespace_id, - granted_area: AreaSelector::Area(new_area.clone()), - }; - let pack = match access_mode { - AccessMode::Write => { - let cap = self - .get_write(selector)? 
- .ok_or(AuthError::NoCapabilityFound)?; - let user_secret = secrets - .get_user(&cap.receiver().id()) - .ok_or(AuthError::MissingUserSecret)?; - let new_cap = cap.delegate(&user_secret, new_user_key, new_area)?; - CapabilityPack::Write(new_cap) + let namespace_id = namespace_key.id(); + let cap = match namespace_key.kind() { + NamespaceKind::Owned => { + let namespace_secret = secrets + .get_namespace(&namespace_id) + .ok_or(AuthError::MissingNamespaceSecret(namespace_id))?; + McCapability::new_owned(namespace_secret, user_key, AccessMode::Write) } - AccessMode::Read => { - let auth = self - .get_read(selector)? - .ok_or(AuthError::NoCapabilityFound)?; - let ReadAuthorisation(read_cap, _subspace_cap) = auth; - let user_secret = secrets - .get_user(&read_cap.receiver().id()) - .ok_or(AuthError::MissingUserSecret)?; - let new_read_cap = read_cap.delegate(&user_secret, new_user_key, new_area)?; - // TODO: Subspace capability - CapabilityPack::Read(ReadAuthorisation::new(new_read_cap, None)) + NamespaceKind::Communal => { + McCapability::new_communal(namespace_key, user_key, AccessMode::Write) } }; + let pack = CapabilityPack::Write(cap); + Ok(pack) + } + + pub fn delegate_full_caps( + &self, + secrets: &S, + from: CapSelector, + access_mode: AccessMode, + to: DelegateTo, + store: bool, + ) -> Result, AuthError> { + let mut out = Vec::with_capacity(2); + let user_key: UserPublicKey = to + .user + .into_public_key() + .map_err(|_| AuthError::InvalidUserId(to.user))?; + let restrict_area = to.restrict_area; + let read_cap = self.delegate_read_cap(secrets, &from, user_key, restrict_area.clone())?; + out.push(read_cap); + if access_mode == AccessMode::Write { + let write_cap = self.delegate_write_cap(secrets, &from, user_key, restrict_area)?; + out.push(write_cap); + } + if store { + self.insert_caps(out.clone()); + } + Ok(out) + } + + pub fn delegate_read_cap( + &self, + secrets: &S, + from: &CapSelector, + to: UserPublicKey, + restrict_area: Option, + ) -> Result { + 
let auth = self.get_read_cap(from)?.ok_or(AuthError::NoCapability)?; + let ReadAuthorisation(read_cap, _subspace_cap) = auth; + let user_id = read_cap.receiver().id(); + let user_secret = secrets + .get_user(&user_id) + .ok_or(AuthError::MissingUserSecret(user_id))?; + let area = restrict_area.unwrap_or(read_cap.granted_area()); + let new_read_cap = read_cap.delegate(&user_secret, to, area)?; + // TODO: Subspace capability + let new_subspace_cap = None; + let pack = CapabilityPack::Read(ReadAuthorisation::new(new_read_cap, new_subspace_cap)); Ok(pack) } + + pub fn delegate_write_cap( + &self, + secrets: &S, + from: &CapSelector, + to: UserPublicKey, + restrict_area: Option, + ) -> Result { + let cap = self.get_write_cap(from)?.ok_or(AuthError::NoCapability)?; + let user_secret = secrets + .get_user(&cap.receiver().id()) + .ok_or(AuthError::MissingUserSecret(cap.receiver().id()))?; + let area = restrict_area.unwrap_or(cap.granted_area()); + let new_cap = cap.delegate(&user_secret, to, area)?; + Ok(CapabilityPack::Write(new_cap)) + } } + #[derive(Debug, Default)] pub struct Inner { write_caps: HashMap>, @@ -199,91 +334,83 @@ pub struct Inner { } impl Inner { - fn get_write_authorisation(&self, selector: CapSelector) -> Option { + fn get_write_cap(&self, selector: &CapSelector) -> Option { let candidates = self .write_caps .get(&selector.namespace_id) .into_iter() .flatten() - .filter(|cap| { - cap.receiver().id() == selector.user_id - && selector.granted_area.included_in(&cap.granted_area()) - }); + .filter(|cap| selector.matches(cap)); // Select the best candidate, by sorting for - // * smallest number of delegations - // * widest area - let best = candidates.reduce(|prev, next| match next.is_wider_than(prev) { - true => next, - false => prev, - }); + // * first: widest area + // * then: smallest number of delegations + let best = candidates.reduce( + |prev, next| { + if next.is_wider_than(prev) { + next + } else { + prev + } + }, + ); best.cloned() } - fn 
get_read_authorisation(&self, selector: CapSelector) -> Option { + fn get_read_cap(&self, selector: &CapSelector) -> Option { let candidates = self .read_caps .get(&selector.namespace_id) .into_iter() .flatten() - .filter(|auth| { - let cap = &auth.0; - cap.receiver().id() == selector.user_id - && selector.granted_area.included_in(&cap.granted_area()) - }); + .filter(|auth| selector.matches(auth.read_cap())); // Select the best candidate, by sorting for // * smallest number of delegations // * widest area - let best = candidates.reduce(|prev, next| match next.0.is_wider_than(&prev.0) { - true => next, - false => prev, + let best = candidates.reduce(|prev, next| { + if next.read_cap().is_wider_than(prev.read_cap()) { + next + } else { + prev + } }); best.cloned() } - fn insert_capability(&mut self, cap: CapabilityPack) { + fn insert_caps(&mut self, cap: CapabilityPack) { match cap { - CapabilityPack::Read(_) => todo!(), - CapabilityPack::Write(_) => todo!(), + CapabilityPack::Read(cap) => { + self.read_caps + .entry(cap.read_cap().granted_namespace().id()) + .or_default() + .push(cap); + } + CapabilityPack::Write(cap) => { + self.write_caps + .entry(cap.granted_namespace().id()) + .or_default() + .push(cap); + } } } } -// fn mint_capability( -// namespace_secret: &NamespaceSecretKey, -// user_public_key: UserPublicKey, -// ) -> (ReadCapability, WriteCapability) { -// let read_capability = McCapability::Owned(OwnedCapability::new( -// namespace_secret, -// user_public_key, -// AccessMode::Read, -// )); -// let write_capability = McCapability::Owned(OwnedCapability::new( -// namespace_secret, -// user_public_key, -// AccessMode::Write, -// )); -// (read_capability, write_capability) -// } - #[derive(thiserror::Error, Debug)] pub enum AuthError { - #[error("invalid user id")] - InvalidUserId, - #[error("invalid namespace id")] - InvalidNamespaceId, - #[error("missing user secret")] - MissingUserSecret, - #[error("missing namespace secret")] - MissingNamespaceSecret, - 
#[error("wrong root token for namespace kind")] - WrongRootToken, + #[error("invalid user id: {}", .0.fmt_short())] + InvalidUserId(UserId), + #[error("invalid namespace id: {}", .0.fmt_short())] + InvalidNamespaceId(NamespaceId), + #[error("missing user secret: {}", .0.fmt_short())] + MissingUserSecret(UserId), + #[error("missing namespace secret: {}", .0.fmt_short())] + MissingNamespaceSecret(NamespaceId), #[error("secret store error: {0}")] SecretStore(#[from] SecretStoreError), #[error("no capability found")] - NoCapabilityFound, + NoCapability, // TODO: remove - #[error("other: {0}")] + #[error("{0}")] Other(#[from] anyhow::Error), } diff --git a/iroh-willow/src/store/traits.rs b/iroh-willow/src/store/traits.rs index 9aa6da4282..52b615c300 100644 --- a/iroh-willow/src/store/traits.rs +++ b/iroh-willow/src/store/traits.rs @@ -22,11 +22,26 @@ pub trait SecretStorage: std::fmt::Debug + Clone + 'static { fn get_user(&self, id: &UserId) -> Option; fn get_namespace(&self, id: &NamespaceId) -> Option; - fn insert_user(&self, secret: UserSecretKey) -> Result<(), SecretStoreError> { - self.insert(meadowcap::SecretKey::User(secret)) + fn has_user(&self, id: &UserId) -> bool { + self.get_user(id).is_some() } - fn insert_namespace(&self, secret: NamespaceSecretKey) -> Result<(), SecretStoreError> { - self.insert(meadowcap::SecretKey::Namespace(secret)) + + fn has_namespace(&self, id: &UserId) -> bool { + self.get_user(id).is_some() + } + + fn insert_user(&self, secret: UserSecretKey) -> Result { + let id = secret.id(); + self.insert(meadowcap::SecretKey::User(secret))?; + Ok(id) + } + fn insert_namespace( + &self, + secret: NamespaceSecretKey, + ) -> Result { + let id = secret.id(); + self.insert(meadowcap::SecretKey::Namespace(secret))?; + Ok(id) } fn sign_user(&self, id: &UserId, message: &[u8]) -> Result { From 4d0fc5656e43c522a0a498d474ded7c434e05c96 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Fri, 21 Jun 2024 01:22:02 +0200 Subject: [PATCH 
065/198] improve module structure, cleanups, renames --- iroh-willow/src/actor.rs | 9 +- iroh-willow/src/auth.rs | 455 +++++++++++++++++++++++++++++++ iroh-willow/src/lib.rs | 1 + iroh-willow/src/net.rs | 6 +- iroh-willow/src/session.rs | 30 +- iroh-willow/src/session/error.rs | 2 +- iroh-willow/src/session/run.rs | 2 +- iroh-willow/src/store.rs | 47 +--- iroh-willow/src/store/auth.rs | 416 ---------------------------- 9 files changed, 489 insertions(+), 479 deletions(-) create mode 100644 iroh-willow/src/auth.rs diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/actor.rs index d1fb668962..e258e05d07 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -9,6 +9,7 @@ use tokio_util::sync::CancellationToken; use tracing::{debug, error, error_span, trace, warn, Instrument}; use crate::{ + auth::{CapSelector, CapabilityPack, DelegateTo}, form::{AuthForm, EntryForm, EntryOrForm}, proto::{ grouping::ThreeDRange, @@ -18,7 +19,6 @@ use crate::{ }, session::{Channels, Error, InitialTransmission, Role, Session, SessionId, SessionInit}, store::{ - auth::{CapSelector, CapabilityPack, DelegateTo}, traits::{EntryReader, SecretStorage, Storage}, Origin, Store, }, @@ -459,7 +459,7 @@ impl Actor { send_reply(reply, res.map_err(anyhow::Error::from)) } ToActor::ImportCaps { caps, reply } => { - let res = self.store.import_caps(caps); + let res = self.store.auth().import_caps(caps); send_reply(reply, res.map_err(anyhow::Error::from)) } ToActor::DelegateCaps { @@ -469,7 +469,10 @@ impl Actor { store, reply, } => { - let res = self.store.delegate_cap(from, access_mode, to, store); + let res = self + .store + .auth() + .delegate_full_caps(from, access_mode, to, store); send_reply(reply, res.map_err(anyhow::Error::from)) } } diff --git a/iroh-willow/src/auth.rs b/iroh-willow/src/auth.rs new file mode 100644 index 0000000000..fe4a7712b5 --- /dev/null +++ b/iroh-willow/src/auth.rs @@ -0,0 +1,455 @@ +use std::{ + collections::{BTreeSet, HashMap}, + sync::{Arc, RwLock}, 
+}; + +use anyhow::Result; +use serde::{Deserialize, Serialize}; +use tracing::debug; + +use crate::{ + proto::{ + grouping::{Area, AreaOfInterest, Point}, + keys::{NamespaceId, NamespaceKind, NamespacePublicKey, UserId, UserPublicKey}, + meadowcap::{AccessMode, McCapability}, + sync::ReadAuthorisation, + willow::{Entry, WriteCapability}, + }, + session::Interests, + store::traits::{SecretStorage, SecretStoreError, Storage}, +}; + +#[derive(Debug, Clone)] +pub struct DelegateTo { + pub user: UserId, + pub restrict_area: Option, +} + +impl DelegateTo { + pub fn new(user: UserId, restrict_area: Option) -> Self { + Self { + user, + restrict_area, + } + } +} + +#[derive(Debug, Clone)] +pub struct CapSelector { + pub namespace_id: NamespaceId, + pub user: UserSelector, + pub area: AreaSelector, +} + +impl CapSelector { + pub fn matches(&self, cap: &McCapability) -> bool { + self.namespace_id == cap.granted_namespace().id() + && self.user.includes(&cap.receiver().id()) + && self.area.is_included_in(&cap.granted_area()) + } + + pub fn widest(namespace_id: NamespaceId) -> Self { + Self { + namespace_id, + user: UserSelector::Any, + area: AreaSelector::Widest, + } + } +} + +#[derive(Debug, Default, Clone, Copy, Eq, PartialEq, derive_more::From, Serialize, Deserialize)] +pub enum UserSelector { + #[default] + Any, + Exact(UserId), +} + +impl UserSelector { + fn includes(&self, user: &UserId) -> bool { + match self { + Self::Any => true, + Self::Exact(u) => u == user, + } + } +} + +#[derive(Debug, Clone, Default)] +pub enum AreaSelector { + #[default] + Widest, + Area(Area), + Point(Point), +} + +impl AreaSelector { + pub fn is_included_in(&self, other: &Area) -> bool { + match self { + AreaSelector::Widest => true, + AreaSelector::Area(area) => other.includes_area(area), + AreaSelector::Point(point) => other.includes_point(point), + } + } +} + +impl CapSelector { + pub fn for_entry(entry: &Entry, user_id: UserSelector) -> Self { + let granted_area = 
AreaSelector::Point(Point::from_entry(entry)); + Self { + namespace_id: entry.namespace_id, + user: user_id, + area: granted_area, + } + } +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub enum CapabilityPack { + Read(ReadAuthorisation), + Write(WriteCapability), +} + +impl CapabilityPack { + pub fn receiver(&self) -> UserId { + match self { + CapabilityPack::Read(auth) => auth.read_cap().receiver().id(), + CapabilityPack::Write(cap) => cap.receiver().id(), + } + } + pub fn validate(&self) -> Result<(), AuthError> { + match self { + CapabilityPack::Read(auth) => { + auth.read_cap().validate()?; + if let Some(subspace_cap) = auth.subspace_cap() { + subspace_cap.validate()?; + } + } + CapabilityPack::Write(cap) => { + cap.validate()?; + } + } + Ok(()) + } +} + +#[derive(Debug, Eq, PartialEq, Clone, Copy, Hash)] +pub struct CapabilityHash(iroh_base::hash::Hash); + +#[derive(Debug, Clone)] +pub struct Auth { + secrets: S::Secrets, + // TODO: Move to store and trait S::Caps + caps: Arc>, +} +impl Auth { + pub fn new(secrets: S::Secrets) -> Self { + Self { + secrets, + // TODO: persist + caps: Default::default(), + } + } + pub fn get_write_cap( + &self, + selector: &CapSelector, + ) -> Result, AuthError> { + Ok(self.caps.read().unwrap().get_write_cap(selector)) + } + + pub fn get_read_cap( + &self, + selector: &CapSelector, + ) -> Result, AuthError> { + let cap = self.caps.read().unwrap().get_read_cap(selector); + debug!(?selector, ?cap, "get read cap"); + Ok(cap) + } + + pub fn list_read_caps(&self) -> impl Iterator { + self.caps + .read() + .unwrap() + .read_caps + .values() + .flatten() + .cloned() + .collect::>() + .into_iter() + } + + pub fn import_caps( + &self, + caps: impl IntoIterator, + ) -> Result<(), AuthError> { + for cap in caps.into_iter() { + cap.validate()?; + // Only allow importing caps we can use. + // TODO: Is this what we want? 
+ let user_id = cap.receiver(); + if !self.secrets.has_user(&user_id) { + return Err(AuthError::MissingUserSecret(user_id)); + } + } + Ok(()) + } + + pub fn insert_unchecked(&self, caps: impl IntoIterator) { + let mut store = self.caps.write().unwrap(); + for cap in caps.into_iter() { + debug!(?cap, "insert cap"); + store.insert_caps(cap); + } + } + + pub fn find_read_caps_for_interests( + &self, + interests: Interests, + ) -> Result>, AuthError> { + match interests { + Interests::All => { + let out = self + .list_read_caps() + .map(|auth| { + let area = auth.read_cap().granted_area(); + let aoi = AreaOfInterest::new(area); + (auth, BTreeSet::from_iter([aoi])) + }) + .collect::>(); + Ok(out) + } + Interests::Explicit(interests) => Ok(interests), + Interests::Some(interests) => { + let mut out: HashMap> = HashMap::new(); + for (namespace_id, aois) in interests { + for aoi in aois { + // TODO: check if aoi is already covered before trying to cover it + let selector = CapSelector { + namespace_id, + user: UserSelector::Any, + area: AreaSelector::Area(aoi.area.clone()), + }; + let cap = self.get_read_cap(&selector)?; + if let Some(cap) = cap { + let set = out.entry(cap).or_default(); + set.insert(aoi); + } + } + } + Ok(out) + } + } + } + + pub fn create_full_caps( + &self, + namespace_id: NamespaceId, + user_id: UserId, + ) -> Result<[CapabilityPack; 2], AuthError> { + let namespace_key = namespace_id + .into_public_key() + .map_err(|_| AuthError::InvalidNamespaceId(namespace_id))?; + let user_key: UserPublicKey = user_id + .into_public_key() + .map_err(|_| AuthError::InvalidUserId(user_id))?; + let read_cap = self.create_read_cap(namespace_key, user_key)?; + let write_cap = self.create_write_cap(namespace_key, user_key)?; + let pack = [read_cap, write_cap]; + self.insert_unchecked(pack.clone()); + Ok(pack) + } + + pub fn create_read_cap( + &self, + namespace_key: NamespacePublicKey, + user_key: UserPublicKey, + ) -> Result { + let namespace_id = namespace_key.id(); + 
let cap = match namespace_key.kind() { + NamespaceKind::Owned => { + let namespace_secret = self + .secrets + .get_namespace(&namespace_id) + .ok_or(AuthError::MissingNamespaceSecret(namespace_id))?; + McCapability::new_owned(namespace_secret, user_key, AccessMode::Read) + } + NamespaceKind::Communal => { + McCapability::new_communal(namespace_key, user_key, AccessMode::Read) + } + }; + // TODO: Subspace capability. + let pack = CapabilityPack::Read(ReadAuthorisation::new(cap, None)); + Ok(pack) + } + + pub fn create_write_cap( + &self, + namespace_key: NamespacePublicKey, + user_key: UserPublicKey, + ) -> Result { + let namespace_id = namespace_key.id(); + let cap = match namespace_key.kind() { + NamespaceKind::Owned => { + let namespace_secret = self + .secrets + .get_namespace(&namespace_id) + .ok_or(AuthError::MissingNamespaceSecret(namespace_id))?; + McCapability::new_owned(namespace_secret, user_key, AccessMode::Write) + } + NamespaceKind::Communal => { + McCapability::new_communal(namespace_key, user_key, AccessMode::Write) + } + }; + let pack = CapabilityPack::Write(cap); + Ok(pack) + } + + pub fn delegate_full_caps( + &self, + from: CapSelector, + access_mode: AccessMode, + to: DelegateTo, + store: bool, + ) -> Result, AuthError> { + let mut out = Vec::with_capacity(2); + let user_key: UserPublicKey = to + .user + .into_public_key() + .map_err(|_| AuthError::InvalidUserId(to.user))?; + let restrict_area = to.restrict_area; + let read_cap = self.delegate_read_cap(&from, user_key, restrict_area.clone())?; + out.push(read_cap); + if access_mode == AccessMode::Write { + let write_cap = self.delegate_write_cap(&from, user_key, restrict_area)?; + out.push(write_cap); + } + if store { + self.insert_unchecked(out.clone()); + } + Ok(out) + } + + pub fn delegate_read_cap( + &self, + from: &CapSelector, + to: UserPublicKey, + restrict_area: Option, + ) -> Result { + let auth = self.get_read_cap(from)?.ok_or(AuthError::NoCapability)?; + let ReadAuthorisation(read_cap, 
_subspace_cap) = auth; + let user_id = read_cap.receiver().id(); + let user_secret = self + .secrets + .get_user(&user_id) + .ok_or(AuthError::MissingUserSecret(user_id))?; + let area = restrict_area.unwrap_or(read_cap.granted_area()); + let new_read_cap = read_cap.delegate(&user_secret, to, area)?; + // TODO: Subspace capability + let new_subspace_cap = None; + let pack = CapabilityPack::Read(ReadAuthorisation::new(new_read_cap, new_subspace_cap)); + Ok(pack) + } + + pub fn delegate_write_cap( + &self, + from: &CapSelector, + to: UserPublicKey, + restrict_area: Option, + ) -> Result { + let cap = self.get_write_cap(from)?.ok_or(AuthError::NoCapability)?; + let user_secret = self + .secrets + .get_user(&cap.receiver().id()) + .ok_or(AuthError::MissingUserSecret(cap.receiver().id()))?; + let area = restrict_area.unwrap_or(cap.granted_area()); + let new_cap = cap.delegate(&user_secret, to, area)?; + Ok(CapabilityPack::Write(new_cap)) + } +} + +// TODO: Add trait and move impl to store::memory +#[derive(Debug, Default)] +pub struct CapStore { + write_caps: HashMap>, + read_caps: HashMap>, +} + +impl CapStore { + fn get_write_cap(&self, selector: &CapSelector) -> Option { + let candidates = self + .write_caps + .get(&selector.namespace_id) + .into_iter() + .flatten() + .filter(|cap| selector.matches(cap)); + + // Select the best candidate, by sorting for + // * first: widest area + // * then: smallest number of delegations + let best = candidates.reduce( + |prev, next| { + if next.is_wider_than(prev) { + next + } else { + prev + } + }, + ); + best.cloned() + } + + fn get_read_cap(&self, selector: &CapSelector) -> Option { + let candidates = self + .read_caps + .get(&selector.namespace_id) + .into_iter() + .flatten() + .filter(|auth| selector.matches(auth.read_cap())); + + // Select the best candidate, by sorting for + // * smallest number of delegations + // * widest area + let best = candidates.reduce(|prev, next| { + if next.read_cap().is_wider_than(prev.read_cap()) 
{ + next + } else { + prev + } + }); + best.cloned() + } + + fn insert_caps(&mut self, cap: CapabilityPack) { + match cap { + CapabilityPack::Read(cap) => { + self.read_caps + .entry(cap.read_cap().granted_namespace().id()) + .or_default() + .push(cap); + } + CapabilityPack::Write(cap) => { + self.write_caps + .entry(cap.granted_namespace().id()) + .or_default() + .push(cap); + } + } + } +} + +#[derive(thiserror::Error, Debug)] +pub enum AuthError { + #[error("invalid user id: {}", .0.fmt_short())] + InvalidUserId(UserId), + #[error("invalid namespace id: {}", .0.fmt_short())] + InvalidNamespaceId(NamespaceId), + #[error("missing user secret: {}", .0.fmt_short())] + MissingUserSecret(UserId), + #[error("missing namespace secret: {}", .0.fmt_short())] + MissingNamespaceSecret(NamespaceId), + #[error("secret store error: {0}")] + SecretStore(#[from] SecretStoreError), + #[error("no capability found")] + NoCapability, + // TODO: remove + #[error("{0}")] + Other(#[from] anyhow::Error), +} diff --git a/iroh-willow/src/lib.rs b/iroh-willow/src/lib.rs index 423e49de97..c7ea94a586 100644 --- a/iroh-willow/src/lib.rs +++ b/iroh-willow/src/lib.rs @@ -3,6 +3,7 @@ #![allow(missing_docs)] pub mod actor; +pub mod auth; pub mod engine; pub mod form; pub mod net; diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 32c2450e18..1efd4b6e64 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -305,6 +305,7 @@ mod tests { use crate::{ actor::ActorHandle, + auth::{CapSelector, DelegateTo}, form::{AuthForm, EntryForm, PayloadForm, SubspaceForm, TimestampForm}, net::run, proto::{ @@ -318,10 +319,7 @@ mod tests { willow::{Entry, InvalidPath, Path, WriteCapability}, }, session::{Interests, Role, SessionInit, SessionMode}, - store::{ - auth::{CapSelector, DelegateTo}, - memory, - }, + store::memory, }; const ALPN: &[u8] = b"iroh-willow/0"; diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 0c11bb4e61..73950eba56 100644 --- 
a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -3,8 +3,8 @@ use std::collections::{BTreeMap, BTreeSet, HashMap}; use crate::proto::keys::NamespaceId; use crate::proto::sync::{AccessChallenge, AreaOfInterestHandle, ChallengeHash, ReadAuthorisation}; use crate::{ + auth::CapSelector, proto::grouping::{Area, AreaOfInterest}, - store::auth::CapSelector, }; pub mod channels; @@ -75,22 +75,24 @@ pub enum Interests { #[default] All, Some(BTreeMap>), + // TODO: Remove? Explicit(HashMap>), } -#[derive(Debug, Default, Clone)] -pub enum Interests2 { - #[default] - All, - Some(Vec<(CapSelector, AreaOfInterestSelector)>), -} - -#[derive(Debug, Default, Clone)] -pub enum AreaOfInterestSelector { - #[default] - Widest, - Exact(BTreeSet), -} +// TODO: I think the interests would be better represented like this maybe? +// #[derive(Debug, Default, Clone)] +// pub enum Interests2 { +// #[default] +// All, +// Some(Vec<(CapSelector, AreaOfInterestSelector)>), +// } +// +// #[derive(Debug, Default, Clone)] +// pub enum AreaOfInterestSelector { +// #[default] +// Widest, +// Exact(BTreeSet), +// } /// Options to initialize a session with. 
#[derive(Debug)] diff --git a/iroh-willow/src/session/error.rs b/iroh-willow/src/session/error.rs index d1b7234cdc..48ff2b9785 100644 --- a/iroh-willow/src/session/error.rs +++ b/iroh-willow/src/session/error.rs @@ -15,7 +15,7 @@ pub enum Error { #[error("local store failed: {0}")] Store(#[from] anyhow::Error), #[error("authentication error: {0}")] - Auth(#[from] crate::store::auth::AuthError), + Auth(#[from] crate::auth::AuthError), #[error("payload store failed: {0}")] PayloadStore(std::io::Error), #[error("payload digest does not match expected digest")] diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 516056ba51..cea8219c7a 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -199,7 +199,7 @@ async fn setup( ) -> Result<(), Error> { // debug!(interests = init.interests.len(), "start setup"); debug!(?init, "start setup"); - let interests = store.auth().resolve_interests(init.interests)?; + let interests = store.auth().find_read_caps_for_interests(init.interests)?; debug!(?interests, "found interests"); for (authorisation, aois) in interests { // TODO: implement private area intersection diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index e3fafa60d3..f8439feb33 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -2,20 +2,17 @@ use anyhow::{anyhow, Result}; use rand_core::CryptoRngCore; use crate::{ + auth::{Auth, AuthError, CapSelector, UserSelector}, form::{AuthForm, EntryOrForm}, proto::{ keys::{NamespaceId, NamespaceKind, NamespaceSecretKey, UserId}, - meadowcap::AccessMode, willow::Entry, }, session::Error, - store::{ - auth::{AuthError, CapSelector, CapabilityPack, DelegateTo, UserSelector}, - traits::SecretStorage, - }, + store::traits::SecretStorage, }; -use self::{auth::AuthStore, traits::Storage}; +use self::traits::Storage; pub use self::entry::{Origin, WatchableEntryStore}; @@ -29,7 +26,7 @@ pub struct Store { entries: WatchableEntryStore, secrets: 
S::Secrets, payloads: S::Payloads, - auth: AuthStore, + auth: Auth, } impl Store { @@ -38,7 +35,7 @@ impl Store { entries: WatchableEntryStore::new(storage.entries().clone()), secrets: storage.secrets().clone(), payloads: storage.payloads().clone(), - auth: Default::default(), + auth: Auth::new(storage.secrets().clone()), } } @@ -54,7 +51,7 @@ impl Store { &self.payloads } - pub fn auth(&self) -> &AuthStore { + pub fn auth(&self) -> &Auth { &self.auth } @@ -91,37 +88,7 @@ impl Store { let namespace_secret = NamespaceSecretKey::generate(rng, kind); let namespace_id = namespace_secret.id(); self.secrets().insert_namespace(namespace_secret)?; - self.mint_caps(namespace_id, owner)?; + self.auth().create_full_caps(namespace_id, owner)?; Ok(namespace_id) } - - pub fn delegate_cap( - &self, - from: CapSelector, - access_mode: AccessMode, - to: DelegateTo, - store: bool, - ) -> Result, AuthError> { - self.auth() - .delegate_full_caps(&self.secrets, from, access_mode, to, store) - } - - pub fn import_caps(&self, caps: Vec) -> Result<(), AuthError> { - // Only allow importing caps we can use. - // TODO: Is this what we want? 
- for cap in &caps { - let user_id = cap.receiver(); - if !self.secrets().has_user(&user_id) { - return Err(AuthError::MissingUserSecret(user_id)); - } - } - self.auth().insert_caps(caps); - Ok(()) - } - - fn mint_caps(&self, namespace_id: NamespaceId, user_id: UserId) -> Result<(), AuthError> { - self.auth() - .create_full_caps(&self.secrets, namespace_id, user_id)?; - Ok(()) - } } diff --git a/iroh-willow/src/store/auth.rs b/iroh-willow/src/store/auth.rs index 3a7ec0295b..e69de29bb2 100644 --- a/iroh-willow/src/store/auth.rs +++ b/iroh-willow/src/store/auth.rs @@ -1,416 +0,0 @@ -use std::{ - collections::{BTreeSet, HashMap}, - sync::{Arc, RwLock}, -}; - -use anyhow::Result; -use serde::{Deserialize, Serialize}; -use tracing::debug; - -use crate::{ - proto::{ - grouping::{Area, AreaOfInterest, Point}, - keys::{NamespaceId, NamespaceKind, NamespacePublicKey, UserId, UserPublicKey}, - meadowcap::{AccessMode, McCapability}, - sync::ReadAuthorisation, - willow::{Entry, WriteCapability}, - }, - session::Interests, - store::traits::{SecretStorage, SecretStoreError}, -}; - -#[derive(Debug, Clone)] -pub struct DelegateTo { - pub user: UserId, - pub restrict_area: Option, -} - -impl DelegateTo { - pub fn new(user: UserId, restrict_area: Option) -> Self { - Self { - user, - restrict_area, - } - } -} - -#[derive(Debug, Clone)] -pub struct CapSelector { - pub namespace_id: NamespaceId, - pub user: UserSelector, - pub area: AreaSelector, -} - -impl CapSelector { - pub fn matches(&self, cap: &McCapability) -> bool { - self.namespace_id == cap.granted_namespace().id() - && self.user.includes(&cap.receiver().id()) - && self.area.is_included_in(&cap.granted_area()) - } - - pub fn widest(namespace_id: NamespaceId) -> Self { - Self { - namespace_id, - user: UserSelector::Any, - area: AreaSelector::Widest, - } - } -} - -#[derive(Debug, Default, Clone, Copy, Eq, PartialEq, derive_more::From, Serialize, Deserialize)] -pub enum UserSelector { - #[default] - Any, - Exact(UserId), -} - 
-impl UserSelector { - fn includes(&self, user: &UserId) -> bool { - match self { - Self::Any => true, - Self::Exact(u) => u == user, - } - } -} - -#[derive(Debug, Clone, Default)] -pub enum AreaSelector { - #[default] - Widest, - Area(Area), - Point(Point), -} - -impl AreaSelector { - pub fn is_included_in(&self, other: &Area) -> bool { - match self { - AreaSelector::Widest => true, - AreaSelector::Area(area) => other.includes_area(area), - AreaSelector::Point(point) => other.includes_point(point), - } - } -} - -impl CapSelector { - pub fn for_entry(entry: &Entry, user_id: UserSelector) -> Self { - let granted_area = AreaSelector::Point(Point::from_entry(entry)); - Self { - namespace_id: entry.namespace_id, - user: user_id, - area: granted_area, - } - } -} - -#[derive(Debug, Serialize, Deserialize, Clone)] -pub enum CapabilityPack { - Read(ReadAuthorisation), - Write(WriteCapability), -} - -impl CapabilityPack { - pub fn receiver(&self) -> UserId { - match self { - CapabilityPack::Read(auth) => auth.read_cap().receiver().id(), - CapabilityPack::Write(cap) => cap.receiver().id(), - } - } -} - -#[derive(Debug, Eq, PartialEq, Clone, Copy, Hash)] -pub struct CapabilityHash(iroh_base::hash::Hash); - -#[derive(Debug, Default, Clone)] -pub struct AuthStore { - inner: Arc>, -} -impl AuthStore { - pub fn get_write_cap( - &self, - selector: &CapSelector, - ) -> Result, AuthError> { - Ok(self.inner.read().unwrap().get_write_cap(selector)) - } - - pub fn get_read_cap( - &self, - selector: &CapSelector, - ) -> Result, AuthError> { - let cap = self.inner.read().unwrap().get_read_cap(selector); - debug!(?selector, ?cap, "get read cap"); - Ok(cap) - } - - pub fn list_read_caps(&self) -> impl Iterator { - self.inner - .read() - .unwrap() - .read_caps - .values() - .flatten() - .cloned() - .collect::>() - .into_iter() - } - - pub fn insert_caps(&self, caps: impl IntoIterator) { - let mut inner = self.inner.write().unwrap(); - for cap in caps.into_iter() { - debug!(?cap, "insert 
cap"); - inner.insert_caps(cap); - } - } - - pub fn resolve_interests( - &self, - interests: Interests, - ) -> Result>, AuthError> { - match interests { - Interests::All => { - let out = self - .list_read_caps() - .map(|auth| { - let area = auth.read_cap().granted_area(); - let aoi = AreaOfInterest::new(area); - (auth, BTreeSet::from_iter([aoi])) - }) - .collect::>(); - Ok(out) - } - Interests::Explicit(interests) => Ok(interests), - Interests::Some(interests) => { - let mut out: HashMap> = HashMap::new(); - for (namespace_id, aois) in interests { - for aoi in aois { - let selector = CapSelector { - namespace_id, - user: UserSelector::Any, - area: AreaSelector::Area(aoi.area.clone()), - }; - let cap = self.get_read_cap(&selector)?; - if let Some(cap) = cap { - let set = out.entry(cap).or_default(); - set.insert(aoi); - } - } - } - Ok(out) - } - } - } - - pub fn create_full_caps( - &self, - secrets: &S, - namespace_id: NamespaceId, - user_id: UserId, - ) -> Result<[CapabilityPack; 2], AuthError> { - let namespace_key = namespace_id - .into_public_key() - .map_err(|_| AuthError::InvalidNamespaceId(namespace_id))?; - let user_key: UserPublicKey = user_id - .into_public_key() - .map_err(|_| AuthError::InvalidUserId(user_id))?; - let read_cap = self.create_read_cap(secrets, namespace_key, user_key)?; - let write_cap = self.create_write_cap(secrets, namespace_key, user_key)?; - let pack = [read_cap, write_cap]; - self.insert_caps(pack.clone()); - Ok(pack) - } - - pub fn create_read_cap( - &self, - secrets: &S, - namespace_key: NamespacePublicKey, - user_key: UserPublicKey, - ) -> Result { - let namespace_id = namespace_key.id(); - let cap = match namespace_key.kind() { - NamespaceKind::Owned => { - let namespace_secret = secrets - .get_namespace(&namespace_id) - .ok_or(AuthError::MissingNamespaceSecret(namespace_id))?; - McCapability::new_owned(namespace_secret, user_key, AccessMode::Read) - } - NamespaceKind::Communal => { - McCapability::new_communal(namespace_key, 
user_key, AccessMode::Read) - } - }; - // TODO: Subspace capability. - let pack = CapabilityPack::Read(ReadAuthorisation::new(cap, None)); - Ok(pack) - } - - pub fn create_write_cap( - &self, - secrets: &S, - namespace_key: NamespacePublicKey, - user_key: UserPublicKey, - ) -> Result { - let namespace_id = namespace_key.id(); - let cap = match namespace_key.kind() { - NamespaceKind::Owned => { - let namespace_secret = secrets - .get_namespace(&namespace_id) - .ok_or(AuthError::MissingNamespaceSecret(namespace_id))?; - McCapability::new_owned(namespace_secret, user_key, AccessMode::Write) - } - NamespaceKind::Communal => { - McCapability::new_communal(namespace_key, user_key, AccessMode::Write) - } - }; - let pack = CapabilityPack::Write(cap); - Ok(pack) - } - - pub fn delegate_full_caps( - &self, - secrets: &S, - from: CapSelector, - access_mode: AccessMode, - to: DelegateTo, - store: bool, - ) -> Result, AuthError> { - let mut out = Vec::with_capacity(2); - let user_key: UserPublicKey = to - .user - .into_public_key() - .map_err(|_| AuthError::InvalidUserId(to.user))?; - let restrict_area = to.restrict_area; - let read_cap = self.delegate_read_cap(secrets, &from, user_key, restrict_area.clone())?; - out.push(read_cap); - if access_mode == AccessMode::Write { - let write_cap = self.delegate_write_cap(secrets, &from, user_key, restrict_area)?; - out.push(write_cap); - } - if store { - self.insert_caps(out.clone()); - } - Ok(out) - } - - pub fn delegate_read_cap( - &self, - secrets: &S, - from: &CapSelector, - to: UserPublicKey, - restrict_area: Option, - ) -> Result { - let auth = self.get_read_cap(from)?.ok_or(AuthError::NoCapability)?; - let ReadAuthorisation(read_cap, _subspace_cap) = auth; - let user_id = read_cap.receiver().id(); - let user_secret = secrets - .get_user(&user_id) - .ok_or(AuthError::MissingUserSecret(user_id))?; - let area = restrict_area.unwrap_or(read_cap.granted_area()); - let new_read_cap = read_cap.delegate(&user_secret, to, area)?; - // 
TODO: Subspace capability - let new_subspace_cap = None; - let pack = CapabilityPack::Read(ReadAuthorisation::new(new_read_cap, new_subspace_cap)); - Ok(pack) - } - - pub fn delegate_write_cap( - &self, - secrets: &S, - from: &CapSelector, - to: UserPublicKey, - restrict_area: Option, - ) -> Result { - let cap = self.get_write_cap(from)?.ok_or(AuthError::NoCapability)?; - let user_secret = secrets - .get_user(&cap.receiver().id()) - .ok_or(AuthError::MissingUserSecret(cap.receiver().id()))?; - let area = restrict_area.unwrap_or(cap.granted_area()); - let new_cap = cap.delegate(&user_secret, to, area)?; - Ok(CapabilityPack::Write(new_cap)) - } -} - -#[derive(Debug, Default)] -pub struct Inner { - write_caps: HashMap>, - read_caps: HashMap>, -} - -impl Inner { - fn get_write_cap(&self, selector: &CapSelector) -> Option { - let candidates = self - .write_caps - .get(&selector.namespace_id) - .into_iter() - .flatten() - .filter(|cap| selector.matches(cap)); - - // Select the best candidate, by sorting for - // * first: widest area - // * then: smallest number of delegations - let best = candidates.reduce( - |prev, next| { - if next.is_wider_than(prev) { - next - } else { - prev - } - }, - ); - best.cloned() - } - - fn get_read_cap(&self, selector: &CapSelector) -> Option { - let candidates = self - .read_caps - .get(&selector.namespace_id) - .into_iter() - .flatten() - .filter(|auth| selector.matches(auth.read_cap())); - - // Select the best candidate, by sorting for - // * smallest number of delegations - // * widest area - let best = candidates.reduce(|prev, next| { - if next.read_cap().is_wider_than(prev.read_cap()) { - next - } else { - prev - } - }); - best.cloned() - } - - fn insert_caps(&mut self, cap: CapabilityPack) { - match cap { - CapabilityPack::Read(cap) => { - self.read_caps - .entry(cap.read_cap().granted_namespace().id()) - .or_default() - .push(cap); - } - CapabilityPack::Write(cap) => { - self.write_caps - .entry(cap.granted_namespace().id()) - 
.or_default() - .push(cap); - } - } - } -} - -#[derive(thiserror::Error, Debug)] -pub enum AuthError { - #[error("invalid user id: {}", .0.fmt_short())] - InvalidUserId(UserId), - #[error("invalid namespace id: {}", .0.fmt_short())] - InvalidNamespaceId(NamespaceId), - #[error("missing user secret: {}", .0.fmt_short())] - MissingUserSecret(UserId), - #[error("missing namespace secret: {}", .0.fmt_short())] - MissingNamespaceSecret(NamespaceId), - #[error("secret store error: {0}")] - SecretStore(#[from] SecretStoreError), - #[error("no capability found")] - NoCapability, - // TODO: remove - #[error("{0}")] - Other(#[from] anyhow::Error), -} From 78224329a6f7fe56b4255d10f7dd388a40f42dbb Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Fri, 21 Jun 2024 01:28:16 +0200 Subject: [PATCH 066/198] fmt --- iroh-willow/src/session.rs | 5 +---- iroh-willow/src/store/auth.rs | 1 + 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 73950eba56..25fd13ca7f 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -1,11 +1,8 @@ use std::collections::{BTreeMap, BTreeSet, HashMap}; +use crate::proto::grouping::{Area, AreaOfInterest}; use crate::proto::keys::NamespaceId; use crate::proto::sync::{AccessChallenge, AreaOfInterestHandle, ChallengeHash, ReadAuthorisation}; -use crate::{ - auth::CapSelector, - proto::grouping::{Area, AreaOfInterest}, -}; pub mod channels; mod data; diff --git a/iroh-willow/src/store/auth.rs b/iroh-willow/src/store/auth.rs index e69de29bb2..8b13789179 100644 --- a/iroh-willow/src/store/auth.rs +++ b/iroh-willow/src/store/auth.rs @@ -0,0 +1 @@ + From 468f01a24188b668ccd76de6f4d70824ae018fd6 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Fri, 21 Jun 2024 02:38:29 +0200 Subject: [PATCH 067/198] fix after merging main --- iroh-willow/src/net.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/iroh-willow/src/net.rs 
b/iroh-willow/src/net.rs index 1efd4b6e64..83a9f9cee8 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -562,7 +562,7 @@ mod tests { .alpns(vec![ALPN.to_vec()]) .bind(0) .await?; - let addr = ep.my_addr().await?; + let addr = ep.node_addr().await?; let node_id = ep.node_id(); Ok((ep, node_id, addr)) } From d31084ee77c27e57e4cdf3eee8c2a53a6979730f Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Fri, 21 Jun 2024 10:06:11 +0200 Subject: [PATCH 068/198] fixup --- iroh-willow/src/auth.rs | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/iroh-willow/src/auth.rs b/iroh-willow/src/auth.rs index fe4a7712b5..ba8c5560da 100644 --- a/iroh-willow/src/auth.rs +++ b/iroh-willow/src/auth.rs @@ -152,7 +152,9 @@ impl Auth { &self, selector: &CapSelector, ) -> Result, AuthError> { - Ok(self.caps.read().unwrap().get_write_cap(selector)) + let cap = self.caps.read().unwrap().get_write_cap(selector); + debug!(?selector, ?cap, "get write cap"); + Ok(cap) } pub fn get_read_cap( @@ -165,6 +167,7 @@ impl Auth { } pub fn list_read_caps(&self) -> impl Iterator { + // TODO: Less clones? self.caps .read() .unwrap() @@ -180,6 +183,7 @@ impl Auth { &self, caps: impl IntoIterator, ) -> Result<(), AuthError> { + let mut store = self.caps.write().unwrap(); for cap in caps.into_iter() { cap.validate()?; // Only allow importing caps we can use. 
@@ -188,15 +192,16 @@ impl Auth { if !self.secrets.has_user(&user_id) { return Err(AuthError::MissingUserSecret(user_id)); } + store.insert_cap(cap); } Ok(()) } - pub fn insert_unchecked(&self, caps: impl IntoIterator) { + pub fn insert_caps_unchecked(&self, caps: impl IntoIterator) { let mut store = self.caps.write().unwrap(); for cap in caps.into_iter() { debug!(?cap, "insert cap"); - store.insert_caps(cap); + store.insert_cap(cap); } } @@ -253,7 +258,7 @@ impl Auth { let read_cap = self.create_read_cap(namespace_key, user_key)?; let write_cap = self.create_write_cap(namespace_key, user_key)?; let pack = [read_cap, write_cap]; - self.insert_unchecked(pack.clone()); + self.insert_caps_unchecked(pack.clone()); Ok(pack) } @@ -322,7 +327,7 @@ impl Auth { out.push(write_cap); } if store { - self.insert_unchecked(out.clone()); + self.insert_caps_unchecked(out.clone()); } Ok(out) } @@ -417,7 +422,7 @@ impl CapStore { best.cloned() } - fn insert_caps(&mut self, cap: CapabilityPack) { + fn insert_cap(&mut self, cap: CapabilityPack) { match cap { CapabilityPack::Read(cap) => { self.read_caps From ee4f865bddc93b4561aad54ddb3bac84773ba712 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Fri, 21 Jun 2024 10:30:29 +0200 Subject: [PATCH 069/198] improve auth handling --- iroh-willow/src/auth.rs | 52 +++++--- iroh-willow/src/net.rs | 203 ++++++++---------------------- iroh-willow/src/proto/grouping.rs | 6 +- iroh-willow/src/session.rs | 66 +++------- 4 files changed, 106 insertions(+), 221 deletions(-) diff --git a/iroh-willow/src/auth.rs b/iroh-willow/src/auth.rs index ba8c5560da..54d685a511 100644 --- a/iroh-willow/src/auth.rs +++ b/iroh-willow/src/auth.rs @@ -15,7 +15,7 @@ use crate::{ sync::ReadAuthorisation, willow::{Entry, WriteCapability}, }, - session::Interests, + session::{AreaOfInterestSelector, Interests}, store::traits::{SecretStorage, SecretStoreError, Storage}, }; @@ -45,16 +45,28 @@ impl CapSelector { pub fn matches(&self, cap: &McCapability) -> 
bool { self.namespace_id == cap.granted_namespace().id() && self.user.includes(&cap.receiver().id()) - && self.area.is_included_in(&cap.granted_area()) + && self.area.matches(&cap.granted_area()) } - pub fn widest(namespace_id: NamespaceId) -> Self { + pub fn new(namespace_id: NamespaceId, user: UserSelector, area: AreaSelector) -> Self { Self { namespace_id, - user: UserSelector::Any, - area: AreaSelector::Widest, + user, + area, } } + + pub fn with_user(namespace_id: NamespaceId, user_id: UserId) -> Self { + Self::new( + namespace_id, + UserSelector::Exact(user_id), + AreaSelector::Widest, + ) + } + + pub fn widest(namespace_id: NamespaceId) -> Self { + Self::new(namespace_id, UserSelector::Any, AreaSelector::Widest) + } } #[derive(Debug, Default, Clone, Copy, Eq, PartialEq, derive_more::From, Serialize, Deserialize)] @@ -82,7 +94,7 @@ pub enum AreaSelector { } impl AreaSelector { - pub fn is_included_in(&self, other: &Area) -> bool { + pub fn matches(&self, other: &Area) -> bool { match self { AreaSelector::Widest => true, AreaSelector::Area(area) => other.includes_area(area), @@ -221,21 +233,23 @@ impl Auth { .collect::>(); Ok(out) } - Interests::Explicit(interests) => Ok(interests), Interests::Some(interests) => { let mut out: HashMap> = HashMap::new(); - for (namespace_id, aois) in interests { - for aoi in aois { - // TODO: check if aoi is already covered before trying to cover it - let selector = CapSelector { - namespace_id, - user: UserSelector::Any, - area: AreaSelector::Area(aoi.area.clone()), - }; - let cap = self.get_read_cap(&selector)?; - if let Some(cap) = cap { - let set = out.entry(cap).or_default(); - set.insert(aoi); + for (cap_selector, aoi_selector) in interests { + let cap = self.get_read_cap(&cap_selector)?; + if let Some(cap) = cap { + let entry = out.entry(cap.clone()).or_default(); + match aoi_selector { + AreaOfInterestSelector::Widest => { + let area = cap.read_cap().granted_area(); + let aoi = AreaOfInterest::new(area); + 
entry.insert(aoi); + } + AreaOfInterestSelector::Exact(aois) => { + for aoi in aois { + entry.insert(aoi); + } + } } } } diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 83a9f9cee8..17b6f792fb 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -297,10 +297,8 @@ mod tests { use futures_lite::StreamExt; use iroh_base::key::SecretKey; - use iroh_blobs::store::Store as PayloadStore; use iroh_net::{Endpoint, NodeAddr, NodeId}; use rand::SeedableRng; - use rand_core::CryptoRngCore; use tracing::info; use crate::{ @@ -309,17 +307,12 @@ mod tests { form::{AuthForm, EntryForm, PayloadForm, SubspaceForm, TimestampForm}, net::run, proto::{ - grouping::{AreaOfInterest, ThreeDRange}, - keys::{ - NamespaceId, NamespaceKind, NamespaceSecretKey, UserId, UserPublicKey, - UserSecretKey, - }, - meadowcap::{AccessMode, McCapability, OwnedCapability}, - sync::ReadCapability, - willow::{Entry, InvalidPath, Path, WriteCapability}, + grouping::ThreeDRange, + keys::{NamespaceId, NamespaceKind, UserId}, + meadowcap::AccessMode, + willow::{Entry, InvalidPath, Path}, }, session::{Interests, Role, SessionInit, SessionMode}, - store::memory, }; const ALPN: &[u8] = b"iroh-willow/0"; @@ -357,7 +350,7 @@ mod tests { handle_betty.import_caps(cap_for_betty).await?; - insert2( + insert( &handle_alfie, namespace_id, user_alfie, @@ -368,7 +361,7 @@ mod tests { ) .await?; - insert2( + insert( &handle_betty, namespace_id, user_betty, @@ -379,14 +372,8 @@ mod tests { ) .await?; - let init_alfie = SessionInit { - interests: Interests::All, - mode: SessionMode::ReconcileOnce, - }; - let init_betty = SessionInit { - interests: Interests::All, - mode: SessionMode::ReconcileOnce, - }; + let init_alfie = SessionInit::new(Interests::All, SessionMode::ReconcileOnce); + let init_betty = SessionInit::new(Interests::All, SessionMode::ReconcileOnce); info!("init took {:?}", start.elapsed()); @@ -442,35 +429,49 @@ mod tests { let (ep_alfie, node_id_alfie, _) = create_endpoint(&mut 
rng).await?; let (ep_betty, node_id_betty, addr_betty) = create_endpoint(&mut rng).await?; - let namespace_secret = NamespaceSecretKey::generate(&mut rng, NamespaceKind::Owned); - let namespace_id = namespace_secret.id(); + let handle_alfie = ActorHandle::spawn_memory(Default::default(), node_id_alfie); + let handle_betty = ActorHandle::spawn_memory(Default::default(), node_id_betty); - let start = Instant::now(); - let mut expected_entries = BTreeSet::new(); + let user_alfie = handle_alfie.create_user().await?; + let user_betty = handle_betty.create_user().await?; - let (handle_alfie, payloads_alfie) = create_willow(node_id_alfie); - let (handle_betty, payloads_betty) = create_willow(node_id_betty); + let namespace_id = handle_alfie + .create_namespace(NamespaceKind::Owned, user_alfie) + .await?; - let (init_alfie, cap_alfie) = setup_and_insert( - SessionMode::Live, - &mut rng, + let cap_for_betty = handle_alfie + .delegate_caps( + CapSelector::widest(namespace_id), + AccessMode::Write, + DelegateTo::new(user_betty, None), + ) + .await?; + + handle_betty.import_caps(cap_for_betty).await?; + + let mut expected_entries = BTreeSet::new(); + let start = Instant::now(); + + let n_init = 2; + insert( &handle_alfie, - &payloads_alfie, - &namespace_secret, - 2, + namespace_id, + user_alfie, + n_init, + |n| Path::new(&[b"alfie-init", n.to_string().as_bytes()]), + |n| format!("alfie{n}"), &mut expected_entries, - |n| Path::new(&[b"alfie", n.to_string().as_bytes()]), ) .await?; - let (init_betty, _cap_betty) = setup_and_insert( - SessionMode::Live, - &mut rng, + + insert( &handle_betty, - &payloads_betty, - &namespace_secret, - 2, + namespace_id, + user_betty, + n_init, + |n| Path::new(&[b"betty-init", n.to_string().as_bytes()]), + |n| format!("betty{n}"), &mut expected_entries, - |n| Path::new(&[b"betty", n.to_string().as_bytes()]), ) .await?; @@ -488,23 +489,21 @@ mod tests { // alfie insert 3 enries after waiting a second let _insert_task_alfie = tokio::task::spawn({ - 
let store = handle_alfie.clone(); - let payload_store = payloads_alfie.clone(); + let handle_alfie = handle_alfie.clone(); let count = 3; - let content_fn = |i: usize| format!("alfie live insert {i} for alfie"); + let content_fn = |i: usize| format!("alfie live {i}"); let path_fn = |i: usize| Path::new(&[b"alfie-live", i.to_string().as_bytes()]); let mut track_entries = vec![]; async move { tokio::time::sleep(std::time::Duration::from_secs(1)).await; insert( - &store, - &payload_store, + &handle_alfie, namespace_id, - cap_alfie, + user_alfie, count, - content_fn, path_fn, + content_fn, &mut track_entries, ) .await @@ -513,6 +512,9 @@ mod tests { } }); + let init_alfie = SessionInit::new(Interests::All, SessionMode::Live); + let init_betty = SessionInit::new(Interests::All, SessionMode::Live); + let (session_alfie, session_betty) = tokio::join!( run( node_id_alfie, @@ -567,13 +569,6 @@ mod tests { Ok((ep, node_id, addr)) } - pub fn create_willow(me: NodeId) -> (ActorHandle, iroh_blobs::store::mem::Store) { - let payloads = iroh_blobs::store::mem::Store::default(); - let payloads_clone = payloads.clone(); - let handle = ActorHandle::spawn(move || memory::Store::new(payloads_clone), me); - (handle, payloads) - } - async fn get_entries( store: &ActorHandle, namespace: NamespaceId, @@ -586,7 +581,7 @@ mod tests { entries } - async fn insert2( + async fn insert( handle: &ActorHandle, namespace_id: NamespaceId, user_id: UserId, @@ -612,102 +607,6 @@ mod tests { Ok(()) } - #[allow(clippy::too_many_arguments)] - async fn insert( - actor: &ActorHandle, - payload_store: &P, - namespace_id: NamespaceId, - write_cap: WriteCapability, - count: usize, - content_fn: impl Fn(usize) -> String, - path_fn: impl Fn(usize) -> Result, - track_entries: &mut impl Extend, - ) -> anyhow::Result<()> { - for i in 0..count { - let payload = content_fn(i).as_bytes().to_vec(); - let payload_len = payload.len() as u64; - let temp_tag = payload_store - .import_bytes(payload.into(), 
iroh_base::hash::BlobFormat::Raw) - .await?; - let payload_digest = *temp_tag.hash(); - let path = path_fn(i).expect("invalid path"); - let entry = Entry::new_current( - namespace_id, - write_cap.receiver().id(), - path, - payload_digest, - payload_len, - ); - track_entries.extend([entry.clone()]); - actor.insert_entry(entry, write_cap.clone()).await?; - drop(temp_tag); - } - Ok(()) - } - - #[allow(clippy::too_many_arguments)] - async fn setup_and_insert( - mode: SessionMode, - rng: &mut impl CryptoRngCore, - store: &ActorHandle, - payload_store: &P, - namespace_secret: &NamespaceSecretKey, - count: usize, - track_entries: &mut impl Extend, - path_fn: impl Fn(usize) -> Result, - ) -> anyhow::Result<(SessionInit, WriteCapability)> { - let (read_cap, write_cap) = setup_capabilities(rng, store, namespace_secret).await?; - let content_fn = |i| { - format!( - "initial entry {i} for {}", - write_cap.receiver().id().fmt_short() - ) - }; - insert( - store, - payload_store, - namespace_secret.id(), - write_cap.clone(), - count, - content_fn, - path_fn, - track_entries, - ) - .await?; - let init = - SessionInit::with_explicit_interest(mode, read_cap.into(), AreaOfInterest::full()); - Ok((init, write_cap)) - } - - async fn setup_capabilities( - rng: &mut impl CryptoRngCore, - store: &ActorHandle, - namespace_secret: &NamespaceSecretKey, - ) -> anyhow::Result<(ReadCapability, WriteCapability)> { - let user_secret = UserSecretKey::generate(rng); - let user_public_key = user_secret.public_key(); - store.insert_secret(user_secret.clone()).await?; - let (read_cap, write_cap) = create_capabilities(namespace_secret, user_public_key); - Ok((read_cap, write_cap)) - } - - fn create_capabilities( - namespace_secret: &NamespaceSecretKey, - user_public_key: UserPublicKey, - ) -> (ReadCapability, WriteCapability) { - let read_capability = McCapability::Owned(OwnedCapability::new( - namespace_secret, - user_public_key, - AccessMode::Read, - )); - let write_capability = 
McCapability::Owned(OwnedCapability::new( - namespace_secret, - user_public_key, - AccessMode::Write, - )); - (read_capability, write_capability) - } - fn parse_env_var(var: &str, default: T) -> T where T: std::str::FromStr, diff --git a/iroh-willow/src/proto/grouping.rs b/iroh-willow/src/proto/grouping.rs index b662145851..5da7297133 100644 --- a/iroh-willow/src/proto/grouping.rs +++ b/iroh-willow/src/proto/grouping.rs @@ -515,8 +515,10 @@ impl Point { } } - // pub fn into_area(&self) -> Area { - // } + pub fn into_area(&self) -> Area { + let times = Range::new(self.timestamp, RangeEnd::Closed(self.timestamp + 1)); + Area::new(SubspaceArea::Id(self.subspace_id), self.path.clone(), times) + } } #[derive(thiserror::Error, Debug)] diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 25fd13ca7f..7db3a95db3 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -1,8 +1,13 @@ -use std::collections::{BTreeMap, BTreeSet, HashMap}; +use std::collections::{BTreeMap, BTreeSet}; -use crate::proto::grouping::{Area, AreaOfInterest}; -use crate::proto::keys::NamespaceId; -use crate::proto::sync::{AccessChallenge, AreaOfInterestHandle, ChallengeHash, ReadAuthorisation}; +use crate::{ + auth::CapSelector, + proto::{ + grouping::{Area, AreaOfInterest}, + keys::NamespaceId, + sync::{AccessChallenge, AreaOfInterestHandle, ChallengeHash}, + }, +}; pub mod channels; mod data; @@ -71,25 +76,15 @@ impl SessionMode { pub enum Interests { #[default] All, - Some(BTreeMap>), - // TODO: Remove? - Explicit(HashMap>), + Some(BTreeMap), } -// TODO: I think the interests would be better represented like this maybe? 
-// #[derive(Debug, Default, Clone)] -// pub enum Interests2 { -// #[default] -// All, -// Some(Vec<(CapSelector, AreaOfInterestSelector)>), -// } -// -// #[derive(Debug, Default, Clone)] -// pub enum AreaOfInterestSelector { -// #[default] -// Widest, -// Exact(BTreeSet), -// } +#[derive(Debug, Default, Clone)] +pub enum AreaOfInterestSelector { + #[default] + Widest, + Exact(BTreeSet), +} /// Options to initialize a session with. #[derive(Debug)] @@ -100,33 +95,8 @@ pub struct SessionInit { } impl SessionInit { - /// Returns a [`SessionInit`] with a single interest. - pub fn with_interest( - mode: SessionMode, - namespace: NamespaceId, - area_of_interest: AreaOfInterest, - ) -> Self { - Self { - mode, - interests: Interests::Some(BTreeMap::from_iter([( - namespace, - BTreeSet::from_iter([area_of_interest]), - )])), - } - } - - pub fn with_explicit_interest( - mode: SessionMode, - authorisation: ReadAuthorisation, - area_of_interest: AreaOfInterest, - ) -> Self { - Self { - mode, - interests: Interests::Explicit(HashMap::from_iter([( - authorisation, - BTreeSet::from_iter([area_of_interest]), - )])), - } + pub fn new(interests: Interests, mode: SessionMode) -> Self { + Self { interests, mode } } } From b38487f8b621f8e9219a6a4d52a9a1425ad23527 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Wed, 26 Jun 2024 10:56:07 +0200 Subject: [PATCH 070/198] refactor: improve session shutdown --- iroh-willow/src/auth.rs | 4 +- iroh-willow/src/proto/sync.rs | 12 +++ iroh-willow/src/session/data.rs | 15 +--- iroh-willow/src/session/reconciler.rs | 17 ++-- iroh-willow/src/session/run.rs | 109 +++++++++++++++----------- iroh-willow/src/session/state.rs | 77 ++++++++++++++---- iroh-willow/src/util.rs | 1 + iroh-willow/src/util/channel.rs | 14 ++-- iroh-willow/src/util/queue.rs | 4 +- iroh-willow/src/util/stream.rs | 48 ++++++++++++ iroh-willow/src/util/task.rs | 3 +- 11 files changed, 207 insertions(+), 97 deletions(-) create mode 100644 
iroh-willow/src/util/stream.rs diff --git a/iroh-willow/src/auth.rs b/iroh-willow/src/auth.rs index 54d685a511..23d71a631f 100644 --- a/iroh-willow/src/auth.rs +++ b/iroh-willow/src/auth.rs @@ -165,7 +165,7 @@ impl Auth { selector: &CapSelector, ) -> Result, AuthError> { let cap = self.caps.read().unwrap().get_write_cap(selector); - debug!(?selector, ?cap, "get write cap"); + // debug!(?selector, ?cap, "get write cap"); Ok(cap) } @@ -174,7 +174,7 @@ impl Auth { selector: &CapSelector, ) -> Result, AuthError> { let cap = self.caps.read().unwrap().get_read_cap(selector); - debug!(?selector, ?cap, "get read cap"); + // debug!(?selector, ?cap, "get read cap"); Ok(cap) } diff --git a/iroh-willow/src/proto/sync.rs b/iroh-willow/src/proto/sync.rs index 2568ee65d5..62b53e9c3b 100644 --- a/iroh-willow/src/proto/sync.rs +++ b/iroh-willow/src/proto/sync.rs @@ -338,6 +338,18 @@ impl Message { pub fn same_kind(&self, other: &Self) -> bool { std::mem::discriminant(self) == std::mem::discriminant(other) } + + pub fn covers_region(&self) -> Option<(AreaOfInterestHandle, u64)> { + match self { + Message::ReconciliationSendFingerprint(msg) => { + msg.covers.map(|covers| (msg.receiver_handle, covers)) + } + Message::ReconciliationAnnounceEntries(msg) => { + msg.covers.map(|covers| (msg.receiver_handle, covers)) + } + _ => None, + } + } } impl Encoder for Message { diff --git a/iroh-willow/src/session/data.rs b/iroh-willow/src/session/data.rs index 2b2575b4c3..d067667a75 100644 --- a/iroh-willow/src/session/data.rs +++ b/iroh-willow/src/session/data.rs @@ -1,5 +1,3 @@ -use futures_lite::StreamExt; - use tokio::sync::broadcast; use crate::{ @@ -11,7 +9,6 @@ use crate::{ store::{traits::Storage, Origin, Store}, }; -use super::channels::MessageReceiver; use super::payload::{send_payload_chunked, CurrentPayload}; use super::Session; @@ -81,26 +78,18 @@ pub struct DataReceiver { session: Session, store: Store, current_payload: CurrentPayload, - recv: MessageReceiver, } impl DataReceiver { 
- pub fn new(session: Session, store: Store, recv: MessageReceiver) -> Self { + pub fn new(session: Session, store: Store) -> Self { Self { session, store, current_payload: Default::default(), - recv, - } - } - pub async fn run(mut self) -> Result<(), Error> { - while let Some(message) = self.recv.try_next().await? { - self.on_message(message).await?; } - Ok(()) } - async fn on_message(&mut self, message: DataMessage) -> Result<(), Error> { + pub async fn on_message(&mut self, message: DataMessage) -> Result<(), Error> { match message { DataMessage::SendEntry(message) => self.on_send_entry(message).await?, DataMessage::SendPayload(message) => self.on_send_payload(message).await?, diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index 1ecb7a9a26..5f05c6f179 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -21,14 +21,14 @@ use crate::{ traits::{EntryReader, EntryStorage, SplitAction, SplitOpts, Storage}, Origin, Store, }, - util::channel::WriteError, + util::{channel::WriteError, stream::Cancelable}, }; #[derive(derive_more::Debug)] pub struct Reconciler { session: Session, store: Store, - recv: MessageReceiver, + recv: Cancelable>, snapshot: ::Snapshot, current_payload: CurrentPayload, } @@ -37,7 +37,7 @@ impl Reconciler { pub fn new( session: Session, store: Store, - recv: MessageReceiver, + recv: Cancelable>, ) -> Result { let snapshot = store.entries().snapshot()?; Ok(Self { @@ -68,11 +68,8 @@ impl Reconciler { } } } - if self.session.reconciliation_is_complete() - && !self.session.mode().is_live() - && !self.current_payload.is_active() - { - debug!("reconciliation complete and not in live mode: close session"); + if self.session.reconciliation_is_complete() && !self.current_payload.is_active() { + debug!("reconciliation complete"); break; } } @@ -222,7 +219,7 @@ impl Reconciler { their_handle: AreaOfInterestHandle, covers: Option, ) -> anyhow::Result<()> { - 
self.session.mark_range_pending(our_handle); + self.session.mark_our_range_pending(our_handle); let msg = ReconciliationSendFingerprint { range, fingerprint, @@ -259,7 +256,7 @@ impl Reconciler { covers, }; if want_response { - self.session.mark_range_pending(our_handle); + self.session.mark_our_range_pending(our_handle); } self.send(msg).await?; for authorised_entry in self diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index cea8219c7a..57b56c05bb 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -1,13 +1,13 @@ use futures_lite::StreamExt; use strum::IntoEnumIterator; use tokio_util::sync::CancellationToken; -use tracing::{debug, error_span, trace}; +use tracing::{debug, error_span, trace, warn}; use crate::{ proto::sync::{ControlIssueGuarantee, LogicalChannel, Message, SetupBindAreaOfInterest}, session::{channels::LogicalChannelReceivers, Error, Scope, Session, SessionInit}, store::{traits::Storage, Store}, - util::channel::Receiver, + util::{channel::Receiver, stream::Cancelable}, }; use super::{ @@ -32,13 +32,21 @@ impl Session { logical_recv: LogicalChannelReceivers { reconciliation_recv, - mut static_tokens_recv, - mut capability_recv, - mut aoi_recv, + static_tokens_recv, + capability_recv, + aoi_recv, data_recv, }, } = recv; + // Make all our receivers close once the cancel_token is triggered. + let control_recv = Cancelable::new(control_recv, cancel_token.clone()); + let reconciliation_recv = Cancelable::new(reconciliation_recv, cancel_token.clone()); + let mut static_tokens_recv = Cancelable::new(static_tokens_recv, cancel_token.clone()); + let mut capability_recv = Cancelable::new(capability_recv, cancel_token.clone()); + let mut aoi_recv = Cancelable::new(aoi_recv, cancel_token.clone()); + let mut data_recv = Cancelable::new(data_recv, cancel_token.clone()); + // Spawn a task to handle incoming static tokens. 
self.spawn(error_span!("stt"), move |session| async move { while let Some(message) = static_tokens_recv.try_next().await? { @@ -52,7 +60,10 @@ impl Session { self.spawn(error_span!("dat:r"), { let store = store.clone(); move |session| async move { - DataReceiver::new(session, store, data_recv).run().await?; + let mut data_receiver = DataReceiver::new(session, store); + while let Some(message) = data_recv.try_next().await? { + data_receiver.on_message(message).await?; + } Ok(()) } }); @@ -74,13 +85,11 @@ impl Session { }); // Spawn a task to handle incoming areas of interest. - self.spawn(error_span!("aoi"), { - move |session| async move { - while let Some(message) = aoi_recv.try_next().await? { - session.on_bind_area_of_interest(message).await?; - } - Ok(()) + self.spawn(error_span!("aoi"), move |session| async move { + while let Some(message) = aoi_recv.try_next().await? { + session.on_bind_area_of_interest(message).await?; } + Ok(()) }); // Spawn a task to handle reconciliation messages @@ -88,18 +97,21 @@ impl Session { let cancel_token = cancel_token.clone(); let store = store.clone(); move |session| async move { - let res = Reconciler::new(session, store, reconciliation_recv)? + let res = Reconciler::new(session.clone(), store, reconciliation_recv)? .run() .await; - cancel_token.cancel(); + if !session.mode().is_live() { + debug!("reconciliation complete and not in live mode: close session"); + cancel_token.cancel(); + } res } }); // Spawn a task to handle control messages self.spawn(error_span!("ctl"), { - let cancel_token = cancel_token.clone(); let store = store.clone(); + let cancel_token = cancel_token.clone(); move |session| async move { let res = control_loop(session, store, control_recv, init).await; cancel_token.cancel(); @@ -107,50 +119,55 @@ impl Session { } }); - // Spawn a task to handle session termination. 
- self.spawn(error_span!("fin"), { - let cancel_token = cancel_token.clone(); - move |session| async move { - // Wait until the session is cancelled: - // * either because SessionMode is ReconcileOnce and reconciliation finished - // * or because the session was cancelled from the outside session handle - cancel_token.cancelled().await; - debug!("closing session"); - // Then close all senders. This will make all other tasks terminate once the remote - // closed their senders as well. - session.close_senders(); - // Unsubscribe from the store. This stops the data send task. - store.entries().unsubscribe(session.id()); - Ok(()) + // Wait until the session is cancelled, or until a task fails. + let result = loop { + tokio::select! { + _ = cancel_token.cancelled() => { + break Ok(()); + }, + Some((span, result)) = self.join_next_task() => { + let _guard = span.enter(); + trace!(?result, remaining = self.remaining_tasks(), "task complete"); + if let Err(err) = result { + warn!(?err, "session task failed: abort session"); + break Err(err); + } + }, } - }); + }; + + if result.is_err() { + self.abort_all_tasks(); + } else { + debug!("closing session"); + } - // Wait for all tasks to complete. - // We are not cancelling here so we have to make sure that all tasks terminate (structured - // concurrency basically). - let mut final_result = Ok(()); + // Unsubscribe from the store. This stops the data send task. + store.entries().unsubscribe(self.id()); + + // Wait for remaining tasks to terminate to catch any panics. + // TODO: Add timeout? 
while let Some((span, result)) = self.join_next_task().await { let _guard = span.enter(); - // trace!(?result, remaining = self.remaining_tasks(), "task complete"); - debug!(?result, remaining = self.remaining_tasks(), "task complete"); + trace!(?result, remaining = self.remaining_tasks(), "task complete"); if let Err(err) = result { - tracing::warn!(?err, "task failed: {err}"); - cancel_token.cancel(); - // self.abort_all_tasks(); - if final_result.is_ok() { - final_result = Err(err); - } + warn!("task failed: {err:?}"); } } - debug!(success = final_result.is_ok(), "session complete"); - final_result + + // Close our channel senders. + // This will stop the network send loop after all pending data has been sent. + self.close_senders(); + + debug!(success = result.is_ok(), "session complete"); + result } } async fn control_loop( session: Session, store: Store, - mut control_recv: Receiver, + mut control_recv: Cancelable>, init: SessionInit, ) -> Result<(), Error> { debug!(role = ?session.our_role(), "start session"); diff --git a/iroh-willow/src/session/state.rs b/iroh-willow/src/session/state.rs index b2388f5cfb..2323ade36d 100644 --- a/iroh-willow/src/session/state.rs +++ b/iroh-willow/src/session/state.rs @@ -8,7 +8,7 @@ use std::{ }; use futures_lite::Stream; -use tracing::{Instrument, Span}; +use tracing::{debug, trace, Instrument, Span}; use crate::{ proto::{ @@ -103,9 +103,19 @@ impl Session { self.0.tasks.borrow_mut().abort_all(); } - pub fn remaining_tasks(&self) -> usize { + // pub fn remaining_tasks(&self) -> usize { + // let tasks = self.0.tasks.borrow(); + // tasks.len() + // } + + pub fn remaining_tasks(&self) -> String { let tasks = self.0.tasks.borrow(); - tasks.len() + let mut out = vec![]; + for (span, _k) in tasks.iter() { + let name = span.metadata().unwrap().name(); + out.push(name.to_string()); + } + out.join(",") } pub fn log_remaining_tasks(&self) { @@ -114,10 +124,21 @@ impl Session { .iter() .map(|t| t.0.metadata().unwrap().name()) 
.collect::>(); - tracing::debug!(tasks=?names, "active_tasks"); + debug!(tasks=?names, "active_tasks"); } pub async fn send(&self, message: impl Into) -> Result<(), WriteError> { + let message: Message = message.into(); + if let Some((their_handle, range_count)) = message.covers_region() { + if let Err(err) = self + .state_mut() + .mark_their_range_covered(their_handle, range_count) + { + // TODO: Is this really unreachable? I think so, as this would indicate a logic + // error purely on our side. + unreachable!("mark_their_range_covered: {err:?}"); + } + } self.0.send.send(message).await } @@ -196,7 +217,7 @@ impl Session { Ok((our_handle, maybe_message)) } - pub fn mark_range_pending(&self, our_handle: AreaOfInterestHandle) { + pub fn mark_our_range_pending(&self, our_handle: AreaOfInterestHandle) { let mut state = self.state_mut(); state.reconciliation_started = true; let range_count = state.our_range_counter; @@ -211,7 +232,7 @@ impl Session { let range_count = { let mut state = self.state_mut(); if let Some(range_count) = message.covers { - state.mark_range_covered(message.receiver_handle, range_count)?; + state.mark_our_range_covered(message.receiver_handle, range_count)?; } if state.pending_announced_entries.is_some() { return Err(Error::InvalidMessageInCurrentState); @@ -220,8 +241,7 @@ impl Session { state.pending_announced_entries = Some(message.count); } if message.want_response { - let range_count = state.their_range_counter; - state.their_range_counter += 1; + let range_count = state.add_pending_range_theirs(message.sender_handle); Some(range_count) } else { None @@ -245,11 +265,9 @@ impl Session { let mut state = self.state_mut(); state.reconciliation_started = true; if let Some(range_count) = message.covers { - state.mark_range_covered(message.receiver_handle, range_count)?; + state.mark_our_range_covered(message.receiver_handle, range_count)?; } - let range_count = state.their_range_counter; - state.their_range_counter += 1; - range_count + 
state.add_pending_range_theirs(message.sender_handle) }; let namespace = self @@ -295,7 +313,7 @@ impl Session { pub fn on_setup_bind_read_capability(&self, msg: SetupBindReadCapability) -> Result<(), Error> { // TODO: verify intersection handle - tracing::debug!("setup bind cap {msg:?}"); + trace!("received capability {msg:?}"); msg.capability.validate()?; let mut state = self.state_mut(); state @@ -308,14 +326,16 @@ impl Session { pub fn reconciliation_is_complete(&self) -> bool { let state = self.state(); // tracing::debug!( - // "reconciliation_is_complete started {} pending_ranges {}, pending_entries {:?} mode {:?}", + // "reconciliation_is_complete started {} our_pending_ranges {}, their_pending_ranges {}, pending_entries {:?} mode {:?}", // state.reconciliation_started, // state.our_uncovered_ranges.len(), + // state.their_uncovered_ranges.len(), // state.pending_announced_entries, // self.mode(), // ); state.reconciliation_started && state.our_uncovered_ranges.is_empty() + && state.their_uncovered_ranges.is_empty() && state.pending_announced_entries.is_none() } @@ -457,6 +477,7 @@ struct SessionState { our_range_counter: u64, their_range_counter: u64, our_uncovered_ranges: HashSet<(AreaOfInterestHandle, u64)>, + their_uncovered_ranges: HashSet<(AreaOfInterestHandle, u64)>, pending_announced_entries: Option, intersection_queue: Queue, } @@ -476,6 +497,7 @@ impl SessionState { our_range_counter: 0, their_range_counter: 0, our_uncovered_ranges: Default::default(), + their_uncovered_ranges: Default::default(), pending_announced_entries: Default::default(), intersection_queue: Default::default(), } @@ -542,7 +564,7 @@ impl SessionState { Ok(()) } - fn mark_range_covered( + fn mark_our_range_covered( &mut self, our_handle: AreaOfInterestHandle, range_count: u64, @@ -553,4 +575,29 @@ impl SessionState { Ok(()) } } + + fn mark_their_range_covered( + &mut self, + their_handle: AreaOfInterestHandle, + range_count: u64, + ) -> Result<(), Error> { + // 
trace!(?their_handle, ?range_count, "mark_their_range_covered"); + if !self + .their_uncovered_ranges + .remove(&(their_handle, range_count)) + { + Err(Error::InvalidMessageInCurrentState) + } else { + Ok(()) + } + } + + fn add_pending_range_theirs(&mut self, their_handle: AreaOfInterestHandle) -> u64 { + let range_count = self.their_range_counter; + self.their_range_counter += 1; + // debug!(?their_handle, ?range_count, "add_pending_range_theirs"); + self.their_uncovered_ranges + .insert((their_handle, range_count)); + range_count + } } diff --git a/iroh-willow/src/util.rs b/iroh-willow/src/util.rs index ff1d8002ba..f417fb773f 100644 --- a/iroh-willow/src/util.rs +++ b/iroh-willow/src/util.rs @@ -3,5 +3,6 @@ pub mod channel; pub mod codec; pub mod queue; +pub mod stream; pub mod task; pub mod time; diff --git a/iroh-willow/src/util/channel.rs b/iroh-willow/src/util/channel.rs index 87f56a0229..b86d179860 100644 --- a/iroh-willow/src/util/channel.rs +++ b/iroh-willow/src/util/channel.rs @@ -181,13 +181,13 @@ impl Shared { } fn writable_slice_exact(&mut self, len: usize) -> Option<&mut [u8]> { - tracing::trace!( - "write {}, remaining {} (guarantees {}, buf capacity {})", - len, - self.remaining_write_capacity(), - self.guarantees.get(), - self.max_buffer_size - self.buf.len() - ); + // tracing::trace!( + // "write {}, remaining {} (guarantees {}, buf capacity {})", + // len, + // self.remaining_write_capacity(), + // self.guarantees.get(), + // self.max_buffer_size - self.buf.len() + // ); if self.remaining_write_capacity() < len { None } else { diff --git a/iroh-willow/src/util/queue.rs b/iroh-willow/src/util/queue.rs index b131c9edbe..325cece9b4 100644 --- a/iroh-willow/src/util/queue.rs +++ b/iroh-willow/src/util/queue.rs @@ -8,10 +8,10 @@ use std::{ use futures_lite::Stream; -/// A simple unbounded queue. +/// A simple unbounded FIFO queue. /// /// Values are pushed into the queue, synchronously. -/// The queue can be polled for the next value from the start. 
+/// The queue can be polled for the next value asynchronously. #[derive(Debug)] pub struct Queue { items: VecDeque, diff --git a/iroh-willow/src/util/stream.rs b/iroh-willow/src/util/stream.rs new file mode 100644 index 0000000000..3ac9cc6776 --- /dev/null +++ b/iroh-willow/src/util/stream.rs @@ -0,0 +1,48 @@ +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; + +use futures_lite::Stream; +use tokio_util::sync::{CancellationToken, WaitForCancellationFutureOwned}; + +/// Wrapper around [`Stream`] that takes a cancel token to cancel the stream. +/// +/// Once the cancel token is cancelled, this stream will continue to yield all items which are +/// ready immediately and then return [`None`]. +#[derive(Debug)] +pub struct Cancelable { + stream: S, + cancelled: Pin>, + is_cancelled: bool, +} + +impl Cancelable { + pub fn new(stream: S, cancel_token: CancellationToken) -> Self { + Self { + stream, + cancelled: Box::pin(cancel_token.cancelled_owned()), + is_cancelled: false, + } + } +} + +impl Stream for Cancelable { + type Item = S::Item; + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + if self.is_cancelled { + return Poll::Ready(None); + } + match Pin::new(&mut self.stream).poll_next(cx) { + Poll::Ready(r) => Poll::Ready(r), + Poll::Pending => match Pin::new(&mut self.cancelled).poll(cx) { + Poll::Ready(()) => { + self.is_cancelled = true; + Poll::Ready(None) + } + Poll::Pending => Poll::Pending, + }, + } + } +} diff --git a/iroh-willow/src/util/task.rs b/iroh-willow/src/util/task.rs index 77dea91243..f6394303a0 100644 --- a/iroh-willow/src/util/task.rs +++ b/iroh-willow/src/util/task.rs @@ -51,8 +51,7 @@ impl JoinMap { pub fn spawn_local + 'static>(&mut self, key: K, future: F) -> TaskKey { let handle = tokio::task::spawn_local(future); let abort_handle = handle.abort_handle(); - let k = self.tasks.insert(handle); - let k = TaskKey(k); + let k = TaskKey(self.tasks.insert(handle)); self.keys.insert(k, key); 
self.abort_handles.insert(k, abort_handle); k From 525a900f312dcb337c20ac04bc73fe58823449b7 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 27 Jun 2024 16:43:47 +0200 Subject: [PATCH 071/198] refactor(meadowcap): ord and partialord for capabilities, and delegation type --- iroh-willow/src/proto/keys.rs | 24 +++++++++++++++++++ iroh-willow/src/proto/meadowcap.rs | 37 +++++++++++++++++------------- 2 files changed, 45 insertions(+), 16 deletions(-) diff --git a/iroh-willow/src/proto/keys.rs b/iroh-willow/src/proto/keys.rs index e88cc21d6e..98ad061ff2 100644 --- a/iroh-willow/src/proto/keys.rs +++ b/iroh-willow/src/proto/keys.rs @@ -343,6 +343,18 @@ impl From<&UserSecretKey> for UserPublicKey { #[derive(Serialize, Deserialize, Clone, From, PartialEq, Eq, Deref)] pub struct NamespaceSignature(ed25519_dalek::Signature); +impl PartialOrd for NamespaceSignature { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for NamespaceSignature { + fn cmp(&self, other: &Self) -> Ordering { + self.to_bytes().cmp(&other.to_bytes()) + } +} + bytestring!(NamespaceSignature, SIGNATURE_LENGTH); impl std::hash::Hash for NamespaceSignature { @@ -355,6 +367,18 @@ impl std::hash::Hash for NamespaceSignature { #[derive(Serialize, Deserialize, Clone, From, PartialEq, Eq, Deref)] pub struct UserSignature(ed25519_dalek::Signature); +impl PartialOrd for UserSignature { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for UserSignature { + fn cmp(&self, other: &Self) -> Ordering { + self.to_bytes().cmp(&other.to_bytes()) + } +} + bytestring!(UserSignature, SIGNATURE_LENGTH); impl std::hash::Hash for UserSignature { diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index 17f64dd8fb..85bd4d37be 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -134,7 +134,9 @@ impl ValidatedCapability { } } -#[derive(Debug, Serialize, 
Deserialize, Clone, Eq, PartialEq, Hash, derive_more::From)] +#[derive( + Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash, Ord, PartialOrd, derive_more::From, +)] pub enum McCapability { Communal(CommunalCapability), Owned(OwnedCapability), @@ -261,14 +263,14 @@ impl Encoder for McCapability { } } -#[derive(Debug, Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Hash)] +#[derive(Debug, Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Hash, Ord, PartialOrd)] pub enum AccessMode { Read, Write, } /// A capability that authorizes reads or writes in communal namespaces. -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash)] +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)] pub struct CommunalCapability { /// The kind of access this grants. access_mode: AccessMode, @@ -298,7 +300,7 @@ impl CommunalCapability { pub fn receiver(&self) -> &UserPublicKey { match self.delegations.last() { None => &self.user_key, - Some((_, user_key, _)) => user_key, + Some(Delegation(_, user_key, _)) => user_key, } } @@ -309,7 +311,7 @@ impl CommunalCapability { pub fn granted_area(&self) -> Area { match self.delegations.last() { None => Area::subspace(self.user_key.into()), - Some((area, _, _)) => area.clone(), + Some(Delegation(area, _, _)) => area.clone(), } } @@ -325,7 +327,7 @@ impl CommunalCapability { let mut prev = None; let mut prev_receiver = &self.user_key; for delegation in self.delegations.iter() { - let (new_area, new_user, new_signature) = &delegation; + let Delegation(new_area, new_user, new_signature) = &delegation; let signable = self.handover(prev, new_area, new_user)?; prev_receiver.verify(&signable, new_signature)?; prev = Some((new_area, new_signature)); @@ -347,10 +349,10 @@ impl CommunalCapability { let prev = self .delegations .last() - .map(|(area, _user_key, sig)| (area, sig)); + .map(|Delegation(area, _user_key, sig)| (area, sig)); let handover = self.handover(prev, &new_area, &new_user)?; let 
signature = user_secret.sign(&handover); - let delegation = (new_area, new_user, signature); + let delegation = Delegation(new_area, new_user, signature); let mut cap = self.clone(); cap.delegations.push(delegation); Ok(cap) @@ -396,10 +398,11 @@ impl CommunalCapability { } } -pub type Delegation = (Area, UserPublicKey, UserSignature); +#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, Hash, Ord, PartialOrd)] +pub struct Delegation(Area, UserPublicKey, UserSignature); /// A capability that authorizes reads or writes in owned namespaces. -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash)] +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)] pub struct OwnedCapability { /// The kind of access this grants. access_mode: AccessMode, @@ -434,7 +437,7 @@ impl OwnedCapability { pub fn receiver(&self) -> &UserPublicKey { match self.delegations.last() { None => &self.user_key, - Some((_, user_key, _)) => user_key, + Some(Delegation(_, user_key, _)) => user_key, } } @@ -445,7 +448,7 @@ impl OwnedCapability { pub fn granted_area(&self) -> Area { match self.delegations.last() { None => Area::full(), - Some((area, _, _)) => area.clone(), + Some(Delegation(area, _, _)) => area.clone(), } } @@ -472,7 +475,7 @@ impl OwnedCapability { ); for delegation in self.delegations.iter() { let (prev_area, prev_user, prev_signature) = prev; - let (new_area, new_user, new_signature) = delegation; + let Delegation(new_area, new_user, new_signature) = delegation; let handover = Handover::new(prev_area, prev_signature, new_area, new_user)?.encode()?; prev_user.verify(&handover, new_signature)?; @@ -511,13 +514,13 @@ impl OwnedCapability { } let prev_signature = match self.delegations.last() { None => PrevSignature::Namespace(&self.initial_authorisation), - Some((_, _, prev_signature)) => PrevSignature::User(prev_signature), + Some(Delegation(_, _, prev_signature)) => PrevSignature::User(prev_signature), }; let prev_area = 
self.granted_area(); let handover = Handover::new(&prev_area, prev_signature, &new_area, &new_user)?; let signable = handover.encode()?; let signature = secret_key.sign(&signable); - let delegation = (new_area, new_user, signature); + let delegation = Delegation(new_area, new_user, signature); let mut cap = self.clone(); cap.delegations.push(delegation); Ok(cap) @@ -574,7 +577,9 @@ impl<'a> Encoder for Handover<'a> { } } -#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash, derive_more::From)] +#[derive( + Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash, derive_more::From, Ord, PartialOrd, +)] /// A capability that certifies read access to arbitrary SubspaceIds at some unspecified Path. pub struct McSubspaceCapability { /// The namespace for which this grants access. From 4ab46d6a3fbfa7c537095b9e6df242447439998a Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Wed, 26 Jun 2024 11:21:20 +0200 Subject: [PATCH 072/198] wip: prepare for private area intersection --- iroh-willow/src/net.rs | 3 + iroh-willow/src/proto/sync.rs | 114 ++++++++++++++++++++++++---- iroh-willow/src/session/channels.rs | 7 +- iroh-willow/src/session/run.rs | 1 + 4 files changed, 108 insertions(+), 17 deletions(-) diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 17b6f792fb..1d6420a137 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -174,6 +174,7 @@ async fn open_logical_channels( .ok_or(MissingChannel(channel)) }; + let pai = take_and_spawn_channel(LogicalChannel::Intersection)?; let rec = take_and_spawn_channel(LogicalChannel::Reconciliation)?; let stt = take_and_spawn_channel(LogicalChannel::StaticToken)?; let aoi = take_and_spawn_channel(LogicalChannel::AreaOfInterest)?; @@ -182,6 +183,7 @@ async fn open_logical_channels( Ok(( LogicalChannelSenders { + intersection: pai.0, reconciliation: rec.0, static_tokens: stt.0, aoi: aoi.0, @@ -189,6 +191,7 @@ async fn open_logical_channels( data: dat.0, }, 
LogicalChannelReceivers { + intersection_recv: pai.1.into(), reconciliation_recv: rec.1.into(), static_tokens_recv: stt.1.into(), aoi_recv: aoi.1.into(), diff --git a/iroh-willow/src/proto/sync.rs b/iroh-willow/src/proto/sync.rs index 62b53e9c3b..06418ad5eb 100644 --- a/iroh-willow/src/proto/sync.rs +++ b/iroh-willow/src/proto/sync.rs @@ -147,6 +147,8 @@ impl Channel { strum::EnumCount, )] pub enum LogicalChannel { + /// Logical channel for controlling the binding of new IntersectionHandles. + Intersection, /// Logical channel for performing 3d range-based set reconciliation. Reconciliation, // TODO: use all the channels @@ -183,6 +185,7 @@ impl LogicalChannel { } pub fn fmt_short(&self) -> &'static str { match self { + LogicalChannel::Intersection => "Pai", LogicalChannel::Reconciliation => "Rec", LogicalChannel::StaticToken => "StT", LogicalChannel::Capability => "Cap", @@ -193,22 +196,24 @@ impl LogicalChannel { pub fn from_id(id: u8) -> Result { match id { - 2 => Ok(Self::AreaOfInterest), - 3 => Ok(Self::Capability), - 4 => Ok(Self::StaticToken), - 5 => Ok(Self::Reconciliation), - 6 => Ok(Self::Data), + 2 => Ok(Self::Intersection), + 3 => Ok(Self::AreaOfInterest), + 4 => Ok(Self::Capability), + 5 => Ok(Self::StaticToken), + 6 => Ok(Self::Reconciliation), + 7 => Ok(Self::Data), _ => Err(InvalidChannelId), } } pub fn id(&self) -> u8 { match self { - LogicalChannel::AreaOfInterest => 2, - LogicalChannel::Capability => 3, - LogicalChannel::StaticToken => 4, - LogicalChannel::Reconciliation => 5, - LogicalChannel::Data => 6, + LogicalChannel::Intersection => 2, + LogicalChannel::AreaOfInterest => 3, + LogicalChannel::Capability => 4, + LogicalChannel::StaticToken => 5, + LogicalChannel::Reconciliation => 6, + LogicalChannel::Data => 7, } } } @@ -292,10 +297,14 @@ pub struct CommitmentReveal { pub enum Message { #[debug("{:?}", _0)] CommitmentReveal(CommitmentReveal), - // PaiReplyFragment - // PaiBindFragment - // PaiRequestSubspaceCapability - // 
PaiReplySubspaceCapability + #[debug("{:?}", _0)] + PaiReplyFragment(PaiReplyFragment), + #[debug("{:?}", _0)] + PaiBindFragment(PaiBindFragment), + #[debug("{:?}", _0)] + PaiRequestSubspaceCapability(PaiRequestSubspaceCapability), + #[debug("{:?}", _0)] + PaiReplySubspaceCapability(PaiReplySubspaceCapability), #[debug("{:?}", _0)] SetupBindStaticToken(SetupBindStaticToken), #[debug("{:?}", _0)] @@ -395,6 +404,10 @@ impl Decoder for Message { impl Message { pub fn channel(&self) -> Channel { match self { + Message::PaiBindFragment(_) | Message::PaiReplyFragment(_) => { + Channel::Logical(LogicalChannel::Intersection) + } + Message::SetupBindReadCapability(_) => Channel::Logical(LogicalChannel::Capability), Message::SetupBindAreaOfInterest(_) => Channel::Logical(LogicalChannel::AreaOfInterest), Message::SetupBindStaticToken(_) => Channel::Logical(LogicalChannel::StaticToken), @@ -412,6 +425,8 @@ impl Message { | Message::DataSetMetadata(_) => Channel::Logical(LogicalChannel::Data), Message::CommitmentReveal(_) + | Message::PaiRequestSubspaceCapability(_) + | Message::PaiReplySubspaceCapability(_) | Message::ControlIssueGuarantee(_) | Message::ControlAbsolve(_) | Message::ControlPlead(_) @@ -456,7 +471,7 @@ impl Message { // } // } // } -// + #[derive(Debug, derive_more::From, strum::Display)] pub enum ReconciliationMessage { SendFingerprint(ReconciliationSendFingerprint), @@ -465,6 +480,7 @@ pub enum ReconciliationMessage { SendPayload(ReconciliationSendPayload), TerminatePayload(ReconciliationTerminatePayload), } + impl TryFrom for ReconciliationMessage { type Error = (); fn try_from(message: Message) -> Result { @@ -478,6 +494,7 @@ impl TryFrom for ReconciliationMessage { } } } + impl From for Message { fn from(message: ReconciliationMessage) -> Self { match message { @@ -496,6 +513,7 @@ pub enum DataMessage { SendPayload(DataSendPayload), SetMetadata(DataSetMetadata), } + impl TryFrom for DataMessage { type Error = (); fn try_from(message: Message) -> Result { @@ 
-507,6 +525,7 @@ impl TryFrom for DataMessage { } } } + impl From for Message { fn from(message: DataMessage) -> Self { match message { @@ -516,7 +535,33 @@ impl From for Message { } } } -// + +#[derive(Debug, derive_more::From, strum::Display)] +pub enum IntersectionMessage { + BindFragment(PaiBindFragment), + ReplyFragment(PaiReplyFragment), +} + +impl TryFrom for IntersectionMessage { + type Error = (); + fn try_from(message: Message) -> Result { + match message { + Message::PaiBindFragment(msg) => Ok(msg.into()), + Message::PaiReplyFragment(msg) => Ok(msg.into()), + _ => Err(()), + } + } +} + +impl From for Message { + fn from(message: IntersectionMessage) -> Self { + match message { + IntersectionMessage::BindFragment(msg) => msg.into(), + IntersectionMessage::ReplyFragment(msg) => msg.into(), + } + } +} + // impl Encoder for ReconciliationMessage { // fn encoded_len(&self) -> usize { // Message::from(se) @@ -827,3 +872,40 @@ pub struct ControlFreeHandle { mine: bool, handle_type: HandleType, } + +type PsiGroup = (); +/// Bind data to an IntersectionHandle for performing private area intersection. +#[derive(Debug, Serialize, Deserialize)] +pub struct PaiBindFragment { + /// The result of first applying hash_into_group to some fragment for private area intersection and then performing scalar multiplication with scalar. + group_member: PsiGroup, + /// Set to true if the private set intersection item is a secondary fragment. + is_secondary: bool, +} + +/// Finalise private set intersection for a single item. +#[derive(Debug, Serialize, Deserialize)] +pub struct PaiReplyFragment { + /// The IntersectionHandle of the PaiBindFragment message which this finalises. + handle: IntersectionHandle, + /// The result of performing scalar multiplication between the group_member of the message that this is replying to and scalar. + group_member: PsiGroup, +} + +/// Ask the receiver to send a SubspaceCapability. 
+#[derive(Debug, Serialize, Deserialize)] +pub struct PaiRequestSubspaceCapability { + /// The IntersectionHandle bound by the sender for the least-specific secondary fragment for whose NamespaceId to request the SubspaceCapability. + handle: IntersectionHandle, +} + +/// Send a previously requested SubspaceCapability. +#[derive(Debug, Serialize, Deserialize)] +pub struct PaiReplySubspaceCapability { + /// The handle of the PaiRequestSubspaceCapability message that this answers (hence, an IntersectionHandle bound by the receiver of this message). + handle: IntersectionHandle, + /// A SubspaceCapability whose granted namespace corresponds to the request this answers. + capability: SubspaceCapability, + /// The SyncSubspaceSignature issued by the receiver of the capability over the sender’s challenge. + signature: SyncSignature, +} diff --git a/iroh-willow/src/session/channels.rs b/iroh-willow/src/session/channels.rs index eaada31164..85531288bd 100644 --- a/iroh-willow/src/session/channels.rs +++ b/iroh-willow/src/session/channels.rs @@ -10,7 +10,7 @@ use tracing::trace; use crate::{ proto::sync::{ - Channel, DataMessage, LogicalChannel, Message, ReconciliationMessage, + Channel, DataMessage, IntersectionMessage, LogicalChannel, Message, ReconciliationMessage, SetupBindAreaOfInterest, SetupBindReadCapability, SetupBindStaticToken, }, util::channel::{Receiver, Sender, WriteError}, @@ -66,6 +66,7 @@ impl> From> for MessageReceiver { #[derive(Debug)] pub struct LogicalChannelReceivers { + pub intersection_recv: MessageReceiver, pub reconciliation_recv: MessageReceiver, pub static_tokens_recv: MessageReceiver, pub capability_recv: MessageReceiver, @@ -75,6 +76,7 @@ pub struct LogicalChannelReceivers { impl LogicalChannelReceivers { pub fn close(&self) { + self.intersection_recv.close(); self.reconciliation_recv.close(); self.static_tokens_recv.close(); self.capability_recv.close(); @@ -85,6 +87,7 @@ impl LogicalChannelReceivers { #[derive(Debug, Clone)] pub struct 
LogicalChannelSenders { + pub intersection: Sender, pub reconciliation: Sender, pub static_tokens: Sender, pub aoi: Sender, @@ -93,6 +96,7 @@ pub struct LogicalChannelSenders { } impl LogicalChannelSenders { pub fn close(&self) { + self.intersection.close(); self.reconciliation.close(); self.static_tokens.close(); self.aoi.close(); @@ -102,6 +106,7 @@ impl LogicalChannelSenders { pub fn get(&self, channel: LogicalChannel) -> &Sender { match channel { + LogicalChannel::Intersection => &self.intersection, LogicalChannel::Reconciliation => &self.reconciliation, LogicalChannel::StaticToken => &self.static_tokens, LogicalChannel::Capability => &self.capability, diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 57b56c05bb..3f2dad41ca 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -36,6 +36,7 @@ impl Session { capability_recv, aoi_recv, data_recv, + intersection_recv, }, } = recv; From b5a2ddf8d13a69b7bb1c6a27f69eb46a157f6cf9 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 27 Jun 2024 16:47:39 +0200 Subject: [PATCH 073/198] feat: private area intersection --- Cargo.lock | 4 + iroh-willow/Cargo.toml | 2 + iroh-willow/src/actor.rs | 10 +- iroh-willow/src/auth.rs | 11 +- iroh-willow/src/net.rs | 13 +- iroh-willow/src/proto/sync.rs | 29 +- iroh-willow/src/proto/willow.rs | 13 + iroh-willow/src/session.rs | 3 +- iroh-willow/src/session/error.rs | 11 + iroh-willow/src/session/pai.rs | 602 ++++++++++++++++++++++++++ iroh-willow/src/session/reconciler.rs | 6 +- iroh-willow/src/session/resource.rs | 25 +- iroh-willow/src/session/run.rs | 131 +++--- iroh-willow/src/session/state.rs | 159 ++++++- 14 files changed, 914 insertions(+), 105 deletions(-) create mode 100644 iroh-willow/src/session/pai.rs diff --git a/Cargo.lock b/Cargo.lock index 530c347b85..94c67f1bd1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1020,7 +1020,9 @@ dependencies = [ "curve25519-dalek-derive", "digest", "fiat-crypto", 
+ "rand_core", "rustc_version", + "serde", "subtle", "zeroize", ] @@ -2927,6 +2929,7 @@ version = "0.18.0" dependencies = [ "anyhow", "bytes", + "curve25519-dalek", "derive_more", "ed25519-dalek", "flume", @@ -2947,6 +2950,7 @@ dependencies = [ "rand_core", "redb 2.1.0", "serde", + "sha2", "strum 0.26.2", "tempfile", "test-strategy", diff --git a/iroh-willow/Cargo.toml b/iroh-willow/Cargo.toml index 83c42ce85c..1be238597f 100644 --- a/iroh-willow/Cargo.toml +++ b/iroh-willow/Cargo.toml @@ -40,6 +40,8 @@ tokio-util = { version = "0.7", features = ["io-util", "io"] } tracing = "0.1" zerocopy = { version = "0.8.0-alpha.9", features = ["derive"] } hex = "0.4.3" +curve25519-dalek = { version = "4.1.3", features = ["digest", "rand_core", "serde"] } +sha2 = "0.10.8" [dev-dependencies] iroh-test = { path = "../iroh-test" } diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/actor.rs index e258e05d07..a5d41b0354 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -389,13 +389,18 @@ impl Actor { } => { let Channels { send, recv } = channels; let id = self.next_session_id(); - let session = Session::new(id, init.mode, our_role, send, initial_transmission); + let session = + Session::new(&self.store, id, our_role, send, init, initial_transmission); + let session = match session { + Ok(session) => session, + Err(err) => return send_reply(reply, Err(err.into())), + }; let store = self.store.clone(); let cancel_token = CancellationToken::new(); let future = session - .run(store, recv, init, cancel_token.clone()) + .run(store, recv, cancel_token.clone()) .instrument(error_span!("session", peer = %peer.fmt_short())); let task_key = self.session_tasks.spawn_local(id, future); @@ -481,6 +486,7 @@ impl Actor { fn complete_session(&mut self, session_id: &SessionId, result: Result<(), Error>) { let session = self.sessions.remove(session_id); if let Some(session) = session { + debug!(?session, ?result, "complete session"); session.on_finish.send(result).ok(); 
self.session_tasks.remove(&session.task_key); } else { diff --git a/iroh-willow/src/auth.rs b/iroh-willow/src/auth.rs index 23d71a631f..d069dbbc81 100644 --- a/iroh-willow/src/auth.rs +++ b/iroh-willow/src/auth.rs @@ -1,5 +1,5 @@ use std::{ - collections::{BTreeSet, HashMap}, + collections::{BTreeMap, BTreeSet, HashMap}, sync::{Arc, RwLock}, }; @@ -19,6 +19,8 @@ use crate::{ store::traits::{SecretStorage, SecretStoreError, Storage}, }; +pub type InterestMap = BTreeMap>; + #[derive(Debug, Clone)] pub struct DelegateTo { pub user: UserId, @@ -220,7 +222,7 @@ impl Auth { pub fn find_read_caps_for_interests( &self, interests: Interests, - ) -> Result>, AuthError> { + ) -> Result { match interests { Interests::All => { let out = self @@ -230,11 +232,12 @@ impl Auth { let aoi = AreaOfInterest::new(area); (auth, BTreeSet::from_iter([aoi])) }) - .collect::>(); + .collect::>(); Ok(out) } Interests::Some(interests) => { - let mut out: HashMap> = HashMap::new(); + let mut out: BTreeMap> = + BTreeMap::new(); for (cap_selector, aoi_selector) in interests { let cap = self.get_read_cap(&cap_selector)?; if let Some(cap) = cap { diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 1d6420a137..ae3995fce3 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -105,7 +105,10 @@ impl SessionHandle { pub async fn join(&mut self) -> anyhow::Result<()> { let session_res = self.handle.on_finish().await; let net_tasks_res = join_all(&mut self.tasks).await; - session_res.or(net_tasks_res) + match session_res { + Err(err) => Err(err.into()), + Ok(()) => net_tasks_res, + } } } @@ -232,7 +235,7 @@ async fn recv_loop(mut recv_stream: RecvStream, mut channel_writer: Writer) -> a let max_buffer_size = channel_writer.max_buffer_size(); while let Some(buf) = recv_stream.read_chunk(max_buffer_size, true).await? 
{ channel_writer.write_all(&buf.bytes[..]).await?; - trace!(len = buf.bytes.len(), "recv"); + // trace!(len = buf.bytes.len(), "recv"); } channel_writer.close(); trace!("close"); @@ -241,9 +244,9 @@ async fn recv_loop(mut recv_stream: RecvStream, mut channel_writer: Writer) -> a async fn send_loop(mut send_stream: SendStream, channel_reader: Reader) -> anyhow::Result<()> { while let Some(data) = channel_reader.read_bytes().await { - let len = data.len(); + // let len = data.len(); send_stream.write_chunk(data).await?; - trace!(len, "sent"); + // trace!(len, "sent"); } send_stream.finish().await?; trace!("close"); @@ -279,7 +282,7 @@ async fn join_all(join_set: &mut JoinSet>) -> anyhow::Result< let mut joined = 0; while let Some(res) = join_set.join_next().await { joined += 1; - tracing::trace!("joined {joined} tasks, remaining {}", join_set.len()); + trace!("joined {joined} tasks, remaining {}", join_set.len()); let res = match res { Ok(Ok(())) => Ok(()), Ok(Err(err)) => Err(err), diff --git a/iroh-willow/src/proto/sync.rs b/iroh-willow/src/proto/sync.rs index 06418ad5eb..b887105d47 100644 --- a/iroh-willow/src/proto/sync.rs +++ b/iroh-willow/src/proto/sync.rs @@ -10,7 +10,7 @@ use crate::util::codec::{DecodeOutcome, Decoder, Encoder}; use super::{ grouping::{Area, AreaOfInterest, ThreeDRange}, meadowcap, - willow::{Entry, DIGEST_LENGTH}, + willow::{Entry, NamespaceId, DIGEST_LENGTH}, }; pub const MAX_PAYLOAD_SIZE_POWER: u8 = 12; @@ -55,7 +55,7 @@ pub type SyncSignature = meadowcap::UserSignature; pub type Receiver = meadowcap::UserPublicKey; /// Represents an authorisation to read an area of data in a Namespace. 
-#[derive(Debug, Clone, Serialize, Deserialize, Hash, Eq, PartialEq)] +#[derive(Debug, Clone, Serialize, Deserialize, Hash, Eq, PartialEq, Ord, PartialOrd)] pub struct ReadAuthorisation(pub ReadCapability, pub Option); impl From for ReadAuthorisation { @@ -76,6 +76,10 @@ impl ReadAuthorisation { pub fn subspace_cap(&self) -> Option<&SubspaceCapability> { self.1.as_ref() } + + pub fn namespace(&self) -> NamespaceId { + self.0.granted_namespace().id() + } } /// The different resource handles employed by the WGPS. @@ -304,7 +308,7 @@ pub enum Message { #[debug("{:?}", _0)] PaiRequestSubspaceCapability(PaiRequestSubspaceCapability), #[debug("{:?}", _0)] - PaiReplySubspaceCapability(PaiReplySubspaceCapability), + PaiReplySubspaceCapability(Box), #[debug("{:?}", _0)] SetupBindStaticToken(SetupBindStaticToken), #[debug("{:?}", _0)] @@ -873,39 +877,40 @@ pub struct ControlFreeHandle { handle_type: HandleType, } -type PsiGroup = (); +pub type PsiGroupBytes = [u8; 32]; + /// Bind data to an IntersectionHandle for performing private area intersection. #[derive(Debug, Serialize, Deserialize)] pub struct PaiBindFragment { /// The result of first applying hash_into_group to some fragment for private area intersection and then performing scalar multiplication with scalar. - group_member: PsiGroup, + pub group_member: PsiGroupBytes, /// Set to true if the private set intersection item is a secondary fragment. - is_secondary: bool, + pub is_secondary: bool, } /// Finalise private set intersection for a single item. #[derive(Debug, Serialize, Deserialize)] pub struct PaiReplyFragment { /// The IntersectionHandle of the PaiBindFragment message which this finalises. - handle: IntersectionHandle, + pub handle: IntersectionHandle, /// The result of performing scalar multiplication between the group_member of the message that this is replying to and scalar. - group_member: PsiGroup, + pub group_member: PsiGroupBytes, } /// Ask the receiver to send a SubspaceCapability. 
#[derive(Debug, Serialize, Deserialize)] pub struct PaiRequestSubspaceCapability { /// The IntersectionHandle bound by the sender for the least-specific secondary fragment for whose NamespaceId to request the SubspaceCapability. - handle: IntersectionHandle, + pub handle: IntersectionHandle, } /// Send a previously requested SubspaceCapability. #[derive(Debug, Serialize, Deserialize)] pub struct PaiReplySubspaceCapability { /// The handle of the PaiRequestSubspaceCapability message that this answers (hence, an IntersectionHandle bound by the receiver of this message). - handle: IntersectionHandle, + pub handle: IntersectionHandle, /// A SubspaceCapability whose granted namespace corresponds to the request this answers. - capability: SubspaceCapability, + pub capability: SubspaceCapability, /// The SyncSubspaceSignature issued by the receiver of the capability over the sender’s challenge. - signature: SyncSignature, + pub signature: SyncSignature, } diff --git a/iroh-willow/src/proto/willow.rs b/iroh-willow/src/proto/willow.rs index a53b4a9153..e4bc2d3390 100644 --- a/iroh-willow/src/proto/willow.rs +++ b/iroh-willow/src/proto/willow.rs @@ -93,6 +93,11 @@ impl Path { Path(path) } + pub fn from_components(components: &[Component]) -> Self { + let path: Arc<[Component]> = components.to_vec().into(); + Self(path) + } + pub fn validate(components: &[&[u8]]) -> Result<(), InvalidPath> { if components.len() > MAX_COMPONENT_COUNT { return Err(InvalidPath::TooManyComponents); @@ -146,6 +151,14 @@ impl Path { let start = count.min(self.len()); Self::new_unchecked(self[start..].to_vec()) } + + pub fn component_count(&self) -> usize { + self.0.len() + } + + pub fn components(&self) -> &[Component] { + &self.0 + } } impl std::ops::Deref for Path { diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 7db3a95db3..b7457b0e82 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -12,6 +12,7 @@ use crate::{ pub mod channels; mod data; mod 
error; +mod pai; mod payload; mod reconciler; mod resource; @@ -113,7 +114,7 @@ pub enum Scope { /// Intersection between two areas of interest. #[derive(Debug, Clone)] -pub struct AreaOfInterestIntersection { +pub struct AoiIntersection { pub our_handle: AreaOfInterestHandle, pub their_handle: AreaOfInterestHandle, pub intersection: Area, diff --git a/iroh-willow/src/session/error.rs b/iroh-willow/src/session/error.rs index 48ff2b9785..c5478fb5bb 100644 --- a/iroh-willow/src/session/error.rs +++ b/iroh-willow/src/session/error.rs @@ -6,6 +6,7 @@ use crate::{ sync::ResourceHandle, willow::Unauthorised, }, + session::{pai::PaiError, resource::MissingResource}, store::traits::SecretStoreError, util::channel::{ReadError, WriteError}, }; @@ -64,6 +65,10 @@ pub enum Error { MissingUserKey(UserId), #[error("a task failed to join")] TaskFailed(#[from] tokio::task::JoinError), + #[error("no known interests for given capability")] + NoKnownInterestsForCapability, + #[error("private area intersection error: {0}")] + Pai(#[from] PaiError), } impl From for Error { @@ -88,3 +93,9 @@ impl From for Error { Self::InvalidParameters("") } } + +impl From for Error { + fn from(value: MissingResource) -> Self { + Self::MissingResource(value.0) + } +} diff --git a/iroh-willow/src/session/pai.rs b/iroh-willow/src/session/pai.rs new file mode 100644 index 0000000000..d4f12c0da5 --- /dev/null +++ b/iroh-willow/src/session/pai.rs @@ -0,0 +1,602 @@ +//! Private Area Intersection finder +//! +//! As defined by the willow spec: [Private Area Intersection](https://willowprotocol.org/specs/pai/index.html) +//! +//! Partly ported from the implementation in earthstar and willow: +//! * https://github.com/earthstar-project/willow-js/blob/0db4b9ec7710fb992ab75a17bd8557040d9a1062/src/wgps/pai/pai_finder.ts +//! * https://github.com/earthstar-project/earthstar/blob/16d6d4028c22fdbb72f7395013b29be7dcd9217a/src/schemes/schemes.ts#L662 +//! 
Licensed under LGPL and ported into this MIT/Apache codebase with explicit permission +//! from the original author (gwil). + +use std::collections::{HashMap, HashSet}; + +use anyhow::Result; +use curve25519_dalek::{ristretto::CompressedRistretto, RistrettoPoint, Scalar}; +use futures_lite::StreamExt; +use tracing::debug; + +use crate::{ + proto::{ + grouping::SubspaceArea, + sync::{ + IntersectionHandle, IntersectionMessage, PaiBindFragment, PaiReplyFragment, + PaiRequestSubspaceCapability, ReadAuthorisation, ReadCapability, + }, + willow::{NamespaceId, Path, SubspaceId}, + }, + session::{ + channels::MessageReceiver, + resource::{MissingResource, ResourceMap}, + Error, Scope, Session, + }, + store::{traits::Storage, Store}, + util::{codec::Encoder, stream::Cancelable}, +}; + +#[derive(Debug, thiserror::Error)] +pub enum PaiError { + #[error("Partner replied with subspace cap for handle which we never sent a request for")] + SubspaceCapRequestForInvalidHandle, + #[error("Partner replied with subspace capability for the wrong namespace")] + SubspaceCapRequestForWrongNamespace, + #[error("Missing resource {:?}", _0.0)] + MissingResource(#[from] MissingResource), +} + +#[derive(Debug)] +pub enum ToPai { + SubmitAuthorisation(ReadAuthorisation), + ReceivedSubspaceCapRequest(IntersectionHandle), + ReceivedVerifiedSubspaceCapReply(IntersectionHandle, NamespaceId), + ReceivedReadCapForIntersection(IntersectionHandle), +} + +#[derive(Debug)] +pub struct PaiFinder { + session: Session, + store: Store, + scalar: PsiScalar, + fragments_info: HashMap, + our_intersection_handles: ResourceMap, + their_intersection_handles: ResourceMap, + requested_subspace_cap_handles: HashSet, +} + +impl PaiFinder { + pub fn new(session: Session, store: Store) -> Self { + Self { + session, + store, + scalar: PaiScheme::get_scalar(), + our_intersection_handles: Default::default(), + their_intersection_handles: Default::default(), + fragments_info: Default::default(), + 
requested_subspace_cap_handles: Default::default(), + } + } + + pub async fn run( + mut self, + to_pai: flume::Receiver, + mut recv: Cancelable>, + ) -> Result<(), Error> { + loop { + tokio::select! { + action = to_pai.recv_async() => { + match action { + Err(_) => break, + Ok(action) => self.on_action(action).await? + } + } + message = recv.next() => { + match message { + None => break, + Some(message) => self.on_message(message?).await? + } + } + } + } + Ok(()) + } + + async fn on_message(&mut self, message: IntersectionMessage) -> Result<(), Error> { + debug!("on_message {message:?}"); + match message { + IntersectionMessage::BindFragment(message) => self.receive_bind(message).await?, + IntersectionMessage::ReplyFragment(message) => self.receive_reply(message).await?, + } + Ok(()) + } + + async fn on_action(&mut self, action: ToPai) -> Result<(), Error> { + debug!("on_action {action:?}"); + match action { + ToPai::SubmitAuthorisation(auth) => self.submit_autorisation(auth).await?, + ToPai::ReceivedSubspaceCapRequest(handle) => { + self.received_subspace_cap_request(handle).await? + } + ToPai::ReceivedVerifiedSubspaceCapReply(handle, namespace) => { + self.received_verified_subspace_cap_reply(handle, namespace)? + } + ToPai::ReceivedReadCapForIntersection(handle) => { + self.received_read_cap_for_intersection(handle)? 
+ } + } + Ok(()) + } + + async fn submit_autorisation(&mut self, authorisation: ReadAuthorisation) -> Result<(), Error> { + let read_cap = authorisation.read_cap(); + let fragment_kit = PaiScheme::get_fragment_kit(read_cap); + let fragment_set = fragment_kit.into_fragment_set(); + match fragment_set { + FragmentSet::Complete(pairs) => { + let last = pairs.len().wrapping_sub(1); + for (i, pair) in pairs.into_iter().enumerate() { + let is_most_specific = i == last; + let (namespace_id, path) = pair.clone(); + let (handle, message) = self.submit_fragment(Fragment::Pair(pair), false)?; + let info = LocalFragmentInfo { + on_intersection: IntersectionAction::new_primary(is_most_specific), + authorisation: authorisation.clone(), + namespace_id, + path, + subspace: SubspaceArea::Any, + }; + self.fragments_info.insert(handle, info); + self.session.send(message).await?; + } + } + FragmentSet::Selective { primary, secondary } => { + let last = primary.len().wrapping_sub(1); + for (i, triple) in primary.into_iter().enumerate() { + let is_most_specific = i == last; + let (namespace_id, subspace_id, path) = triple.clone(); + let (handle, message) = + self.submit_fragment(Fragment::Triple(triple), false)?; + let info = LocalFragmentInfo { + on_intersection: IntersectionAction::new_primary(is_most_specific), + authorisation: authorisation.clone(), + namespace_id, + path, + subspace: SubspaceArea::Id(subspace_id), + }; + self.fragments_info.insert(handle, info); + self.session.send(message).await?; + } + let last = secondary.len().wrapping_sub(1); + for (i, pair) in secondary.into_iter().enumerate() { + let is_most_specific = i == last; + let (namespace_id, path) = pair.clone(); + let (handle, message) = self.submit_fragment(Fragment::Pair(pair), true)?; + let info = LocalFragmentInfo { + on_intersection: IntersectionAction::new_secondary(is_most_specific), + authorisation: authorisation.clone(), + namespace_id, + path, + subspace: SubspaceArea::Any, + }; + 
self.fragments_info.insert(handle, info); + self.session.send(message).await?; + } + } + } + Ok(()) + } + + fn submit_fragment( + &mut self, + fragment: Fragment, + is_secondary: bool, + ) -> Result<(IntersectionHandle, PaiBindFragment)> { + let unmixed = PaiScheme::fragment_to_group(fragment); + let multiplied = PaiScheme::scalar_mult(unmixed, self.scalar); + let info = FragmentInfo { + group: multiplied, + state: FragmentState::Pending, + is_secondary, + }; + let message = info.to_message(); + let handle = self.our_intersection_handles.bind(info); + Ok((handle, message)) + } + + async fn receive_bind(&mut self, message: PaiBindFragment) -> Result<()> { + let PaiBindFragment { + group_member, + is_secondary, + } = message; + let unmixed = PsiGroup::from_bytes(group_member)?; + let multiplied = PaiScheme::scalar_mult(unmixed, self.scalar); + let fragment = FragmentInfo { + group: multiplied, + is_secondary, + state: FragmentState::Pending, + }; + let handle = self.their_intersection_handles.bind(fragment); + let reply = PaiReplyFragment { + handle, + group_member, + }; + self.session.send(reply).await?; + self.check_for_intersection(handle, Scope::Theirs).await?; + Ok(()) + } + + async fn receive_reply(&mut self, message: PaiReplyFragment) -> Result<()> { + let PaiReplyFragment { + handle, + group_member, + } = message; + let group_member = PsiGroup::from_bytes(group_member)?; + let intersection = self.our_intersection_handles.try_get(&handle)?; + let fragment = FragmentInfo { + group: group_member, + is_secondary: intersection.is_secondary, + state: FragmentState::Complete, + }; + self.our_intersection_handles.update(handle, fragment)?; + self.check_for_intersection(handle, Scope::Ours).await?; + Ok(()) + } + + async fn check_for_intersection( + &mut self, + handle: IntersectionHandle, + scope: Scope, + ) -> Result<(), Error> { + let store_to_check = match scope { + Scope::Ours => &self.our_intersection_handles, + Scope::Theirs => &self.their_intersection_handles, 
+ }; + let intersection = store_to_check.try_get(&handle)?; + + if !intersection.is_complete() { + return Ok(()); + } + + // Here we are looping through the whole contents of the handle store because... + // otherwise we need to build a special handle store just for intersections. + // Which we might do one day, but I'm not convinced it's worth it yet. + for (other_handle, other_intersection) in store_to_check.iter() { + if !other_intersection.completes_with(intersection) { + continue; + } + + // If there is an intersection, check what we have to do! + let our_handle = match scope { + Scope::Ours => handle, + Scope::Theirs => *other_handle, + }; + + let fragment_info = self + .fragments_info + .get(&our_handle) + .ok_or(Error::MissingResource(our_handle.into()))?; + + match fragment_info.on_intersection { + IntersectionAction::BindReadCap => { + let intersection = fragment_info.to_pai_intersection(our_handle); + self.session.push_pai_intersection(intersection); + } + IntersectionAction::RequestSubspaceCap => { + self.requested_subspace_cap_handles.insert(our_handle); + let message = PaiRequestSubspaceCapability { handle }; + self.session.send(message).await?; + } + IntersectionAction::ReplyReadCap | IntersectionAction::DoNothing => {} + } + } + + Ok(()) + } + + fn received_read_cap_for_intersection( + &mut self, + their_handle: IntersectionHandle, + ) -> Result<()> { + let their_intersection = self.their_intersection_handles.try_get(&their_handle)?; + for (our_handle, our_intersection) in self.our_intersection_handles.iter() { + if !our_intersection.completes_with(their_intersection) { + continue; + } + let fragment_info = self + .fragments_info + .get(our_handle) + .ok_or(Error::MissingResource((*our_handle).into()))?; + if let IntersectionAction::ReplyReadCap = fragment_info.on_intersection { + let intersection = fragment_info.to_pai_intersection(*our_handle); + self.session.push_pai_intersection(intersection); + } + } + Ok(()) + } + + fn 
received_verified_subspace_cap_reply( + &mut self, + handle: IntersectionHandle, + namespace_id: NamespaceId, + ) -> Result<(), PaiError> { + if !self.requested_subspace_cap_handles.remove(&handle) { + return Err(PaiError::SubspaceCapRequestForInvalidHandle); + } + let _ = self.our_intersection_handles.try_get(&handle)?; + let fragment_info = self + .fragments_info + .get(&handle) + .ok_or(PaiError::SubspaceCapRequestForInvalidHandle)?; + + if fragment_info.namespace_id != namespace_id { + return Err(PaiError::SubspaceCapRequestForWrongNamespace); + } + let intersection = fragment_info.to_pai_intersection(handle); + self.session.push_pai_intersection(intersection); + Ok(()) + } + + pub async fn received_subspace_cap_request( + &mut self, + handle: IntersectionHandle, + ) -> Result<(), Error> { + let result = self.their_intersection_handles.try_get(&handle)?; + for (our_handle, intersection) in self.our_intersection_handles.iter() { + if !intersection.is_complete() { + continue; + } + if !PaiScheme::is_group_equal(&result.group, &intersection.group) { + continue; + } + let fragment_info = self + .fragments_info + .get(our_handle) + .ok_or(PaiError::SubspaceCapRequestForInvalidHandle)?; + if let Some(cap) = fragment_info.authorisation.subspace_cap() { + let message = + self.session + .sign_subspace_capabiltiy(self.store.secrets(), cap, handle)?; + self.session.send(Box::new(message)).await?; + } + } + Ok(()) + } +} + +#[derive(Debug)] +pub struct LocalFragmentInfo { + on_intersection: IntersectionAction, + authorisation: ReadAuthorisation, + namespace_id: NamespaceId, + // will be needed for spec-compliant encodings of read capabilities + #[allow(dead_code)] + path: Path, + // will be needed for spec-compliant encodings of read capabilities + #[allow(dead_code)] + subspace: SubspaceArea, +} + +impl LocalFragmentInfo { + fn to_pai_intersection(&self, handle: IntersectionHandle) -> PaiIntersection { + PaiIntersection { + authorisation: self.authorisation.clone(), + 
handle, + } + } +} + +#[derive(Debug, Clone)] +pub enum Fragment { + Pair(FragmentPair), + Triple(FragmentTriple), +} + +impl Encoder for Fragment { + fn encoded_len(&self) -> usize { + match self { + Fragment::Pair((_, path)) => NamespaceId::LENGTH + path.encoded_len(), + Fragment::Triple((_, _, path)) => { + NamespaceId::LENGTH + SubspaceId::LENGTH + path.encoded_len() + } + } + } + fn encode_into(&self, out: &mut W) -> Result<()> { + match self { + Fragment::Pair((namespace_id, path)) => { + out.write_all(namespace_id.as_bytes())?; + path.encode_into(out)?; + } + Fragment::Triple((namespace_id, subspace_id, path)) => { + out.write_all(namespace_id.as_bytes())?; + out.write_all(subspace_id.as_bytes())?; + path.encode_into(out)?; + } + } + Ok(()) + } +} + +pub type FragmentTriple = (NamespaceId, SubspaceId, Path); + +pub type FragmentPair = (NamespaceId, Path); + +#[derive(Debug, Clone)] +pub enum FragmentSet { + Complete(Vec), + Selective { + primary: Vec, + secondary: Vec, + }, +} + +#[derive(Debug)] +pub enum FragmentKit { + Complete(NamespaceId, Path), + Selective(NamespaceId, SubspaceId, Path), +} + +impl FragmentKit { + fn into_fragment_set(self) -> FragmentSet { + match self { + FragmentKit::Complete(namespace_id, path) => { + let mut pairs = vec![]; + for prefix in prefixes_of(&path) { + pairs.push((namespace_id, prefix)); + } + FragmentSet::Complete(pairs) + } + FragmentKit::Selective(namespace_id, subspace_id, path) => { + let mut primary = vec![]; + let mut secondary = vec![]; + for prefix in prefixes_of(&path) { + primary.push((namespace_id, subspace_id, prefix.clone())); + secondary.push((namespace_id, prefix.clone())); + } + FragmentSet::Selective { primary, secondary } + } + } + } +} + +fn prefixes_of(path: &Path) -> Vec { + let mut out = vec![Path::empty()]; + let components = path.components(); + if components.is_empty() { + return out; + } + for i in 1..=components.len() { + let prefix = Path::from_components(&components[..i]); + 
out.push(prefix); + } + out +} + +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub struct PsiGroup(RistrettoPoint); + +#[derive(Debug, thiserror::Error)] +#[error("Invalid Psi Group")] +pub struct InvalidPsiGroup; + +impl PsiGroup { + pub fn from_bytes(bytes: [u8; 32]) -> Result { + let compressed = CompressedRistretto(bytes); + let uncompressed = compressed.decompress().ok_or(InvalidPsiGroup)?; + Ok(Self(uncompressed)) + } + + pub fn to_bytes(self) -> [u8; 32] { + self.0.compress().0 + } +} + +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub struct PsiScalar(Scalar); + +pub struct PaiScheme; + +impl PaiScheme { + fn fragment_to_group(fragment: Fragment) -> PsiGroup { + let encoded = fragment.encode().expect("encoding not to fail"); + let point = RistrettoPoint::hash_from_bytes::(&encoded); + PsiGroup(point) + } + + fn get_scalar() -> PsiScalar { + PsiScalar(Scalar::random(&mut rand::thread_rng())) + } + + fn scalar_mult(group: PsiGroup, scalar: PsiScalar) -> PsiGroup { + PsiGroup(group.0 * scalar.0) + } + + fn is_group_equal(a: &PsiGroup, b: &PsiGroup) -> bool { + a == b + } + + fn get_fragment_kit(cap: &ReadCapability) -> FragmentKit { + let granted_area = cap.granted_area(); + let granted_namespace = cap.granted_namespace().id(); + let granted_path = granted_area.path.clone(); + + match granted_area.subspace { + SubspaceArea::Any => FragmentKit::Complete(granted_namespace, granted_path), + SubspaceArea::Id(granted_subspace) => { + FragmentKit::Selective(granted_namespace, granted_subspace, granted_path) + } + } + } +} + +#[derive(Debug, Clone, Eq, PartialEq)] +pub enum FragmentState { + Pending, + Complete, +} + +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct FragmentInfo { + group: PsiGroup, + state: FragmentState, + is_secondary: bool, +} + +#[derive(Debug)] +pub struct PaiIntersection { + pub authorisation: ReadAuthorisation, + pub handle: IntersectionHandle, +} + +impl FragmentInfo { + fn to_message(&self) -> PaiBindFragment { + PaiBindFragment { + 
group_member: self.group.to_bytes(), + is_secondary: self.is_secondary, + } + } + + fn is_complete(&self) -> bool { + matches!(self.state, FragmentState::Complete) + } + + fn is_secondary(&self) -> bool { + self.is_secondary + } + + fn completes_with(&self, other: &Self) -> bool { + if !self.is_complete() || !other.is_complete() { + return false; + } + if self.is_secondary() && other.is_secondary() { + return false; + } + if !PaiScheme::is_group_equal(&self.group, &other.group) { + return false; + } + true + } +} + +#[derive(Debug, Clone, Copy)] +pub enum IntersectionAction { + DoNothing, + BindReadCap, + RequestSubspaceCap, + ReplyReadCap, +} + +impl IntersectionAction { + pub fn new_primary(is_most_specific: bool) -> Self { + if is_most_specific { + IntersectionAction::BindReadCap + } else { + IntersectionAction::ReplyReadCap + } + } + + pub fn new_secondary(is_most_specific: bool) -> Self { + if is_most_specific { + IntersectionAction::RequestSubspaceCap + } else { + IntersectionAction::DoNothing + } + } +} diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index 5f05c6f179..1e4a2ea9f3 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -15,7 +15,7 @@ use crate::{ session::{ channels::MessageReceiver, payload::{send_payload_chunked, CurrentPayload}, - AreaOfInterestIntersection, Error, Session, + AoiIntersection, Error, Session, }, store::{ traits::{EntryReader, EntryStorage, SplitAction, SplitOpts, Storage}, @@ -93,8 +93,8 @@ impl Reconciler { Ok(()) } - async fn initiate(&mut self, intersection: AreaOfInterestIntersection) -> Result<(), Error> { - let AreaOfInterestIntersection { + async fn initiate(&mut self, intersection: AoiIntersection) -> Result<(), Error> { + let AoiIntersection { our_handle, their_handle, intersection, diff --git a/iroh-willow/src/session/resource.rs b/iroh-willow/src/session/resource.rs index 63e027b6f7..d0a0150b9e 100644 --- 
a/iroh-willow/src/session/resource.rs +++ b/iroh-willow/src/session/resource.rs @@ -1,5 +1,5 @@ use std::{ - collections::{HashMap, VecDeque}, + collections::{hash_map, HashMap, VecDeque}, task::{Context, Poll, Waker}, }; @@ -33,8 +33,9 @@ impl ResourceMaps { F: for<'a> Fn(&'a Self) -> &'a ResourceMap, R: Eq + PartialEq + Clone, { - let res = selector(self); - res.try_get(&handle).cloned() + let store = selector(self); + let res = store.try_get(&handle).cloned()?; + Ok(res) } pub fn poll_get_eventually( @@ -111,12 +112,12 @@ where } } - pub fn try_get(&self, handle: &H) -> Result<&R, Error> { + pub fn try_get(&self, handle: &H) -> Result<&R, MissingResource> { self.map .get(handle) .as_ref() .map(|r| &r.value) - .ok_or_else(|| Error::MissingResource((*handle).into())) + .ok_or_else(|| MissingResource((*handle).into())) } pub fn get(&self, handle: &H) -> Option<&R> { @@ -151,8 +152,22 @@ where Poll::Pending } } + + pub fn update(&mut self, handle: H, resource: R) -> Result<(), Error> { + match self.map.entry(handle) { + hash_map::Entry::Vacant(_) => Err(Error::MissingResource(handle.into())), + hash_map::Entry::Occupied(mut entry) => { + entry.get_mut().value = resource; + Ok(()) + } + } + } } +#[derive(Debug, thiserror::Error)] +#[error("missing resource {0:?}")] +pub struct MissingResource(pub ResourceHandle); + // #[derive(Debug)] // enum ResourceState { // Active, diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 3f2dad41ca..c235a06ec5 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -4,8 +4,12 @@ use tokio_util::sync::CancellationToken; use tracing::{debug, error_span, trace, warn}; use crate::{ - proto::sync::{ControlIssueGuarantee, LogicalChannel, Message, SetupBindAreaOfInterest}, - session::{channels::LogicalChannelReceivers, Error, Scope, Session, SessionInit}, + proto::sync::{ControlIssueGuarantee, LogicalChannel, Message}, + session::{ + channels::LogicalChannelReceivers, + 
pai::{PaiFinder, ToPai}, + Error, Session, + }, store::{traits::Storage, Store}, util::{channel::Receiver, stream::Cancelable}, }; @@ -24,7 +28,6 @@ impl Session { self, store: Store, recv: ChannelReceivers, - init: SessionInit, cancel_token: CancellationToken, ) -> Result<(), Error> { let ChannelReceivers { @@ -43,11 +46,19 @@ impl Session { // Make all our receivers close once the cancel_token is triggered. let control_recv = Cancelable::new(control_recv, cancel_token.clone()); let reconciliation_recv = Cancelable::new(reconciliation_recv, cancel_token.clone()); + let intersection_recv = Cancelable::new(intersection_recv, cancel_token.clone()); let mut static_tokens_recv = Cancelable::new(static_tokens_recv, cancel_token.clone()); let mut capability_recv = Cancelable::new(capability_recv, cancel_token.clone()); let mut aoi_recv = Cancelable::new(aoi_recv, cancel_token.clone()); let mut data_recv = Cancelable::new(data_recv, cancel_token.clone()); + // Setup the private area intersection finder. + let pai_finder = PaiFinder::new(self.clone(), store.clone()); + let (to_pai_tx, to_pai_rx) = flume::bounded(128); + self.spawn(error_span!("pai"), { + move |_session| async move { pai_finder.run(to_pai_rx, intersection_recv).await } + }); + // Spawn a task to handle incoming static tokens. self.spawn(error_span!("stt"), move |session| async move { while let Some(message) = static_tokens_recv.try_next().await? { @@ -57,7 +68,7 @@ impl Session { }); // Only setup data receiver if session is configured in live mode. - if init.mode == SessionMode::Live { + if self.mode() == SessionMode::Live { self.spawn(error_span!("dat:r"), { let store = store.clone(); move |session| async move { @@ -78,11 +89,19 @@ impl Session { } // Spawn a task to handle incoming capabilities. - self.spawn(error_span!("cap"), move |session| async move { - while let Some(message) = capability_recv.try_next().await? 
{ - session.on_setup_bind_read_capability(message)?; + self.spawn(error_span!("cap"), { + let to_pai = to_pai_tx.clone(); + move |session| async move { + while let Some(message) = capability_recv.try_next().await? { + let handle = message.handle; + session.on_setup_bind_read_capability(message)?; + to_pai + .send_async(ToPai::ReceivedReadCapForIntersection(handle)) + .await + .map_err(|_| Error::InvalidState("PAI actor dead"))?; + } + Ok(()) } - Ok(()) }); // Spawn a task to handle incoming areas of interest. @@ -109,12 +128,24 @@ impl Session { } }); + // Spawn a task to react to found PAI intersections. + let pai_intersections = self.pai_intersection_stream(); + let mut pai_intersections = Cancelable::new(pai_intersections, cancel_token.clone()); + self.spawn(error_span!("pai:intersections"), { + let store = store.clone(); + move |session| async move { + while let Some(intersection) = pai_intersections.next().await { + session.on_pai_intersection(&store, intersection).await?; + } + Ok(()) + } + }); + // Spawn a task to handle control messages self.spawn(error_span!("ctl"), { - let store = store.clone(); let cancel_token = cancel_token.clone(); move |session| async move { - let res = control_loop(session, store, control_recv, init).await; + let res = control_loop(session, control_recv, to_pai_tx).await; cancel_token.cancel(); res } @@ -152,7 +183,10 @@ impl Session { let _guard = span.enter(); trace!(?result, remaining = self.remaining_tasks(), "task complete"); if let Err(err) = result { - warn!("task failed: {err:?}"); + match err { + Error::TaskFailed(err) if err.is_cancelled() => {} + err => warn!("task failed: {err:?}"), + } } } @@ -165,14 +199,13 @@ impl Session { } } -async fn control_loop( +async fn control_loop( session: Session, - store: Store, mut control_recv: Cancelable>, - init: SessionInit, + to_pai: flume::Sender, ) -> Result<(), Error> { debug!(role = ?session.our_role(), "start session"); - let mut init = Some(init); + let mut commitment_revealed 
= false; // Reveal our nonce. let reveal_message = session.reveal_commitment()?; @@ -191,18 +224,36 @@ async fn control_loop( match message { Message::CommitmentReveal(msg) => { session.on_commitment_reveal(msg)?; - let init = init.take().ok_or(Error::InvalidMessageInCurrentState)?; - // send setup messages, but in a separate task to not block incoming guarantees - let store = store.clone(); - session.spawn(error_span!("setup"), move |session| { - setup(store, session, init) + if commitment_revealed { + return Err(Error::InvalidMessageInCurrentState)?; + } + commitment_revealed = true; + let to_pai = to_pai.clone(); + session.spawn(error_span!("setup-pai"), move |session| { + setup_pai(session, to_pai) }); } Message::ControlIssueGuarantee(msg) => { let ControlIssueGuarantee { amount, channel } = msg; - trace!(?channel, %amount, "add guarantees"); + // trace!(?channel, %amount, "add guarantees"); session.add_guarantees(channel, amount); } + Message::PaiRequestSubspaceCapability(msg) => { + to_pai + .send_async(ToPai::ReceivedSubspaceCapRequest(msg.handle)) + .await + .map_err(|_| Error::InvalidState("PAI actor dead"))?; + } + Message::PaiReplySubspaceCapability(msg) => { + session.verify_subspace_capability(&msg)?; + to_pai + .send_async(ToPai::ReceivedVerifiedSubspaceCapReply( + msg.handle, + msg.capability.granted_namespace().id(), + )) + .await + .map_err(|_| Error::InvalidState("PAI actor dead"))?; + } _ => return Err(Error::UnsupportedMessage), } } @@ -210,38 +261,12 @@ async fn control_loop( Ok(()) } -async fn setup( - store: Store, - session: Session, - init: SessionInit, -) -> Result<(), Error> { - // debug!(interests = init.interests.len(), "start setup"); - debug!(?init, "start setup"); - let interests = store.auth().find_read_caps_for_interests(init.interests)?; - debug!(?interests, "found interests"); - for (authorisation, aois) in interests { - // TODO: implement private area intersection - let intersection_handle = 0.into(); - let read_cap = 
authorisation.read_cap(); - let (our_capability_handle, message) = session.bind_and_sign_capability( - store.secrets(), - intersection_handle, - read_cap.clone(), - )?; - if let Some(message) = message { - session.send(message).await?; - } - - for area_of_interest in aois { - let msg = SetupBindAreaOfInterest { - area_of_interest, - authorisation: our_capability_handle, - }; - // TODO: We could skip the clone if we re-enabled sending by reference. - session.bind_area_of_interest(Scope::Ours, msg.clone(), read_cap)?; - session.send(msg).await?; - } +async fn setup_pai(session: Session, to_pai: flume::Sender) -> Result<(), Error> { + for authorisation in session.interests().keys() { + to_pai + .send_async(ToPai::SubmitAuthorisation(authorisation.clone())) + .await + .map_err(|_| Error::InvalidState("PAI actor dead"))?; } - debug!("setup done"); Ok(()) } diff --git a/iroh-willow/src/session/state.rs b/iroh-willow/src/session/state.rs index 2323ade36d..1ef26e9d31 100644 --- a/iroh-willow/src/session/state.rs +++ b/iroh-willow/src/session/state.rs @@ -11,27 +11,32 @@ use futures_lite::Stream; use tracing::{debug, trace, Instrument, Span}; use crate::{ + auth::InterestMap, proto::{ challenge::ChallengeState, grouping::ThreeDRange, keys::NamespaceId, sync::{ AreaOfInterestHandle, CapabilityHandle, Channel, CommitmentReveal, DynamicToken, - IntersectionHandle, IsHandle, LogicalChannel, Message, ReadCapability, - ReconciliationAnnounceEntries, ReconciliationSendFingerprint, SetupBindAreaOfInterest, - SetupBindReadCapability, SetupBindStaticToken, StaticToken, StaticTokenHandle, + IntersectionHandle, IsHandle, LogicalChannel, Message, PaiReplySubspaceCapability, + ReadCapability, ReconciliationAnnounceEntries, ReconciliationSendFingerprint, + SetupBindAreaOfInterest, SetupBindReadCapability, SetupBindStaticToken, StaticToken, + StaticTokenHandle, SubspaceCapability, }, willow::{AuthorisedEntry, Entry}, }, - session::InitialTransmission, - store::traits::SecretStorage, + 
session::{pai::PaiIntersection, InitialTransmission, SessionInit}, + store::{ + traits::{SecretStorage, Storage}, + Store, + }, util::{channel::WriteError, queue::Queue, task::JoinMap}, }; use super::{ channels::ChannelSenders, resource::{ResourceMap, ResourceMaps}, - AreaOfInterestIntersection, Error, Role, Scope, SessionId, SessionMode, + AoiIntersection, Error, Role, Scope, SessionId, SessionMode, }; #[derive(Debug, Clone)] @@ -42,36 +47,44 @@ struct SessionInner { id: SessionId, our_role: Role, mode: SessionMode, + interests: InterestMap, state: RefCell, send: ChannelSenders, tasks: RefCell>>, } impl Session { - pub fn new( + pub fn new( + store: &Store, id: SessionId, - mode: SessionMode, our_role: Role, send: ChannelSenders, + init: SessionInit, initial_transmission: InitialTransmission, - ) -> Self { + ) -> Result { let state = SessionState::new(initial_transmission); - Self(Rc::new(SessionInner { - mode, + let interests = store.auth().find_read_caps_for_interests(init.interests)?; + Ok(Self(Rc::new(SessionInner { + mode: init.mode, id, our_role, + interests, state: RefCell::new(state), send, tasks: Default::default(), - })) + }))) } pub fn id(&self) -> &SessionId { &self.0.id } - pub fn mode(&self) -> &SessionMode { - &self.0.mode + pub fn mode(&self) -> SessionMode { + self.0.mode + } + + pub fn interests(&self) -> &InterestMap { + &self.0.interests } pub fn spawn(&self, span: Span, f: F) @@ -157,9 +170,9 @@ impl Session { self.0.our_role } - pub async fn next_aoi_intersection(&self) -> Option { + pub async fn next_aoi_intersection(&self) -> Option { poll_fn(|cx| { - let mut queue = &mut self.0.state.borrow_mut().intersection_queue; + let mut queue = &mut self.0.state.borrow_mut().aoi_intersection_queue; Pin::new(&mut queue).poll_next(cx) }) .await @@ -195,6 +208,23 @@ impl Session { .await } + pub fn sign_subspace_capabiltiy( + &self, + key_store: &K, + cap: &SubspaceCapability, + handle: IntersectionHandle, + ) -> Result { + let inner = self.state(); + 
let signable = inner.challenge.signable()?; + let signature = key_store.sign_user(&cap.receiver().id(), &signable)?; + let message = PaiReplySubspaceCapability { + handle, + capability: cap.clone(), + signature, + }; + Ok(message) + } + pub fn bind_and_sign_capability( &self, key_store: &K, @@ -323,6 +353,17 @@ impl Session { Ok(()) } + pub fn verify_subspace_capability( + &self, + msg: &PaiReplySubspaceCapability, + ) -> Result<(), Error> { + msg.capability.validate()?; + self.state() + .challenge + .verify(msg.capability.receiver(), &msg.signature)?; + Ok(()) + } + pub fn reconciliation_is_complete(&self) -> bool { let state = self.state(); // tracing::debug!( @@ -434,6 +475,64 @@ impl Session { (handle, msg) } + pub fn push_pai_intersection(&self, intersection: PaiIntersection) { + self.state_mut() + .pai_intersection_queue + .push_back(intersection) + } + + pub async fn next_pai_intersection(&self) -> Option { + poll_fn(|cx| { + let mut queue = &mut self.0.state.borrow_mut().pai_intersection_queue; + Pin::new(&mut queue).poll_next(cx) + }) + .await + } + + pub fn pai_intersection_stream(&self) -> PaiIntersectionStream { + PaiIntersectionStream { + session: self.clone(), + } + } + + pub async fn on_pai_intersection( + &self, + store: &Store, + intersection: PaiIntersection, + ) -> Result<(), Error> { + // TODO: Somehow getting from the BTreeMap is not working, even though the equality check + // below works as exepcted. 
+ // let aois = self + // .0 + // .interests + // .get(&intersection.authorisation) + // .ok_or(Error::NoKnownInterestsForCapability)?; + for (authorisation, aois) in self.0.interests.iter() { + if *authorisation != intersection.authorisation { + continue; + } + let read_cap = authorisation.read_cap(); + let (our_capability_handle, message) = self.bind_and_sign_capability( + store.secrets(), + intersection.handle, + read_cap.clone(), + )?; + if let Some(message) = message { + self.send(message).await?; + } + + for area_of_interest in aois.iter().cloned() { + let msg = SetupBindAreaOfInterest { + area_of_interest, + authorisation: our_capability_handle, + }; + self.bind_area_of_interest(Scope::Ours, msg.clone(), read_cap)?; + self.send(msg).await?; + } + } + Ok(()) + } + async fn their_aoi_to_namespace_eventually( &self, handle: AreaOfInterestHandle, @@ -479,7 +578,8 @@ struct SessionState { our_uncovered_ranges: HashSet<(AreaOfInterestHandle, u64)>, their_uncovered_ranges: HashSet<(AreaOfInterestHandle, u64)>, pending_announced_entries: Option, - intersection_queue: Queue, + aoi_intersection_queue: Queue, + pai_intersection_queue: Queue, } impl SessionState { @@ -499,7 +599,8 @@ impl SessionState { our_uncovered_ranges: Default::default(), their_uncovered_ranges: Default::default(), pending_announced_entries: Default::default(), - intersection_queue: Default::default(), + aoi_intersection_queue: Default::default(), + pai_intersection_queue: Default::default(), } } @@ -523,6 +624,7 @@ impl SessionState { Scope::Theirs => &self.our_resources, }; + // TODO: If we stored the AoIs by namespace we would need to iterate less. for (candidate_handle, candidate) in other_resources.areas_of_interest.iter() { let candidate_handle = *candidate_handle; // Ignore areas without a capability. 
@@ -540,13 +642,13 @@ impl SessionState { Scope::Ours => (handle, candidate_handle), Scope::Theirs => (candidate_handle, handle), }; - let info = AreaOfInterestIntersection { + let info = AoiIntersection { our_handle, their_handle, intersection, namespace: namespace.into(), }; - self.intersection_queue.push_back(info); + self.aoi_intersection_queue.push_back(info); } } Ok(()) @@ -601,3 +703,20 @@ impl SessionState { range_count } } + +#[derive(Debug)] +pub struct PaiIntersectionStream { + session: Session, +} + +impl Stream for PaiIntersectionStream { + type Item = PaiIntersection; + + fn poll_next( + self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> Poll> { + let mut queue = &mut self.session.0.state.borrow_mut().pai_intersection_queue; + Pin::new(&mut queue).poll_next(cx) + } +} From 622a56554f8ef27c96405c76bbf591db2b7d190c Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 27 Jun 2024 20:58:03 +0200 Subject: [PATCH 074/198] refactor: add proto::pai and more docs --- iroh-willow/src/proto.rs | 1 + iroh-willow/src/proto/pai.rs | 150 +++++++++++++++++++++++++++++++ iroh-willow/src/proto/willow.rs | 13 +++ iroh-willow/src/session/pai.rs | 153 ++------------------------------ 4 files changed, 169 insertions(+), 148 deletions(-) create mode 100644 iroh-willow/src/proto/pai.rs diff --git a/iroh-willow/src/proto.rs b/iroh-willow/src/proto.rs index 05a43e4fff..d527548475 100644 --- a/iroh-willow/src/proto.rs +++ b/iroh-willow/src/proto.rs @@ -2,5 +2,6 @@ pub mod challenge; pub mod grouping; pub mod keys; pub mod meadowcap; +pub mod pai; pub mod sync; pub mod willow; diff --git a/iroh-willow/src/proto/pai.rs b/iroh-willow/src/proto/pai.rs new file mode 100644 index 0000000000..349a9b3640 --- /dev/null +++ b/iroh-willow/src/proto/pai.rs @@ -0,0 +1,150 @@ +//! Primitives for [Private Area Intersection] +//! +//! * Uses ristretto255 and SHA512 for `hash_into_group`. +//! +//! TODO: Use edwards25519 with [RFC 9380] instead. +//! +//! 
[Private Area Intersection]: https://willowprotocol.org/specs/pai/index.html +//! [RFC 9380]: https://www.rfc-editor.org/rfc/rfc9380 + +use curve25519_dalek::{ristretto::CompressedRistretto, RistrettoPoint, Scalar}; + +use crate::{ + proto::{ + grouping::SubspaceArea, + sync::ReadCapability, + willow::{NamespaceId, Path, SubspaceId}, + }, + util::codec::Encoder, +}; + +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub struct PsiGroup(RistrettoPoint); + +#[derive(Debug, thiserror::Error)] +#[error("Invalid Psi Group")] +pub struct InvalidPsiGroup; + +impl PsiGroup { + pub fn from_bytes(bytes: [u8; 32]) -> Result { + let compressed = CompressedRistretto(bytes); + let uncompressed = compressed.decompress().ok_or(InvalidPsiGroup)?; + Ok(Self(uncompressed)) + } + + pub fn to_bytes(self) -> [u8; 32] { + self.0.compress().0 + } +} + +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub struct PsiScalar(Scalar); + +#[derive(Debug)] +pub struct PaiScheme; + +impl PaiScheme { + pub fn hash_into_group(fragment: Fragment) -> PsiGroup { + let encoded = fragment.encode().expect("encoding not to fail"); + let point = RistrettoPoint::hash_from_bytes::(&encoded); + PsiGroup(point) + } + + pub fn get_scalar() -> PsiScalar { + PsiScalar(Scalar::random(&mut rand::thread_rng())) + } + + pub fn scalar_mult(group: PsiGroup, scalar: PsiScalar) -> PsiGroup { + PsiGroup(group.0 * scalar.0) + } + + pub fn is_group_equal(a: &PsiGroup, b: &PsiGroup) -> bool { + a == b + } + + pub fn get_fragment_kit(cap: &ReadCapability) -> FragmentKit { + let granted_area = cap.granted_area(); + let granted_namespace = cap.granted_namespace().id(); + let granted_path = granted_area.path.clone(); + + match granted_area.subspace { + SubspaceArea::Any => FragmentKit::Complete(granted_namespace, granted_path), + SubspaceArea::Id(granted_subspace) => { + FragmentKit::Selective(granted_namespace, granted_subspace, granted_path) + } + } + } +} + +#[derive(Debug, Clone)] +pub enum Fragment { + Pair(FragmentPair), + 
Triple(FragmentTriple), +} + +impl Encoder for Fragment { + fn encoded_len(&self) -> usize { + match self { + Fragment::Pair((_, path)) => NamespaceId::LENGTH + path.encoded_len(), + Fragment::Triple((_, _, path)) => { + NamespaceId::LENGTH + SubspaceId::LENGTH + path.encoded_len() + } + } + } + fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { + match self { + Fragment::Pair((namespace_id, path)) => { + out.write_all(namespace_id.as_bytes())?; + path.encode_into(out)?; + } + Fragment::Triple((namespace_id, subspace_id, path)) => { + out.write_all(namespace_id.as_bytes())?; + out.write_all(subspace_id.as_bytes())?; + path.encode_into(out)?; + } + } + Ok(()) + } +} + +pub type FragmentTriple = (NamespaceId, SubspaceId, Path); + +pub type FragmentPair = (NamespaceId, Path); + +#[derive(Debug, Clone)] +pub enum FragmentSet { + Complete(Vec), + Selective { + primary: Vec, + secondary: Vec, + }, +} + +#[derive(Debug)] +pub enum FragmentKit { + Complete(NamespaceId, Path), + Selective(NamespaceId, SubspaceId, Path), +} + +impl FragmentKit { + pub fn into_fragment_set(self) -> FragmentSet { + match self { + FragmentKit::Complete(namespace_id, path) => { + let mut pairs = vec![]; + for prefix in path.all_prefixes() { + pairs.push((namespace_id, prefix)); + } + FragmentSet::Complete(pairs) + } + FragmentKit::Selective(namespace_id, subspace_id, path) => { + let mut primary = vec![]; + let mut secondary = vec![]; + for prefix in path.all_prefixes() { + primary.push((namespace_id, subspace_id, prefix.clone())); + secondary.push((namespace_id, prefix.clone())); + } + FragmentSet::Selective { primary, secondary } + } + } + } +} diff --git a/iroh-willow/src/proto/willow.rs b/iroh-willow/src/proto/willow.rs index e4bc2d3390..ebc9463f3b 100644 --- a/iroh-willow/src/proto/willow.rs +++ b/iroh-willow/src/proto/willow.rs @@ -159,6 +159,19 @@ impl Path { pub fn components(&self) -> &[Component] { &self.0 } + + pub fn all_prefixes(&self) -> Vec { + let mut out = 
vec![Path::empty()]; + let components = self.components(); + if components.is_empty() { + return out; + } + for i in 1..=components.len() { + let prefix = Path::from_components(&components[..i]); + out.push(prefix); + } + out + } } impl std::ops::Deref for Path { diff --git a/iroh-willow/src/session/pai.rs b/iroh-willow/src/session/pai.rs index d4f12c0da5..8b53be4d0c 100644 --- a/iroh-willow/src/session/pai.rs +++ b/iroh-willow/src/session/pai.rs @@ -11,18 +11,18 @@ use std::collections::{HashMap, HashSet}; use anyhow::Result; -use curve25519_dalek::{ristretto::CompressedRistretto, RistrettoPoint, Scalar}; use futures_lite::StreamExt; use tracing::debug; use crate::{ proto::{ grouping::SubspaceArea, + pai::{Fragment, FragmentSet, PaiScheme, PsiGroup, PsiScalar}, sync::{ IntersectionHandle, IntersectionMessage, PaiBindFragment, PaiReplyFragment, - PaiRequestSubspaceCapability, ReadAuthorisation, ReadCapability, + PaiRequestSubspaceCapability, ReadAuthorisation, }, - willow::{NamespaceId, Path, SubspaceId}, + willow::{NamespaceId, Path}, }, session::{ channels::MessageReceiver, @@ -30,7 +30,7 @@ use crate::{ Error, Scope, Session, }, store::{traits::Storage, Store}, - util::{codec::Encoder, stream::Cancelable}, + util::stream::Cancelable, }; #[derive(Debug, thiserror::Error)] @@ -189,7 +189,7 @@ impl PaiFinder { fragment: Fragment, is_secondary: bool, ) -> Result<(IntersectionHandle, PaiBindFragment)> { - let unmixed = PaiScheme::fragment_to_group(fragment); + let unmixed = PaiScheme::hash_into_group(fragment); let multiplied = PaiScheme::scalar_mult(unmixed, self.scalar); let info = FragmentInfo { group: multiplied, @@ -383,149 +383,6 @@ impl LocalFragmentInfo { } } -#[derive(Debug, Clone)] -pub enum Fragment { - Pair(FragmentPair), - Triple(FragmentTriple), -} - -impl Encoder for Fragment { - fn encoded_len(&self) -> usize { - match self { - Fragment::Pair((_, path)) => NamespaceId::LENGTH + path.encoded_len(), - Fragment::Triple((_, _, path)) => { - 
NamespaceId::LENGTH + SubspaceId::LENGTH + path.encoded_len() - } - } - } - fn encode_into(&self, out: &mut W) -> Result<()> { - match self { - Fragment::Pair((namespace_id, path)) => { - out.write_all(namespace_id.as_bytes())?; - path.encode_into(out)?; - } - Fragment::Triple((namespace_id, subspace_id, path)) => { - out.write_all(namespace_id.as_bytes())?; - out.write_all(subspace_id.as_bytes())?; - path.encode_into(out)?; - } - } - Ok(()) - } -} - -pub type FragmentTriple = (NamespaceId, SubspaceId, Path); - -pub type FragmentPair = (NamespaceId, Path); - -#[derive(Debug, Clone)] -pub enum FragmentSet { - Complete(Vec), - Selective { - primary: Vec, - secondary: Vec, - }, -} - -#[derive(Debug)] -pub enum FragmentKit { - Complete(NamespaceId, Path), - Selective(NamespaceId, SubspaceId, Path), -} - -impl FragmentKit { - fn into_fragment_set(self) -> FragmentSet { - match self { - FragmentKit::Complete(namespace_id, path) => { - let mut pairs = vec![]; - for prefix in prefixes_of(&path) { - pairs.push((namespace_id, prefix)); - } - FragmentSet::Complete(pairs) - } - FragmentKit::Selective(namespace_id, subspace_id, path) => { - let mut primary = vec![]; - let mut secondary = vec![]; - for prefix in prefixes_of(&path) { - primary.push((namespace_id, subspace_id, prefix.clone())); - secondary.push((namespace_id, prefix.clone())); - } - FragmentSet::Selective { primary, secondary } - } - } - } -} - -fn prefixes_of(path: &Path) -> Vec { - let mut out = vec![Path::empty()]; - let components = path.components(); - if components.is_empty() { - return out; - } - for i in 1..=components.len() { - let prefix = Path::from_components(&components[..i]); - out.push(prefix); - } - out -} - -#[derive(Debug, Clone, Copy, Eq, PartialEq)] -pub struct PsiGroup(RistrettoPoint); - -#[derive(Debug, thiserror::Error)] -#[error("Invalid Psi Group")] -pub struct InvalidPsiGroup; - -impl PsiGroup { - pub fn from_bytes(bytes: [u8; 32]) -> Result { - let compressed = 
CompressedRistretto(bytes); - let uncompressed = compressed.decompress().ok_or(InvalidPsiGroup)?; - Ok(Self(uncompressed)) - } - - pub fn to_bytes(self) -> [u8; 32] { - self.0.compress().0 - } -} - -#[derive(Debug, Clone, Copy, Eq, PartialEq)] -pub struct PsiScalar(Scalar); - -pub struct PaiScheme; - -impl PaiScheme { - fn fragment_to_group(fragment: Fragment) -> PsiGroup { - let encoded = fragment.encode().expect("encoding not to fail"); - let point = RistrettoPoint::hash_from_bytes::(&encoded); - PsiGroup(point) - } - - fn get_scalar() -> PsiScalar { - PsiScalar(Scalar::random(&mut rand::thread_rng())) - } - - fn scalar_mult(group: PsiGroup, scalar: PsiScalar) -> PsiGroup { - PsiGroup(group.0 * scalar.0) - } - - fn is_group_equal(a: &PsiGroup, b: &PsiGroup) -> bool { - a == b - } - - fn get_fragment_kit(cap: &ReadCapability) -> FragmentKit { - let granted_area = cap.granted_area(); - let granted_namespace = cap.granted_namespace().id(); - let granted_path = granted_area.path.clone(); - - match granted_area.subspace { - SubspaceArea::Any => FragmentKit::Complete(granted_namespace, granted_path), - SubspaceArea::Id(granted_subspace) => { - FragmentKit::Selective(granted_namespace, granted_subspace, granted_path) - } - } - } -} - #[derive(Debug, Clone, Eq, PartialEq)] pub enum FragmentState { Pending, From b5ba4f38feb2ef6c20e550eff290e8dcec0e08c5 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Fri, 28 Jun 2024 01:11:20 +0200 Subject: [PATCH 075/198] refactor: decouple PaiFinder from session and store, add test --- iroh-willow/src/auth.rs | 7 +- iroh-willow/src/proto/meadowcap.rs | 6 +- iroh-willow/src/proto/pai.rs | 25 +- iroh-willow/src/proto/sync.rs | 20 +- iroh-willow/src/session/pai.rs | 535 +++++++++++++++++++---------- iroh-willow/src/session/run.rs | 30 +- 6 files changed, 418 insertions(+), 205 deletions(-) diff --git a/iroh-willow/src/auth.rs b/iroh-willow/src/auth.rs index d069dbbc81..cca2246c6c 100644 --- a/iroh-willow/src/auth.rs +++ 
b/iroh-willow/src/auth.rs @@ -291,7 +291,7 @@ impl Auth { .secrets .get_namespace(&namespace_id) .ok_or(AuthError::MissingNamespaceSecret(namespace_id))?; - McCapability::new_owned(namespace_secret, user_key, AccessMode::Read) + McCapability::new_owned(&namespace_secret, user_key, AccessMode::Read) } NamespaceKind::Communal => { McCapability::new_communal(namespace_key, user_key, AccessMode::Read) @@ -314,7 +314,7 @@ impl Auth { .secrets .get_namespace(&namespace_id) .ok_or(AuthError::MissingNamespaceSecret(namespace_id))?; - McCapability::new_owned(namespace_secret, user_key, AccessMode::Write) + McCapability::new_owned(&namespace_secret, user_key, AccessMode::Write) } NamespaceKind::Communal => { McCapability::new_communal(namespace_key, user_key, AccessMode::Write) @@ -356,7 +356,8 @@ impl Auth { restrict_area: Option, ) -> Result { let auth = self.get_read_cap(from)?.ok_or(AuthError::NoCapability)?; - let ReadAuthorisation(read_cap, _subspace_cap) = auth; + let read_cap = auth.read_cap(); + let _subspace_cap = auth.subspace_cap(); let user_id = read_cap.receiver().id(); let user_secret = self .secrets diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index 85bd4d37be..ee5f59326f 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -144,12 +144,12 @@ pub enum McCapability { impl McCapability { pub fn new_owned( - namespace_secret: NamespaceSecretKey, + namespace_secret: &NamespaceSecretKey, user_key: UserPublicKey, access_mode: AccessMode, ) -> Self { McCapability::Owned(OwnedCapability::new( - &namespace_secret, + namespace_secret, user_key, access_mode, )) @@ -692,7 +692,7 @@ mod tests { let betty_secret = UserSecretKey::generate(&mut rng); let alfie_public = alfie_secret.public_key(); let betty_public = betty_secret.public_key(); - let cap = McCapability::new_owned(namespace_secret, alfie_public, AccessMode::Write); + let cap = McCapability::new_owned(&namespace_secret, alfie_public, 
AccessMode::Write); cap.validate().expect("cap to be valid"); let cap_betty = cap .delegate(&alfie_secret, betty_public, Area::full()) diff --git a/iroh-willow/src/proto/pai.rs b/iroh-willow/src/proto/pai.rs index 349a9b3640..fe80c93617 100644 --- a/iroh-willow/src/proto/pai.rs +++ b/iroh-willow/src/proto/pai.rs @@ -44,7 +44,7 @@ pub struct PsiScalar(Scalar); pub struct PaiScheme; impl PaiScheme { - pub fn hash_into_group(fragment: Fragment) -> PsiGroup { + pub fn hash_into_group(fragment: &Fragment) -> PsiGroup { let encoded = fragment.encode().expect("encoding not to fail"); let point = RistrettoPoint::hash_from_bytes::(&encoded); PsiGroup(point) @@ -82,6 +82,17 @@ pub enum Fragment { Triple(FragmentTriple), } +impl Fragment { + pub fn into_parts(self) -> (NamespaceId, SubspaceArea, Path) { + match self { + Fragment::Pair((namespace_id, path)) => (namespace_id, SubspaceArea::Any, path), + Fragment::Triple((namespace_id, subspace_id, path)) => { + (namespace_id, SubspaceArea::Id(subspace_id), path) + } + } + } +} + impl Encoder for Fragment { fn encoded_len(&self) -> usize { match self { @@ -111,6 +122,18 @@ pub type FragmentTriple = (NamespaceId, SubspaceId, Path); pub type FragmentPair = (NamespaceId, Path); +#[derive(Debug, Clone, Copy)] +pub enum FragmentKind { + Primary, + Secondary, +} + +impl FragmentKind { + pub fn is_secondary(&self) -> bool { + matches!(self, FragmentKind::Secondary) + } +} + #[derive(Debug, Clone)] pub enum FragmentSet { Complete(Vec), diff --git a/iroh-willow/src/proto/sync.rs b/iroh-willow/src/proto/sync.rs index b887105d47..101b9501f3 100644 --- a/iroh-willow/src/proto/sync.rs +++ b/iroh-willow/src/proto/sync.rs @@ -1,4 +1,4 @@ -use std::{fmt, io::Write}; +use std::{fmt, io::Write, sync::Arc}; use iroh_base::hash::Hash; @@ -56,29 +56,29 @@ pub type Receiver = meadowcap::UserPublicKey; /// Represents an authorisation to read an area of data in a Namespace. 
#[derive(Debug, Clone, Serialize, Deserialize, Hash, Eq, PartialEq, Ord, PartialOrd)] -pub struct ReadAuthorisation(pub ReadCapability, pub Option); +pub struct ReadAuthorisation(Arc<(ReadCapability, Option)>); impl From for ReadAuthorisation { fn from(value: ReadCapability) -> Self { - Self(value, None) + Self(Arc::new((value, None))) } } impl ReadAuthorisation { pub fn new(read_cap: ReadCapability, subspace_cap: Option) -> Self { - Self(read_cap, subspace_cap) + Self(Arc::new((read_cap, subspace_cap))) } pub fn read_cap(&self) -> &ReadCapability { - &self.0 + &self.0 .0 } pub fn subspace_cap(&self) -> Option<&SubspaceCapability> { - self.1.as_ref() + self.0 .1.as_ref() } pub fn namespace(&self) -> NamespaceId { - self.0.granted_namespace().id() + self.0 .0.granted_namespace().id() } } @@ -880,20 +880,22 @@ pub struct ControlFreeHandle { pub type PsiGroupBytes = [u8; 32]; /// Bind data to an IntersectionHandle for performing private area intersection. -#[derive(Debug, Serialize, Deserialize)] +#[derive(derive_more::Debug, Serialize, Deserialize)] pub struct PaiBindFragment { /// The result of first applying hash_into_group to some fragment for private area intersection and then performing scalar multiplication with scalar. + #[debug("{}", hex::encode(self.group_member))] pub group_member: PsiGroupBytes, /// Set to true if the private set intersection item is a secondary fragment. pub is_secondary: bool, } /// Finalise private set intersection for a single item. -#[derive(Debug, Serialize, Deserialize)] +#[derive(derive_more::Debug, Serialize, Deserialize)] pub struct PaiReplyFragment { /// The IntersectionHandle of the PaiBindFragment message which this finalises. pub handle: IntersectionHandle, /// The result of performing scalar multiplication between the group_member of the message that this is replying to and scalar. 
+ #[debug("{}", hex::encode(self.group_member))] pub group_member: PsiGroupBytes, } diff --git a/iroh-willow/src/session/pai.rs b/iroh-willow/src/session/pai.rs index 8b53be4d0c..864fe89097 100644 --- a/iroh-willow/src/session/pai.rs +++ b/iroh-willow/src/session/pai.rs @@ -11,26 +11,25 @@ use std::collections::{HashMap, HashSet}; use anyhow::Result; -use futures_lite::StreamExt; -use tracing::debug; +use futures_lite::{Stream, StreamExt}; +use genawaiter::GeneratorState; +use tracing::{debug, trace}; use crate::{ proto::{ grouping::SubspaceArea, - pai::{Fragment, FragmentSet, PaiScheme, PsiGroup, PsiScalar}, + pai::{Fragment, FragmentKind, FragmentSet, PaiScheme, PsiGroup, PsiScalar}, sync::{ - IntersectionHandle, IntersectionMessage, PaiBindFragment, PaiReplyFragment, - PaiRequestSubspaceCapability, ReadAuthorisation, + IntersectionHandle, IntersectionMessage, Message, PaiBindFragment, PaiReplyFragment, + PaiRequestSubspaceCapability, ReadAuthorisation, SubspaceCapability, }, willow::{NamespaceId, Path}, }, session::{ - channels::MessageReceiver, resource::{MissingResource, ResourceMap}, Error, Scope, Session, }, store::{traits::Storage, Store}, - util::stream::Cancelable, }; #[derive(Debug, thiserror::Error)] @@ -44,88 +43,115 @@ pub enum PaiError { } #[derive(Debug)] -pub enum ToPai { +pub enum Input { SubmitAuthorisation(ReadAuthorisation), + ReceivedMessage(Result), ReceivedSubspaceCapRequest(IntersectionHandle), ReceivedVerifiedSubspaceCapReply(IntersectionHandle, NamespaceId), ReceivedReadCapForIntersection(IntersectionHandle), } #[derive(Debug)] -pub struct PaiFinder { - session: Session, - store: Store, +pub enum Output { + SendMessage(Message), + NewIntersection(PaiIntersection), + SignAndSendSubspaceCap(IntersectionHandle, SubspaceCapability), +} + +#[derive(derive_more::Debug)] +pub struct PaiFinder { + #[debug("Co")] + co: genawaiter::rc::Co, scalar: PsiScalar, fragments_info: HashMap, - our_intersection_handles: ResourceMap, - 
their_intersection_handles: ResourceMap, + our_intersection_handles: ResourceMap, + their_intersection_handles: ResourceMap, requested_subspace_cap_handles: HashSet, } -impl PaiFinder { - pub fn new(session: Session, store: Store) -> Self { - Self { - session, - store, - scalar: PaiScheme::get_scalar(), - our_intersection_handles: Default::default(), - their_intersection_handles: Default::default(), - fragments_info: Default::default(), - requested_subspace_cap_handles: Default::default(), +impl PaiFinder { + pub async fn run_with_session( + session: Session, + store: Store, + inbox: impl Stream + Unpin, + ) -> Result<(), Error> { + let mut gen = genawaiter::rc::Gen::new(|co| PaiFinder::new(co).run(inbox)); + loop { + match gen.async_resume().await { + GeneratorState::Yielded(output) => match output { + Output::SendMessage(message) => session.send(message).await?, + Output::NewIntersection(intersection) => { + session.push_pai_intersection(intersection) + } + Output::SignAndSendSubspaceCap(handle, cap) => { + let message = + session.sign_subspace_capabiltiy(store.secrets(), &cap, handle)?; + session.send(Box::new(message)).await?; + } + }, + GeneratorState::Complete(res) => break res, + } } } - pub async fn run( - mut self, - to_pai: flume::Receiver, - mut recv: Cancelable>, + #[cfg(test)] + pub async fn run_with_sink( + inbox: impl Stream + Unpin, + mut outbox: impl futures_util::Sink + Unpin, ) -> Result<(), Error> { + use futures_util::SinkExt; + let mut gen = genawaiter::rc::Gen::new(|co| PaiFinder::new(co).run(inbox)); loop { - tokio::select! { - action = to_pai.recv_async() => { - match action { - Err(_) => break, - Ok(action) => self.on_action(action).await? - } - } - message = recv.next() => { - match message { - None => break, - Some(message) => self.on_message(message?).await? 
- } - } + let y = gen.async_resume().await; + match y { + GeneratorState::Yielded(output) => outbox.send(output).await?, + GeneratorState::Complete(res) => break res, } } - Ok(()) } - async fn on_message(&mut self, message: IntersectionMessage) -> Result<(), Error> { - debug!("on_message {message:?}"); - match message { - IntersectionMessage::BindFragment(message) => self.receive_bind(message).await?, - IntersectionMessage::ReplyFragment(message) => self.receive_reply(message).await?, + pub fn new(co: genawaiter::rc::Co) -> Self { + Self { + co, + scalar: PaiScheme::get_scalar(), + our_intersection_handles: Default::default(), + their_intersection_handles: Default::default(), + fragments_info: Default::default(), + requested_subspace_cap_handles: Default::default(), + } + } + + pub async fn run(mut self, mut inbox: impl Stream + Unpin) -> Result<(), Error> { + while let Some(input) = inbox.next().await { + trace!("pai input {input:?}"); + self.input(input).await?; } Ok(()) } - async fn on_action(&mut self, action: ToPai) -> Result<(), Error> { - debug!("on_action {action:?}"); - match action { - ToPai::SubmitAuthorisation(auth) => self.submit_autorisation(auth).await?, - ToPai::ReceivedSubspaceCapRequest(handle) => { + async fn input(&mut self, input: Input) -> Result<(), Error> { + match input { + Input::SubmitAuthorisation(auth) => self.submit_autorisation(auth).await?, + Input::ReceivedMessage(message) => match message? { + IntersectionMessage::BindFragment(message) => self.receive_bind(message).await?, + IntersectionMessage::ReplyFragment(message) => self.receive_reply(message).await?, + }, + Input::ReceivedSubspaceCapRequest(handle) => { self.received_subspace_cap_request(handle).await? } - ToPai::ReceivedVerifiedSubspaceCapReply(handle, namespace) => { - self.received_verified_subspace_cap_reply(handle, namespace)? + Input::ReceivedVerifiedSubspaceCapReply(handle, namespace) => { + self.received_verified_subspace_cap_reply(handle, namespace) + .await? 
} - ToPai::ReceivedReadCapForIntersection(handle) => { - self.received_read_cap_for_intersection(handle)? + Input::ReceivedReadCapForIntersection(handle) => { + self.received_read_cap_for_intersection(handle).await? } } Ok(()) } async fn submit_autorisation(&mut self, authorisation: ReadAuthorisation) -> Result<(), Error> { + trace!(?authorisation, "pai submit auth"); let read_cap = authorisation.read_cap(); let fragment_kit = PaiScheme::get_fragment_kit(read_cap); let fragment_set = fragment_kit.into_fragment_set(); @@ -133,72 +159,57 @@ impl PaiFinder { FragmentSet::Complete(pairs) => { let last = pairs.len().wrapping_sub(1); for (i, pair) in pairs.into_iter().enumerate() { - let is_most_specific = i == last; - let (namespace_id, path) = pair.clone(); - let (handle, message) = self.submit_fragment(Fragment::Pair(pair), false)?; - let info = LocalFragmentInfo { - on_intersection: IntersectionAction::new_primary(is_most_specific), - authorisation: authorisation.clone(), - namespace_id, - path, - subspace: SubspaceArea::Any, - }; - self.fragments_info.insert(handle, info); - self.session.send(message).await?; + self.submit_fragment( + authorisation.clone(), + Fragment::Pair(pair), + FragmentKind::Primary, + i == last, + ) + .await?; } } FragmentSet::Selective { primary, secondary } => { let last = primary.len().wrapping_sub(1); for (i, triple) in primary.into_iter().enumerate() { - let is_most_specific = i == last; - let (namespace_id, subspace_id, path) = triple.clone(); - let (handle, message) = - self.submit_fragment(Fragment::Triple(triple), false)?; - let info = LocalFragmentInfo { - on_intersection: IntersectionAction::new_primary(is_most_specific), - authorisation: authorisation.clone(), - namespace_id, - path, - subspace: SubspaceArea::Id(subspace_id), - }; - self.fragments_info.insert(handle, info); - self.session.send(message).await?; + self.submit_fragment( + authorisation.clone(), + Fragment::Triple(triple), + FragmentKind::Primary, + i == last, + ) + 
.await?; } let last = secondary.len().wrapping_sub(1); for (i, pair) in secondary.into_iter().enumerate() { - let is_most_specific = i == last; - let (namespace_id, path) = pair.clone(); - let (handle, message) = self.submit_fragment(Fragment::Pair(pair), true)?; - let info = LocalFragmentInfo { - on_intersection: IntersectionAction::new_secondary(is_most_specific), - authorisation: authorisation.clone(), - namespace_id, - path, - subspace: SubspaceArea::Any, - }; - self.fragments_info.insert(handle, info); - self.session.send(message).await?; + self.submit_fragment( + authorisation.clone(), + Fragment::Pair(pair), + FragmentKind::Secondary, + i == last, + ) + .await?; } } } Ok(()) } - fn submit_fragment( + async fn submit_fragment( &mut self, + authorisation: ReadAuthorisation, fragment: Fragment, - is_secondary: bool, - ) -> Result<(IntersectionHandle, PaiBindFragment)> { - let unmixed = PaiScheme::hash_into_group(fragment); + kind: FragmentKind, + is_most_specific: bool, + ) -> Result { + let unmixed = PaiScheme::hash_into_group(&fragment); let multiplied = PaiScheme::scalar_mult(unmixed, self.scalar); - let info = FragmentInfo { - group: multiplied, - state: FragmentState::Pending, - is_secondary, - }; - let message = info.to_message(); - let handle = self.our_intersection_handles.bind(info); - Ok((handle, message)) + let group_state = GroupState::new_pending(multiplied, kind.is_secondary()); + let message = group_state.to_message(); + let handle = self.our_intersection_handles.bind(group_state); + let info = LocalFragmentInfo::new(authorisation, fragment, kind, is_most_specific); + self.fragments_info.insert(handle, info); + self.out(Output::SendMessage(message.into())).await; + Ok(handle) } async fn receive_bind(&mut self, message: PaiBindFragment) -> Result<()> { @@ -208,17 +219,13 @@ impl PaiFinder { } = message; let unmixed = PsiGroup::from_bytes(group_member)?; let multiplied = PaiScheme::scalar_mult(unmixed, self.scalar); - let fragment = FragmentInfo { 
- group: multiplied, - is_secondary, - state: FragmentState::Pending, - }; - let handle = self.their_intersection_handles.bind(fragment); + let group_state = GroupState::new_complete(multiplied, is_secondary); + let handle = self.their_intersection_handles.bind(group_state); let reply = PaiReplyFragment { handle, - group_member, + group_member: multiplied.to_bytes(), }; - self.session.send(reply).await?; + self.out(Output::SendMessage(reply.into())).await; self.check_for_intersection(handle, Scope::Theirs).await?; Ok(()) } @@ -228,14 +235,10 @@ impl PaiFinder { handle, group_member, } = message; - let group_member = PsiGroup::from_bytes(group_member)?; - let intersection = self.our_intersection_handles.try_get(&handle)?; - let fragment = FragmentInfo { - group: group_member, - is_secondary: intersection.is_secondary, - state: FragmentState::Complete, - }; - self.our_intersection_handles.update(handle, fragment)?; + let group = PsiGroup::from_bytes(group_member)?; + let our_state = self.our_intersection_handles.try_get(&handle)?; + let next_state = GroupState::new_complete(group, our_state.is_secondary); + self.our_intersection_handles.update(handle, next_state)?; self.check_for_intersection(handle, Scope::Ours).await?; Ok(()) } @@ -245,12 +248,15 @@ impl PaiFinder { handle: IntersectionHandle, scope: Scope, ) -> Result<(), Error> { - let store_to_check = match scope { + let store_to_get_handle_from = match scope { Scope::Ours => &self.our_intersection_handles, Scope::Theirs => &self.their_intersection_handles, }; - let intersection = store_to_check.try_get(&handle)?; - + let store_to_check_against = match scope { + Scope::Ours => &self.their_intersection_handles, + Scope::Theirs => &self.our_intersection_handles, + }; + let intersection = store_to_get_handle_from.try_get(&handle)?; if !intersection.is_complete() { return Ok(()); } @@ -258,7 +264,7 @@ impl PaiFinder { // Here we are looping through the whole contents of the handle store because... 
// otherwise we need to build a special handle store just for intersections. // Which we might do one day, but I'm not convinced it's worth it yet. - for (other_handle, other_intersection) in store_to_check.iter() { + for (other_handle, other_intersection) in store_to_check_against.iter() { if !other_intersection.completes_with(intersection) { continue; } @@ -269,29 +275,25 @@ impl PaiFinder { Scope::Theirs => *other_handle, }; - let fragment_info = self - .fragments_info - .get(&our_handle) - .ok_or(Error::MissingResource(our_handle.into()))?; - - match fragment_info.on_intersection { - IntersectionAction::BindReadCap => { - let intersection = fragment_info.to_pai_intersection(our_handle); - self.session.push_pai_intersection(intersection); + let info = self.fragment_info(&our_handle)?; + match info.on_intersection { + OnIntersection::BindReadCap => { + let intersection = info.to_pai_intersection(our_handle); + self.out(Output::NewIntersection(intersection)).await; } - IntersectionAction::RequestSubspaceCap => { + OnIntersection::RequestSubspaceCap => { self.requested_subspace_cap_handles.insert(our_handle); let message = PaiRequestSubspaceCapability { handle }; - self.session.send(message).await?; + self.out(Output::SendMessage(message.into())).await; } - IntersectionAction::ReplyReadCap | IntersectionAction::DoNothing => {} + OnIntersection::ReplyReadCap | OnIntersection::DoNothing => {} } } Ok(()) } - fn received_read_cap_for_intersection( + async fn received_read_cap_for_intersection( &mut self, their_handle: IntersectionHandle, ) -> Result<()> { @@ -300,19 +302,16 @@ impl PaiFinder { if !our_intersection.completes_with(their_intersection) { continue; } - let fragment_info = self - .fragments_info - .get(our_handle) - .ok_or(Error::MissingResource((*our_handle).into()))?; - if let IntersectionAction::ReplyReadCap = fragment_info.on_intersection { + let fragment_info = self.fragment_info(our_handle)?; + if let OnIntersection::ReplyReadCap = 
fragment_info.on_intersection { let intersection = fragment_info.to_pai_intersection(*our_handle); - self.session.push_pai_intersection(intersection); + self.out(Output::NewIntersection(intersection)).await; } } Ok(()) } - fn received_verified_subspace_cap_reply( + async fn received_verified_subspace_cap_reply( &mut self, handle: IntersectionHandle, namespace_id: NamespaceId, @@ -321,16 +320,12 @@ impl PaiFinder { return Err(PaiError::SubspaceCapRequestForInvalidHandle); } let _ = self.our_intersection_handles.try_get(&handle)?; - let fragment_info = self - .fragments_info - .get(&handle) - .ok_or(PaiError::SubspaceCapRequestForInvalidHandle)?; - + let fragment_info = self.fragment_info(&handle)?; if fragment_info.namespace_id != namespace_id { return Err(PaiError::SubspaceCapRequestForWrongNamespace); } let intersection = fragment_info.to_pai_intersection(handle); - self.session.push_pai_intersection(intersection); + self.out(Output::NewIntersection(intersection)).await; Ok(()) } @@ -346,24 +341,32 @@ impl PaiFinder { if !PaiScheme::is_group_equal(&result.group, &intersection.group) { continue; } - let fragment_info = self - .fragments_info - .get(our_handle) - .ok_or(PaiError::SubspaceCapRequestForInvalidHandle)?; + let fragment_info = self.fragment_info(our_handle)?; if let Some(cap) = fragment_info.authorisation.subspace_cap() { - let message = - self.session - .sign_subspace_capabiltiy(self.store.secrets(), cap, handle)?; - self.session.send(Box::new(message)).await?; + self.out(Output::SignAndSendSubspaceCap(handle, cap.clone())) + .await; } } Ok(()) } + + async fn out(&self, out: Output) { + self.co.yield_(out).await + } + + fn fragment_info( + &self, + handle: &IntersectionHandle, + ) -> Result<&LocalFragmentInfo, MissingResource> { + self.fragments_info + .get(handle) + .ok_or(MissingResource((*handle).into())) + } } #[derive(Debug)] pub struct LocalFragmentInfo { - on_intersection: IntersectionAction, + on_intersection: OnIntersection, authorisation: 
ReadAuthorisation, namespace_id: NamespaceId, // will be needed for spec-compliant encodings of read capabilities @@ -375,6 +378,22 @@ pub struct LocalFragmentInfo { } impl LocalFragmentInfo { + fn new( + authorisation: ReadAuthorisation, + fragment: Fragment, + kind: FragmentKind, + is_most_specific: bool, + ) -> Self { + let (namespace_id, subspace, path) = fragment.into_parts(); + let on_intersection = OnIntersection::new(kind, is_most_specific); + LocalFragmentInfo { + on_intersection, + authorisation, + namespace_id, + path, + subspace, + } + } fn to_pai_intersection(&self, handle: IntersectionHandle) -> PaiIntersection { PaiIntersection { authorisation: self.authorisation.clone(), @@ -384,25 +403,42 @@ impl LocalFragmentInfo { } #[derive(Debug, Clone, Eq, PartialEq)] -pub enum FragmentState { +enum PendingState { Pending, Complete, } #[derive(Debug, Clone, Eq, PartialEq)] -pub struct FragmentInfo { +struct GroupState { group: PsiGroup, - state: FragmentState, + state: PendingState, is_secondary: bool, } +impl GroupState { + fn new_pending(group: PsiGroup, is_secondary: bool) -> Self { + Self { + group, + state: PendingState::Pending, + is_secondary, + } + } + fn new_complete(group: PsiGroup, is_secondary: bool) -> Self { + Self { + group, + state: PendingState::Complete, + is_secondary, + } + } +} + #[derive(Debug)] pub struct PaiIntersection { pub authorisation: ReadAuthorisation, pub handle: IntersectionHandle, } -impl FragmentInfo { +impl GroupState { fn to_message(&self) -> PaiBindFragment { PaiBindFragment { group_member: self.group.to_bytes(), @@ -411,7 +447,7 @@ impl FragmentInfo { } fn is_complete(&self) -> bool { - matches!(self.state, FragmentState::Complete) + matches!(self.state, PendingState::Complete) } fn is_secondary(&self) -> bool { @@ -433,27 +469,174 @@ impl FragmentInfo { } #[derive(Debug, Clone, Copy)] -pub enum IntersectionAction { +pub enum OnIntersection { DoNothing, BindReadCap, RequestSubspaceCap, ReplyReadCap, } -impl 
IntersectionAction { - pub fn new_primary(is_most_specific: bool) -> Self { - if is_most_specific { - IntersectionAction::BindReadCap - } else { - IntersectionAction::ReplyReadCap +impl OnIntersection { + pub fn new(fragment_kind: FragmentKind, is_most_specific: bool) -> Self { + match (fragment_kind, is_most_specific) { + (FragmentKind::Primary, true) => OnIntersection::BindReadCap, + (FragmentKind::Primary, false) => OnIntersection::ReplyReadCap, + (FragmentKind::Secondary, true) => OnIntersection::RequestSubspaceCap, + (FragmentKind::Secondary, false) => OnIntersection::DoNothing, + } + } +} + +#[cfg(test)] +mod tests { + use futures_util::SinkExt; + use rand_core::SeedableRng; + use tokio::task::{spawn_local, JoinHandle}; + use tracing::{error_span, Instrument, Span}; + + use crate::{ + proto::{ + keys::{NamespaceKind, NamespaceSecretKey, UserSecretKey}, + meadowcap::{AccessMode, McCapability}, + sync::{ + IntersectionMessage, Message, PaiBindFragment, PaiReplyFragment, ReadAuthorisation, + }, + }, + session::Error, + }; + + use super::{Input, Output, PaiFinder}; + + #[tokio::test] + async fn pai_smoke() { + iroh_test::logging::setup_multithreaded(); + let local = tokio::task::LocalSet::new(); + local.run_until(pai_smoke_inner()).await + } + async fn pai_smoke_inner() { + let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(1); + + let (alfie, betty) = Handle::create_two(); + + let namespace_secret = NamespaceSecretKey::generate(&mut rng, NamespaceKind::Owned); + let alfie_secret = UserSecretKey::generate(&mut rng); + let betty_secret = UserSecretKey::generate(&mut rng); + let alfie_public = alfie_secret.public_key(); + let betty_public = betty_secret.public_key(); + + let read_cap_alfie = + McCapability::new_owned(&namespace_secret, alfie_public, AccessMode::Read); + let read_cap_betty = + McCapability::new_owned(&namespace_secret, betty_public, AccessMode::Read); + let auth_alfie = ReadAuthorisation::new(read_cap_alfie.clone(), None); + let auth_betty = 
ReadAuthorisation::new(read_cap_betty.clone(), None); + + alfie.submit(auth_alfie.clone()).await; + let alfie_bind = alfie.next_out_message::().await; + + betty.submit(auth_betty.clone()).await; + let betty_bind = betty.next_out_message::().await; + + alfie.receive(betty_bind).await; + betty.receive(alfie_bind).await; + + let alfie_reply = alfie.next_out_message::().await; + let betty_reply = betty.next_out_message::().await; + + betty.receive(alfie_reply).await; + alfie.receive(betty_reply).await; + + match alfie.next_out().await { + Output::NewIntersection(intersection) => { + assert_eq!( + intersection.authorisation, + ReadAuthorisation::new(read_cap_alfie, None) + ); + } + out => panic!("expected NewIntersection but got {out:?}"), + } + match betty.next_out().await { + Output::NewIntersection(intersection) => { + assert_eq!( + intersection.authorisation, + ReadAuthorisation::new(read_cap_betty, None) + ); + } + out => panic!("expected NewIntersection but got {out:?}"), } + + alfie.join().await; + betty.join().await; + } + + struct Handle { + task: JoinHandle>, + input: flume::Sender, + output: flume::Receiver, } + impl Handle { + pub fn create_two() -> (Self, Self) { + ( + Self::new(error_span!("alfie")), + Self::new(error_span!("betty")), + ) + } + + pub fn new(span: Span) -> Self { + let (input, input_rx) = flume::bounded(1); + let (output_tx, output) = flume::bounded(1); + let task = spawn_local( + async move { + PaiFinder::run_with_sink( + input_rx.into_stream(), + output_tx + .into_sink() + .sink_map_err(|_| Error::InvalidState("failed to send")), + ) + .await + } + .instrument(span), + ); + Handle { + input, + output, + task, + } + } + + pub async fn input(&self, input: Input) { + self.input.send_async(input).await.unwrap(); + } + + pub async fn submit(&self, auth: ReadAuthorisation) { + self.input(Input::SubmitAuthorisation(auth)).await + } + + pub async fn receive(&self, message: impl Into) { + self.input + 
.send_async(Input::ReceivedMessage(Ok(message.into()))) + .await + .unwrap(); + } + + pub async fn next_out(&self) -> Output { + self.output.recv_async().await.unwrap() + } + + pub async fn next_out_message>(&self) -> T { + match self.next_out().await { + Output::SendMessage(message) => match T::try_from(message) { + Err(_err) => panic!("wrong message type"), + Ok(message) => message, + }, + other => panic!("expected SendMessage but got {other:?}"), + } + } - pub fn new_secondary(is_most_specific: bool) -> Self { - if is_most_specific { - IntersectionAction::RequestSubspaceCap - } else { - IntersectionAction::DoNothing + pub async fn join(self) { + drop(self.input); + drop(self.output); + self.task.await.unwrap().unwrap() } } } diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index c235a06ec5..4f3b16e6a1 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -1,4 +1,5 @@ -use futures_lite::StreamExt; +use futures_concurrency::stream::StreamExt as _; +use futures_lite::StreamExt as _; use strum::IntoEnumIterator; use tokio_util::sync::CancellationToken; use tracing::{debug, error_span, trace, warn}; @@ -7,7 +8,7 @@ use crate::{ proto::sync::{ControlIssueGuarantee, LogicalChannel, Message}, session::{ channels::LogicalChannelReceivers, - pai::{PaiFinder, ToPai}, + pai::{self, PaiFinder}, Error, Session, }, store::{traits::Storage, Store}, @@ -53,10 +54,13 @@ impl Session { let mut data_recv = Cancelable::new(data_recv, cancel_token.clone()); // Setup the private area intersection finder. 
- let pai_finder = PaiFinder::new(self.clone(), store.clone()); - let (to_pai_tx, to_pai_rx) = flume::bounded(128); + let (pai_inbox_tx, pai_inbox_rx) = flume::bounded(128); self.spawn(error_span!("pai"), { - move |_session| async move { pai_finder.run(to_pai_rx, intersection_recv).await } + let store = store.clone(); + let inbox = pai_inbox_rx + .into_stream() + .merge(intersection_recv.map(pai::Input::ReceivedMessage)); + move |session| async move { PaiFinder::run_with_session(session, store, inbox).await } }); // Spawn a task to handle incoming static tokens. @@ -90,13 +94,13 @@ impl Session { // Spawn a task to handle incoming capabilities. self.spawn(error_span!("cap"), { - let to_pai = to_pai_tx.clone(); + let to_pai = pai_inbox_tx.clone(); move |session| async move { while let Some(message) = capability_recv.try_next().await? { let handle = message.handle; session.on_setup_bind_read_capability(message)?; to_pai - .send_async(ToPai::ReceivedReadCapForIntersection(handle)) + .send_async(pai::Input::ReceivedReadCapForIntersection(handle)) .await .map_err(|_| Error::InvalidState("PAI actor dead"))?; } @@ -145,7 +149,7 @@ impl Session { self.spawn(error_span!("ctl"), { let cancel_token = cancel_token.clone(); move |session| async move { - let res = control_loop(session, control_recv, to_pai_tx).await; + let res = control_loop(session, control_recv, pai_inbox_tx).await; cancel_token.cancel(); res } @@ -202,7 +206,7 @@ impl Session { async fn control_loop( session: Session, mut control_recv: Cancelable>, - to_pai: flume::Sender, + to_pai: flume::Sender, ) -> Result<(), Error> { debug!(role = ?session.our_role(), "start session"); let mut commitment_revealed = false; @@ -240,14 +244,14 @@ async fn control_loop( } Message::PaiRequestSubspaceCapability(msg) => { to_pai - .send_async(ToPai::ReceivedSubspaceCapRequest(msg.handle)) + .send_async(pai::Input::ReceivedSubspaceCapRequest(msg.handle)) .await .map_err(|_| Error::InvalidState("PAI actor dead"))?; } 
Message::PaiReplySubspaceCapability(msg) => { session.verify_subspace_capability(&msg)?; to_pai - .send_async(ToPai::ReceivedVerifiedSubspaceCapReply( + .send_async(pai::Input::ReceivedVerifiedSubspaceCapReply( msg.handle, msg.capability.granted_namespace().id(), )) @@ -261,10 +265,10 @@ async fn control_loop( Ok(()) } -async fn setup_pai(session: Session, to_pai: flume::Sender) -> Result<(), Error> { +async fn setup_pai(session: Session, to_pai: flume::Sender) -> Result<(), Error> { for authorisation in session.interests().keys() { to_pai - .send_async(ToPai::SubmitAuthorisation(authorisation.clone())) + .send_async(pai::Input::SubmitAuthorisation(authorisation.clone())) .await .map_err(|_| Error::InvalidState("PAI actor dead"))?; } From 186f80c6b9e7168106ce64e1de6617e684def9b1 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Fri, 28 Jun 2024 17:01:18 +0200 Subject: [PATCH 076/198] refactor: cleanup and test improvements --- iroh-willow/src/proto/meadowcap.rs | 11 +++ iroh-willow/src/proto/sync.rs | 9 ++- iroh-willow/src/session/pai.rs | 124 ++++++++++++----------------- 3 files changed, 70 insertions(+), 74 deletions(-) diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index ee5f59326f..8f2ece9fce 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -596,6 +596,17 @@ pub struct McSubspaceCapability { } impl McSubspaceCapability { + pub fn new(namespace_secret_key: &NamespaceSecretKey, user_key: UserPublicKey) -> Self { + let namespace_key = namespace_secret_key.public_key(); + let handover = Self::initial_handover(&user_key); + let initial_authorisation = namespace_secret_key.sign(&handover); + Self { + namespace_key, + user_key, + initial_authorisation, + delegations: Default::default(), + } + } pub fn receiver(&self) -> &UserPublicKey { &self.user_key } diff --git a/iroh-willow/src/proto/sync.rs b/iroh-willow/src/proto/sync.rs index 101b9501f3..39f222d9f5 100644 --- 
a/iroh-willow/src/proto/sync.rs +++ b/iroh-willow/src/proto/sync.rs @@ -9,7 +9,8 @@ use crate::util::codec::{DecodeOutcome, Decoder, Encoder}; use super::{ grouping::{Area, AreaOfInterest, ThreeDRange}, - meadowcap, + keys::{NamespaceSecretKey, UserPublicKey}, + meadowcap::{self, AccessMode}, willow::{Entry, NamespaceId, DIGEST_LENGTH}, }; @@ -69,6 +70,12 @@ impl ReadAuthorisation { Self(Arc::new((read_cap, subspace_cap))) } + pub fn new_owned(namespace_secret: &NamespaceSecretKey, user_key: UserPublicKey) -> Self { + let read_cap = ReadCapability::new_owned(namespace_secret, user_key, AccessMode::Read); + let subspace_cap = SubspaceCapability::new(namespace_secret, user_key); + Self::new(read_cap, Some(subspace_cap)) + } + pub fn read_cap(&self) -> &ReadCapability { &self.0 .0 } diff --git a/iroh-willow/src/session/pai.rs b/iroh-willow/src/session/pai.rs index 864fe89097..6baa21014c 100644 --- a/iroh-willow/src/session/pai.rs +++ b/iroh-willow/src/session/pai.rs @@ -42,6 +42,12 @@ pub enum PaiError { MissingResource(#[from] MissingResource), } +#[derive(Debug)] +pub struct PaiIntersection { + pub authorisation: ReadAuthorisation, + pub handle: IntersectionHandle, +} + #[derive(Debug)] pub enum Input { SubmitAuthorisation(ReadAuthorisation), @@ -131,7 +137,7 @@ impl PaiFinder { async fn input(&mut self, input: Input) -> Result<(), Error> { match input { - Input::SubmitAuthorisation(auth) => self.submit_autorisation(auth).await?, + Input::SubmitAuthorisation(auth) => self.submit_autorisation(auth).await, Input::ReceivedMessage(message) => match message? 
{ IntersectionMessage::BindFragment(message) => self.receive_bind(message).await?, IntersectionMessage::ReplyFragment(message) => self.receive_reply(message).await?, @@ -150,7 +156,7 @@ impl PaiFinder { Ok(()) } - async fn submit_autorisation(&mut self, authorisation: ReadAuthorisation) -> Result<(), Error> { + async fn submit_autorisation(&mut self, authorisation: ReadAuthorisation) { trace!(?authorisation, "pai submit auth"); let read_cap = authorisation.read_cap(); let fragment_kit = PaiScheme::get_fragment_kit(read_cap); @@ -165,7 +171,7 @@ impl PaiFinder { FragmentKind::Primary, i == last, ) - .await?; + .await; } } FragmentSet::Selective { primary, secondary } => { @@ -177,7 +183,7 @@ impl PaiFinder { FragmentKind::Primary, i == last, ) - .await?; + .await; } let last = secondary.len().wrapping_sub(1); for (i, pair) in secondary.into_iter().enumerate() { @@ -187,11 +193,10 @@ impl PaiFinder { FragmentKind::Secondary, i == last, ) - .await?; + .await; } } } - Ok(()) } async fn submit_fragment( @@ -200,16 +205,16 @@ impl PaiFinder { fragment: Fragment, kind: FragmentKind, is_most_specific: bool, - ) -> Result { + ) -> IntersectionHandle { let unmixed = PaiScheme::hash_into_group(&fragment); let multiplied = PaiScheme::scalar_mult(unmixed, self.scalar); let group_state = GroupState::new_pending(multiplied, kind.is_secondary()); - let message = group_state.to_message(); + let message = group_state.to_bind_fragment_message(); let handle = self.our_intersection_handles.bind(group_state); let info = LocalFragmentInfo::new(authorisation, fragment, kind, is_most_specific); self.fragments_info.insert(handle, info); self.out(Output::SendMessage(message.into())).await; - Ok(handle) + handle } async fn receive_bind(&mut self, message: PaiBindFragment) -> Result<()> { @@ -221,11 +226,11 @@ impl PaiFinder { let multiplied = PaiScheme::scalar_mult(unmixed, self.scalar); let group_state = GroupState::new_complete(multiplied, is_secondary); let handle = 
self.their_intersection_handles.bind(group_state); - let reply = PaiReplyFragment { + let message = PaiReplyFragment { handle, group_member: multiplied.to_bytes(), }; - self.out(Output::SendMessage(reply.into())).await; + self.out(Output::SendMessage(message.into())).await; self.check_for_intersection(handle, Scope::Theirs).await?; Ok(()) } @@ -394,6 +399,7 @@ impl LocalFragmentInfo { subspace, } } + fn to_pai_intersection(&self, handle: IntersectionHandle) -> PaiIntersection { PaiIntersection { authorisation: self.authorisation.clone(), @@ -432,14 +438,8 @@ impl GroupState { } } -#[derive(Debug)] -pub struct PaiIntersection { - pub authorisation: ReadAuthorisation, - pub handle: IntersectionHandle, -} - impl GroupState { - fn to_message(&self) -> PaiBindFragment { + fn to_bind_fragment_message(&self) -> PaiBindFragment { PaiBindFragment { group_member: self.group.to_bytes(), is_secondary: self.is_secondary, @@ -497,7 +497,6 @@ mod tests { use crate::{ proto::{ keys::{NamespaceKind, NamespaceSecretKey, UserSecretKey}, - meadowcap::{AccessMode, McCapability}, sync::{ IntersectionMessage, Message, PaiBindFragment, PaiReplyFragment, ReadAuthorisation, }, @@ -516,59 +515,39 @@ mod tests { async fn pai_smoke_inner() { let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(1); - let (alfie, betty) = Handle::create_two(); + let namespace = NamespaceSecretKey::generate(&mut rng, NamespaceKind::Owned); - let namespace_secret = NamespaceSecretKey::generate(&mut rng, NamespaceKind::Owned); let alfie_secret = UserSecretKey::generate(&mut rng); let betty_secret = UserSecretKey::generate(&mut rng); let alfie_public = alfie_secret.public_key(); let betty_public = betty_secret.public_key(); - let read_cap_alfie = - McCapability::new_owned(&namespace_secret, alfie_public, AccessMode::Read); - let read_cap_betty = - McCapability::new_owned(&namespace_secret, betty_public, AccessMode::Read); - let auth_alfie = ReadAuthorisation::new(read_cap_alfie.clone(), None); - let auth_betty = 
ReadAuthorisation::new(read_cap_betty.clone(), None); + let auth_alfie = ReadAuthorisation::new_owned(&namespace, alfie_public); + let auth_betty = ReadAuthorisation::new_owned(&namespace, betty_public); - alfie.submit(auth_alfie.clone()).await; - let alfie_bind = alfie.next_out_message::().await; + let (alfie, betty) = Handle::create_two(); + alfie.submit(auth_alfie.clone()).await; betty.submit(auth_betty.clone()).await; - let betty_bind = betty.next_out_message::().await; - - alfie.receive(betty_bind).await; - betty.receive(alfie_bind).await; - let alfie_reply = alfie.next_out_message::().await; - let betty_reply = betty.next_out_message::().await; + transfer::(&alfie, &betty).await; + transfer::(&betty, &alfie).await; + transfer::(&alfie, &betty).await; + transfer::(&betty, &alfie).await; - betty.receive(alfie_reply).await; - alfie.receive(betty_reply).await; - - match alfie.next_out().await { - Output::NewIntersection(intersection) => { - assert_eq!( - intersection.authorisation, - ReadAuthorisation::new(read_cap_alfie, None) - ); - } - out => panic!("expected NewIntersection but got {out:?}"), - } - match betty.next_out().await { - Output::NewIntersection(intersection) => { - assert_eq!( - intersection.authorisation, - ReadAuthorisation::new(read_cap_betty, None) - ); - } - out => panic!("expected NewIntersection but got {out:?}"), - } + assert_eq!(alfie.next_intersection().await, auth_alfie); + assert_eq!(betty.next_intersection().await, auth_betty); alfie.join().await; betty.join().await; } + async fn transfer + Into>(from: &Handle, to: &Handle) { + let message = from.next_message::().await; + let message: IntersectionMessage = message.into(); + to.receive(message).await; + } + struct Handle { task: JoinHandle>, input: flume::Sender, @@ -585,17 +564,12 @@ mod tests { pub fn new(span: Span) -> Self { let (input, input_rx) = flume::bounded(1); let (output_tx, output) = flume::bounded(1); + let outbox = output_tx + .into_sink() + .sink_map_err(|_| 
Error::InvalidState("failed to send")); + let inbox = input_rx.into_stream(); let task = spawn_local( - async move { - PaiFinder::run_with_sink( - input_rx.into_stream(), - output_tx - .into_sink() - .sink_map_err(|_| Error::InvalidState("failed to send")), - ) - .await - } - .instrument(span), + async move { PaiFinder::run_with_sink(inbox, outbox).await }.instrument(span), ); Handle { input, @@ -613,18 +587,22 @@ mod tests { } pub async fn receive(&self, message: impl Into) { - self.input - .send_async(Input::ReceivedMessage(Ok(message.into()))) - .await - .unwrap(); + self.input(Input::ReceivedMessage(Ok(message.into()))).await } - pub async fn next_out(&self) -> Output { + pub async fn next(&self) -> Output { self.output.recv_async().await.unwrap() } - pub async fn next_out_message>(&self) -> T { - match self.next_out().await { + pub async fn next_intersection(&self) -> ReadAuthorisation { + match self.next().await { + Output::NewIntersection(intersection) => intersection.authorisation, + out => panic!("expected NewIntersection but got {out:?}"), + } + } + + pub async fn next_message>(&self) -> T { + match self.next().await { Output::SendMessage(message) => match T::try_from(message) { Err(_err) => panic!("wrong message type"), Ok(message) => message, From 97e213a5074eb67f9d068d9c0844180dc00bfb21 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Sat, 29 Jun 2024 18:36:17 +0200 Subject: [PATCH 077/198] add test for pai with subspace capabilities --- iroh-willow/src/proto/grouping.rs | 4 + iroh-willow/src/proto/meadowcap.rs | 13 +++ iroh-willow/src/proto/sync.rs | 21 ++++- iroh-willow/src/session/pai.rs | 142 ++++++++++++++++++++++++----- 4 files changed, 155 insertions(+), 25 deletions(-) diff --git a/iroh-willow/src/proto/grouping.rs b/iroh-willow/src/proto/grouping.rs index 5da7297133..454afd09af 100644 --- a/iroh-willow/src/proto/grouping.rs +++ b/iroh-willow/src/proto/grouping.rs @@ -464,6 +464,10 @@ pub enum SubspaceArea { } impl SubspaceArea { 
+ pub fn is_any(&self) -> bool { + matches!(self, SubspaceArea::Any) + } + fn includes(&self, other: &SubspaceArea) -> bool { match (self, other) { (SubspaceArea::Any, SubspaceArea::Any) => true, diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index 8f2ece9fce..41e68c99a9 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -263,6 +263,19 @@ impl Encoder for McCapability { } } +impl Encoder for McSubspaceCapability { + // TODO: Use spec-compliant encoding instead of postcard. + fn encoded_len(&self) -> usize { + postcard::experimental::serialized_size(&self).unwrap() + } + + // TODO: Use spec-compliant encoding instead of postcard. + fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { + postcard::to_io(&self, out)?; + Ok(()) + } +} + #[derive(Debug, Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Hash, Ord, PartialOrd)] pub enum AccessMode { Read, diff --git a/iroh-willow/src/proto/sync.rs b/iroh-willow/src/proto/sync.rs index 39f222d9f5..6da55fd42c 100644 --- a/iroh-willow/src/proto/sync.rs +++ b/iroh-willow/src/proto/sync.rs @@ -5,7 +5,10 @@ use iroh_base::hash::Hash; use serde::{Deserialize, Serialize}; use strum::{EnumCount, VariantArray}; -use crate::util::codec::{DecodeOutcome, Decoder, Encoder}; +use crate::{ + proto::keys::UserSecretKey, + util::codec::{DecodeOutcome, Decoder, Encoder}, +}; use super::{ grouping::{Area, AreaOfInterest, ThreeDRange}, @@ -87,6 +90,22 @@ impl ReadAuthorisation { pub fn namespace(&self) -> NamespaceId { self.0 .0.granted_namespace().id() } + + pub fn delegate( + &self, + user_secret: &UserSecretKey, + new_user: UserPublicKey, + new_area: Area, + ) -> anyhow::Result { + let subspace_cap = match self.subspace_cap() { + Some(subspace_cap) if new_area.subspace.is_any() && !new_area.path.is_empty() => { + Some(subspace_cap.delegate(user_secret, new_user)?) 
+ } + _ => None, + }; + let read_cap = self.read_cap().delegate(user_secret, new_user, new_area)?; + Ok(Self::new(read_cap, subspace_cap)) + } } /// The different resource handles employed by the WGPS. diff --git a/iroh-willow/src/session/pai.rs b/iroh-willow/src/session/pai.rs index 6baa21014c..771abd1aef 100644 --- a/iroh-willow/src/session/pai.rs +++ b/iroh-willow/src/session/pai.rs @@ -490,40 +490,39 @@ impl OnIntersection { #[cfg(test)] mod tests { use futures_util::SinkExt; - use rand_core::SeedableRng; - use tokio::task::{spawn_local, JoinHandle}; + use rand_core::{CryptoRngCore, SeedableRng}; + use tokio::task::{spawn_local, JoinHandle, LocalSet}; use tracing::{error_span, Instrument, Span}; use crate::{ proto::{ - keys::{NamespaceKind, NamespaceSecretKey, UserSecretKey}, + grouping::{Area, SubspaceArea}, + keys::{NamespaceKind, NamespaceSecretKey, UserPublicKey, UserSecretKey}, sync::{ - IntersectionMessage, Message, PaiBindFragment, PaiReplyFragment, ReadAuthorisation, + IntersectionMessage, Message, PaiBindFragment, PaiReplyFragment, + PaiRequestSubspaceCapability, ReadAuthorisation, }, + willow::Path, }, - session::Error, + session::{pai::PaiIntersection, Error}, }; use super::{Input, Output, PaiFinder}; #[tokio::test] async fn pai_smoke() { - iroh_test::logging::setup_multithreaded(); - let local = tokio::task::LocalSet::new(); - local.run_until(pai_smoke_inner()).await + let _guard = iroh_test::logging::setup(); + LocalSet::new().run_until(pai_smoke_inner()).await } async fn pai_smoke_inner() { let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(1); + let namespace_secret = NamespaceSecretKey::generate(&mut rng, NamespaceKind::Owned); - let namespace = NamespaceSecretKey::generate(&mut rng, NamespaceKind::Owned); - - let alfie_secret = UserSecretKey::generate(&mut rng); - let betty_secret = UserSecretKey::generate(&mut rng); - let alfie_public = alfie_secret.public_key(); - let betty_public = betty_secret.public_key(); + let (_, alfie_public) = 
keypair(&mut rng); + let (_, betty_public) = keypair(&mut rng); - let auth_alfie = ReadAuthorisation::new_owned(&namespace, alfie_public); - let auth_betty = ReadAuthorisation::new_owned(&namespace, betty_public); + let auth_alfie = ReadAuthorisation::new_owned(&namespace_secret, alfie_public); + let auth_betty = ReadAuthorisation::new_owned(&namespace_secret, betty_public); let (alfie, betty) = Handle::create_two(); @@ -535,13 +534,101 @@ mod tests { transfer::(&alfie, &betty).await; transfer::(&betty, &alfie).await; - assert_eq!(alfie.next_intersection().await, auth_alfie); - assert_eq!(betty.next_intersection().await, auth_betty); + assert_eq!(alfie.next_intersection().await.authorisation, auth_alfie); + assert_eq!(betty.next_intersection().await.authorisation, auth_betty); + + alfie.join().await; + betty.join().await; + } + + #[tokio::test] + async fn pai_subspace() { + let _guard = iroh_test::logging::setup(); + LocalSet::new().run_until(pai_subspace_inner()).await + } + async fn pai_subspace_inner() { + let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(1); + let namespace = NamespaceSecretKey::generate(&mut rng, NamespaceKind::Owned); + + let (root_secret, root_public) = keypair(&mut rng); + let root_auth = ReadAuthorisation::new_owned(&namespace, root_public); + + let (_, alfie_public) = keypair(&mut rng); + let (_, betty_public) = keypair(&mut rng); + let (_, gemma_public) = keypair(&mut rng); + + let alfie_area = Area::new( + SubspaceArea::Id(gemma_public.id()), + Path::empty(), + Default::default(), + ); + let alfie_auth = root_auth + .delegate(&root_secret, alfie_public, alfie_area) + .unwrap(); + assert!(alfie_auth.subspace_cap().is_none()); + + let betty_area = Area::new( + SubspaceArea::Any, + Path::new(&[b"chess"]).unwrap(), + Default::default(), + ); + let betty_auth = root_auth + .delegate(&root_secret, betty_public, betty_area) + .unwrap(); + assert!(betty_auth.subspace_cap().is_some()); + + let (alfie, betty) = Handle::create_two(); + + 
alfie.submit(alfie_auth.clone()).await; + betty.submit(betty_auth.clone()).await; + + transfer::(&alfie, &betty).await; + transfer::(&betty, &alfie).await; + + transfer::(&alfie, &betty).await; + transfer::(&betty, &alfie).await; + + transfer::(&alfie, &betty).await; + transfer::(&betty, &alfie).await; + + transfer::(&alfie, &betty).await; + transfer::(&betty, &alfie).await; + + let next: PaiRequestSubspaceCapability = alfie.next_message().await; + betty + .input(Input::ReceivedSubspaceCapRequest(next.handle)) + .await; + + let (handle, cap) = match betty.next().await { + Output::SignAndSendSubspaceCap(handle, cap) => (handle, cap), + other => panic!("expected SignAndSendSubspaceCap but got {other:?}"), + }; + + assert_eq!(&cap, betty_auth.subspace_cap().unwrap()); + let namespace = cap.granted_namespace().id(); + alfie + .input(Input::ReceivedVerifiedSubspaceCapReply(handle, namespace)) + .await; + + let next = alfie.next_intersection().await; + assert_eq!(next.authorisation, alfie_auth); + betty + .input(Input::ReceivedReadCapForIntersection(next.handle)) + .await; + + let next = betty.next_intersection().await; + assert_eq!(next.authorisation, betty_auth); alfie.join().await; betty.join().await; } + fn keypair(rng: &mut R) -> (UserSecretKey, UserPublicKey) { + let secret = UserSecretKey::generate(rng); + let public = secret.public_key(); + (secret, public) + } + async fn transfer + Into>(from: &Handle, to: &Handle) { let message = from.next_message::().await; let message: IntersectionMessage = message.into(); @@ -594,19 +681,26 @@ mod tests { self.output.recv_async().await.unwrap() } - pub async fn next_intersection(&self) -> ReadAuthorisation { + pub async fn next_intersection(&self) -> PaiIntersection { match self.next().await { - Output::NewIntersection(intersection) => intersection.authorisation, + Output::NewIntersection(intersection) => intersection, out => panic!("expected NewIntersection but got {out:?}"), } } pub async fn next_message>(&self) -> T { 
match self.next().await { - Output::SendMessage(message) => match T::try_from(message) { - Err(_err) => panic!("wrong message type"), - Ok(message) => message, - }, + Output::SendMessage(message) => { + let dbg = format!("{}", message); + match T::try_from(message) { + Err(_err) => panic!( + "wrong message type: expected {} but got {:?}", + std::any::type_name::(), + dbg + ), + Ok(message) => message, + } + } other => panic!("expected SendMessage but got {other:?}"), } } From 93ca603dd0ab0491fa050caf8fcccefc44758cd4 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Sat, 29 Jun 2024 18:37:02 +0200 Subject: [PATCH 078/198] refactor: put capabilities in arcs --- iroh-willow/src/proto/meadowcap.rs | 22 +++++++++++++--------- iroh-willow/src/proto/sync.rs | 25 +++++++++++-------------- 2 files changed, 24 insertions(+), 23 deletions(-) diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index 41e68c99a9..761d52c05f 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -1,4 +1,4 @@ -use std::io::Write; +use std::{io::Write, sync::Arc}; use serde::{Deserialize, Serialize}; @@ -138,8 +138,8 @@ impl ValidatedCapability { Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash, Ord, PartialOrd, derive_more::From, )] pub enum McCapability { - Communal(CommunalCapability), - Owned(OwnedCapability), + Communal(Arc), + Owned(Arc), } impl McCapability { @@ -148,11 +148,11 @@ impl McCapability { user_key: UserPublicKey, access_mode: AccessMode, ) -> Self { - McCapability::Owned(OwnedCapability::new( + McCapability::Owned(Arc::new(OwnedCapability::new( namespace_secret, user_key, access_mode, - )) + ))) } pub fn new_communal( @@ -160,11 +160,11 @@ impl McCapability { user_key: UserPublicKey, access_mode: AccessMode, ) -> Self { - McCapability::Communal(CommunalCapability::new( + McCapability::Communal(Arc::new(CommunalCapability::new( namespace_key, user_key, access_mode, - )) + ))) } pub fn 
access_mode(&self) -> AccessMode { match self { @@ -243,8 +243,12 @@ impl McCapability { new_area: Area, ) -> anyhow::Result { let cap = match self { - Self::Communal(cap) => Self::Communal(cap.delegate(user_secret, new_user, new_area)?), - Self::Owned(cap) => Self::Owned(cap.delegate(user_secret, new_user, new_area)?), + Self::Communal(cap) => { + Self::Communal(Arc::new(cap.delegate(user_secret, new_user, new_area)?)) + } + Self::Owned(cap) => { + Self::Owned(Arc::new(cap.delegate(user_secret, new_user, new_area)?)) + } }; Ok(cap) } diff --git a/iroh-willow/src/proto/sync.rs b/iroh-willow/src/proto/sync.rs index 6da55fd42c..70c871e81f 100644 --- a/iroh-willow/src/proto/sync.rs +++ b/iroh-willow/src/proto/sync.rs @@ -52,7 +52,7 @@ pub type ReadCapability = meadowcap::McCapability; /// Each subspace capability must have a single receiver (a public key of some signature scheme), /// and a single granted namespace (a NamespaceId). /// The receiver can authenticate itself by signing a collaboratively selected nonce. -pub type SubspaceCapability = meadowcap::McSubspaceCapability; +pub type SubspaceCapability = Arc; pub type SyncSignature = meadowcap::UserSignature; @@ -60,35 +60,32 @@ pub type Receiver = meadowcap::UserPublicKey; /// Represents an authorisation to read an area of data in a Namespace. 
#[derive(Debug, Clone, Serialize, Deserialize, Hash, Eq, PartialEq, Ord, PartialOrd)] -pub struct ReadAuthorisation(Arc<(ReadCapability, Option)>); - -impl From for ReadAuthorisation { - fn from(value: ReadCapability) -> Self { - Self(Arc::new((value, None))) - } -} +pub struct ReadAuthorisation(ReadCapability, Option); impl ReadAuthorisation { pub fn new(read_cap: ReadCapability, subspace_cap: Option) -> Self { - Self(Arc::new((read_cap, subspace_cap))) + Self(read_cap, subspace_cap) } pub fn new_owned(namespace_secret: &NamespaceSecretKey, user_key: UserPublicKey) -> Self { let read_cap = ReadCapability::new_owned(namespace_secret, user_key, AccessMode::Read); - let subspace_cap = SubspaceCapability::new(namespace_secret, user_key); + let subspace_cap = Arc::new(meadowcap::McSubspaceCapability::new( + namespace_secret, + user_key, + )); Self::new(read_cap, Some(subspace_cap)) } pub fn read_cap(&self) -> &ReadCapability { - &self.0 .0 + &self.0 } pub fn subspace_cap(&self) -> Option<&SubspaceCapability> { - self.0 .1.as_ref() + self.1.as_ref() } pub fn namespace(&self) -> NamespaceId { - self.0 .0.granted_namespace().id() + self.0.granted_namespace().id() } pub fn delegate( @@ -99,7 +96,7 @@ impl ReadAuthorisation { ) -> anyhow::Result { let subspace_cap = match self.subspace_cap() { Some(subspace_cap) if new_area.subspace.is_any() && !new_area.path.is_empty() => { - Some(subspace_cap.delegate(user_secret, new_user)?) 
+ Some(Arc::new(subspace_cap.delegate(user_secret, new_user)?)) } _ => None, }; From 2c81e635758f1baeb869bfac99da21198408ff95 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Mon, 1 Jul 2024 16:40:12 +0200 Subject: [PATCH 079/198] fix typo --- iroh-willow/src/session/pai.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/iroh-willow/src/session/pai.rs b/iroh-willow/src/session/pai.rs index 771abd1aef..92523e7cab 100644 --- a/iroh-willow/src/session/pai.rs +++ b/iroh-willow/src/session/pai.rs @@ -137,7 +137,7 @@ impl PaiFinder { async fn input(&mut self, input: Input) -> Result<(), Error> { match input { - Input::SubmitAuthorisation(auth) => self.submit_autorisation(auth).await, + Input::SubmitAuthorisation(auth) => self.submit_authorisation(auth).await, Input::ReceivedMessage(message) => match message? { IntersectionMessage::BindFragment(message) => self.receive_bind(message).await?, IntersectionMessage::ReplyFragment(message) => self.receive_reply(message).await?, @@ -156,7 +156,7 @@ impl PaiFinder { Ok(()) } - async fn submit_autorisation(&mut self, authorisation: ReadAuthorisation) { + async fn submit_authorisation(&mut self, authorisation: ReadAuthorisation) { trace!(?authorisation, "pai submit auth"); let read_cap = authorisation.read_cap(); let fragment_kit = PaiScheme::get_fragment_kit(read_cap); From 32ad404182fd3d93933592546af92ec6d7f88050 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Fri, 5 Jul 2024 23:40:41 +0200 Subject: [PATCH 080/198] wip: refactor --- iroh-willow/src/actor.rs | 30 +- iroh-willow/src/auth.rs | 9 +- iroh-willow/src/net.rs | 8 +- iroh-willow/src/proto/sync.rs | 22 +- iroh-willow/src/session.rs | 8 +- iroh-willow/src/session/aoi_finder.rs | 218 +++++++ iroh-willow/src/session/capabilities.rs | 136 +++++ iroh-willow/src/session/data.rs | 53 +- iroh-willow/src/session/pai.rs | 33 +- iroh-willow/src/session/payload.rs | 21 +- iroh-willow/src/session/reconciler.rs | 431 ++++++++++---- 
iroh-willow/src/session/resource.rs | 191 +++--- iroh-willow/src/session/run.rs | 314 +++++++--- iroh-willow/src/session/state.rs | 722 ----------------------- iroh-willow/src/session/static_tokens.rs | 62 ++ iroh-willow/src/util/task.rs | 73 ++- 16 files changed, 1234 insertions(+), 1097 deletions(-) create mode 100644 iroh-willow/src/session/aoi_finder.rs create mode 100644 iroh-willow/src/session/capabilities.rs delete mode 100644 iroh-willow/src/session/state.rs create mode 100644 iroh-willow/src/session/static_tokens.rs diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/actor.rs index a5d41b0354..c0cf6c96ce 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -387,21 +387,31 @@ impl Actor { init, reply, } => { - let Channels { send, recv } = channels; + // let Channels { send, recv } = channels; let id = self.next_session_id(); - let session = - Session::new(&self.store, id, our_role, send, init, initial_transmission); - let session = match session { - Ok(session) => session, - Err(err) => return send_reply(reply, Err(err.into())), - }; + // let session = + // Session::new(&self.store, id, our_role, send, init, initial_transmission); + // let session = match session { + // Ok(session) => session, + // Err(err) => return send_reply(reply, Err(err.into())), + // }; let store = self.store.clone(); let cancel_token = CancellationToken::new(); - let future = session - .run(store, recv, cancel_token.clone()) - .instrument(error_span!("session", peer = %peer.fmt_short())); + let future = Session::run( + store, + channels, + cancel_token.clone(), + id, + our_role, + init, + initial_transmission, + ) + .instrument(error_span!("session", peer = %peer.fmt_short())); + // let future = session + // .run(store, recv, cancel_token.clone()) + // .instrument(error_span!("session", peer = %peer.fmt_short())); let task_key = self.session_tasks.spawn_local(id, future); let (on_finish_tx, on_finish_rx) = oneshot::channel(); diff --git 
a/iroh-willow/src/auth.rs b/iroh-willow/src/auth.rs index cca2246c6c..ca2dee9f62 100644 --- a/iroh-willow/src/auth.rs +++ b/iroh-willow/src/auth.rs @@ -1,5 +1,5 @@ use std::{ - collections::{BTreeMap, BTreeSet, HashMap}, + collections::{BTreeSet, HashMap}, sync::{Arc, RwLock}, }; @@ -19,7 +19,7 @@ use crate::{ store::traits::{SecretStorage, SecretStoreError, Storage}, }; -pub type InterestMap = BTreeMap>; +pub type InterestMap = HashMap>; #[derive(Debug, Clone)] pub struct DelegateTo { @@ -232,12 +232,11 @@ impl Auth { let aoi = AreaOfInterest::new(area); (auth, BTreeSet::from_iter([aoi])) }) - .collect::>(); + .collect::>(); Ok(out) } Interests::Some(interests) => { - let mut out: BTreeMap> = - BTreeMap::new(); + let mut out: HashMap> = HashMap::new(); for (cap_selector, aoi_selector) in interests { let cap = self.get_read_cap(&cap_selector)?; if let Some(cap) = cap { diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index ae3995fce3..ced6404ef2 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -96,6 +96,7 @@ impl SessionHandle { /// Previously queued messages will still be sent out. The session will only be closed /// once the other peer closes their senders as well. 
pub fn close(&self) { + debug!("trigger user close"); self.handle.close() } @@ -299,7 +300,10 @@ async fn join_all(join_set: &mut JoinSet>) -> anyhow::Result< #[cfg(test)] mod tests { - use std::{collections::BTreeSet, time::Instant}; + use std::{ + collections::BTreeSet, + time::{Duration, Instant}, + }; use futures_lite::StreamExt; use iroh_base::key::SecretKey; @@ -542,6 +546,8 @@ mod tests { let live_entries = done_rx.await?; expected_entries.extend(live_entries); + // TODO: replace with event + tokio::time::sleep(Duration::from_secs(1)).await; session_alfie.close(); let (res_alfie, res_betty) = tokio::join!(session_alfie.join(), session_betty.join()); diff --git a/iroh-willow/src/proto/sync.rs b/iroh-willow/src/proto/sync.rs index 70c871e81f..1ef7b8422c 100644 --- a/iroh-willow/src/proto/sync.rs +++ b/iroh-willow/src/proto/sync.rs @@ -176,28 +176,18 @@ impl Channel { pub enum LogicalChannel { /// Logical channel for controlling the binding of new IntersectionHandles. Intersection, - /// Logical channel for performing 3d range-based set reconciliation. - Reconciliation, - // TODO: use all the channels - // right now everything but reconciliation goes into the control channel - // - /// Logical channel for transmitting Entries and Payloads outside of 3d range-based set reconciliation. - Data, - // - // /// Logical channel for controlling the binding of new IntersectionHandles. - // Intersection, - // /// Logical channel for controlling the binding of new CapabilityHandles. Capability, - // /// Logical channel for controlling the binding of new AreaOfInterestHandles. AreaOfInterest, - // - // /// Logical channel for controlling the binding of new PayloadRequestHandles. - // PayloadRequest, - // /// Logical channel for controlling the binding of new StaticTokenHandles. StaticToken, + /// Logical channel for performing 3d range-based set reconciliation. 
+ Reconciliation, + /// Logical channel for transmitting Entries and Payloads outside of 3d range-based set reconciliation. + Data, + // /// Logical channel for controlling the binding of new PayloadRequestHandles. + // PayloadRequest, } #[derive(Debug, thiserror::Error)] diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index b7457b0e82..35eeb518b4 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -9,6 +9,8 @@ use crate::{ }, }; +mod aoi_finder; +mod capabilities; pub mod channels; mod data; mod error; @@ -17,14 +19,16 @@ mod payload; mod reconciler; mod resource; mod run; -mod state; +mod static_tokens; pub use self::channels::Channels; pub use self::error::Error; -pub use self::state::Session; pub type SessionId = u64; +#[derive(Debug)] +pub struct Session; + /// Data from the initial transmission /// /// This happens before the session is initialized. diff --git a/iroh-willow/src/session/aoi_finder.rs b/iroh-willow/src/session/aoi_finder.rs new file mode 100644 index 0000000000..9bd2e81d28 --- /dev/null +++ b/iroh-willow/src/session/aoi_finder.rs @@ -0,0 +1,218 @@ +use std::{ + cell::RefCell, + rc::Rc, +}; + + + +use crate::{ + proto::{ + grouping::{Area, AreaOfInterest}, + keys::NamespaceId, + sync::{AreaOfInterestHandle, CapabilityHandle, ReadCapability, SetupBindAreaOfInterest}, + }, + session::{channels::ChannelSenders, resource::ResourceMap, Error, Scope}, +}; + +/// Intersection between two areas of interest. 
+#[derive(Debug, Clone)] +pub struct AoiIntersection { + pub our_handle: AreaOfInterestHandle, + pub their_handle: AreaOfInterestHandle, + pub intersection: Area, + pub namespace: NamespaceId, +} + +#[derive(Debug)] +struct AoiInfo { + aoi: AreaOfInterest, + namespace: NamespaceId, + // authorisation: CapabilityHandle, + // state: State, +} + +impl AoiInfo { + fn area(&self) -> &Area { + &self.aoi.area + } +} + +// #[derive(Debug, Default)] +// enum State { +// #[default] +// Submitted, +// Started { +// pending_ranges: HashSet, +// }, +// Complete, +// } + +#[derive(Debug, Default, Clone)] +pub struct AoiFinder(Rc>); + +#[derive(Debug, Default)] +struct Inner { + our_handles: ResourceMap, + their_handles: ResourceMap, + // queue: Queue, + subscribers: Vec>, +} + +impl AoiFinder { + pub fn close(&self) { + let mut inner = self.0.borrow_mut(); + inner.subscribers.drain(..); + } + pub fn subscribe(&self) -> flume::Receiver { + let (tx, rx) = flume::bounded(128); + self.0.borrow_mut().subscribers.push(tx); + rx + } + pub async fn bind_and_send_ours( + &self, + sender: &ChannelSenders, + namespace: NamespaceId, + aoi: AreaOfInterest, + authorisation: CapabilityHandle, + ) -> Result<(), Error> { + self.bind_ours(namespace, aoi.clone())?; + let msg = SetupBindAreaOfInterest { + area_of_interest: aoi, + authorisation, + }; + sender.send(msg).await?; + Ok(()) + } + + pub fn bind_ours(&self, namespace: NamespaceId, aoi: AreaOfInterest) -> Result<(), Error> { + self.0 + .borrow_mut() + .bind_validated_area_of_interest(Scope::Ours, namespace, aoi) + } + + pub fn validate_and_bind_theirs( + &self, + their_cap: &ReadCapability, + aoi: AreaOfInterest, + ) -> Result<(), Error> { + their_cap.try_granted_area(&aoi.area)?; + self.0.borrow_mut().bind_validated_area_of_interest( + Scope::Theirs, + their_cap.granted_namespace().id(), + aoi, + )?; + Ok(()) + } + + // pub async fn authorise_range_eventually( + // &self, + // range: &ThreeDRange, + // receiver_handle: 
AreaOfInterestHandle, + // sender_handle: AreaOfInterestHandle, + // ) -> Result { + // poll_fn(|cx| { + // let mut inner = self.0.borrow_mut(); + // Pin::new(&mut inner).poll_authorise_range_eventually( + // range, + // receiver_handle, + // sender_handle, + // cx, + // ) + // }) + // .await + // } +} + +impl Inner { + pub fn bind_validated_area_of_interest( + &mut self, + scope: Scope, + namespace: NamespaceId, + aoi: AreaOfInterest, + ) -> Result<(), Error> { + // capability.try_granted_area(&msg.area_of_interest.area)?; + // let namespace = *capability.granted_namespace(); + let area = aoi.area.clone(); + let info = AoiInfo { + aoi, + // authorisation: msg.authorisation, + namespace, + // state: State::Submitted, + }; + let handle = match scope { + Scope::Ours => self.our_handles.bind(info), + Scope::Theirs => self.their_handles.bind(info), + }; + + let other_resources = match scope { + Scope::Ours => &self.their_handles, + Scope::Theirs => &self.our_handles, + }; + + // TODO: If we stored the AoIs by namespace we would need to iterate less. + for (candidate_handle, candidate) in other_resources.iter() { + let candidate_handle = *candidate_handle; + // Ignore areas without a capability. + // let Some(cap) = other_resources.capabilities.get(&candidate.authorisation) else { + // continue; + // }; + // Ignore areas for a different namespace. + // if *cap.granted_namespace() != namespace { + // continue; + // } + if candidate.namespace != namespace { + continue; + } + // Check if we have an intersection. + if let Some(intersection) = candidate.area().intersection(&area) { + // We found an intersection! 
+ let (our_handle, their_handle) = match scope { + Scope::Ours => (handle, candidate_handle), + Scope::Theirs => (candidate_handle, handle), + }; + let intersection = AoiIntersection { + our_handle, + their_handle, + intersection, + namespace, + }; + self.subscribers + .retain(|sender| sender.send(intersection.clone()).is_ok()); + // for subscriber in self.subscribers { + // // TODO: async, no panic + // subscriber.send(intersection).unwrap(); + // } + // self.queue.push_back(intersection); + } + } + Ok(()) + } + + // pub fn poll_authorise_range_eventually( + // &mut self, + // range: &ThreeDRange, + // receiver_handle: AreaOfInterestHandle, + // sender_handle: AreaOfInterestHandle, + // cx: &mut Context<'_>, + // ) -> Poll> { + // let their_aoi = ready!(self.their_handles.poll_get_eventually(sender_handle, cx)); + // let our_aoi = self.our_handles.try_get(&receiver_handle)?; + // let res = if our_aoi.namespace != their_aoi.namespace { + // Err(Error::AreaOfInterestNamespaceMismatch) + // } else if !our_aoi.area().includes_range(range) || !their_aoi.area().includes_range(range) { + // Err(Error::RangeOutsideCapability) + // } else { + // Ok(our_aoi.namespace) + // }; + // Poll::Ready(res) + // } +} + +// impl Stream for AoiFinder { +// type Item = AoiIntersection; +// +// fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { +// let mut queue = &mut self.0.borrow_mut().queue; +// Pin::new(&mut queue).poll_next(cx) +// } +// } diff --git a/iroh-willow/src/session/capabilities.rs b/iroh-willow/src/session/capabilities.rs new file mode 100644 index 0000000000..9830b02ea5 --- /dev/null +++ b/iroh-willow/src/session/capabilities.rs @@ -0,0 +1,136 @@ +use std::{ + cell::RefCell, + future::poll_fn, + rc::Rc, + task::{ready, Poll}, +}; + +use crate::{ + proto::{ + challenge::ChallengeState, + keys::UserSignature, + sync::{ + AccessChallenge, CapabilityHandle, ChallengeHash, CommitmentReveal, + IntersectionHandle, PaiReplySubspaceCapability, ReadCapability, + 
SetupBindReadCapability, + SubspaceCapability, + }, + }, + session::{channels::ChannelSenders, resource::ResourceMap, Error, Role}, + store::traits::SecretStorage, +}; + +#[derive(Debug, Clone)] +pub struct Capabilities(Rc>); + +#[derive(Debug)] +struct Inner { + challenge: ChallengeState, + ours: ResourceMap, + theirs: ResourceMap, +} + +impl Capabilities { + pub fn new(our_nonce: AccessChallenge, received_commitment: ChallengeHash) -> Self { + let challenge = ChallengeState::Committed { + our_nonce, + received_commitment, + }; + Self(Rc::new(RefCell::new(Inner { + challenge, + ours: Default::default(), + theirs: Default::default(), + }))) + } + + pub async fn bind_and_send_ours( + &self, + secret_store: &S, + sender: &ChannelSenders, + our_intersection_handle: IntersectionHandle, + capability: ReadCapability, + ) -> Result { + let mut inner = self.0.borrow_mut(); + let signable = inner.challenge.signable()?; + let signature = secret_store.sign_user(&capability.receiver().id(), &signable)?; + + let (our_handle, is_new) = inner.ours.bind_if_new(capability.clone()); + if is_new { + let msg = SetupBindReadCapability { + capability, + handle: our_intersection_handle, + signature, + }; + sender.send(msg).await?; + } + Ok(our_handle) + } + + pub fn bind_and_validate_theirs( + &self, + capability: ReadCapability, + signature: UserSignature, + ) -> Result<(), Error> { + capability.validate()?; + let mut inner = self.0.borrow_mut(); + inner.challenge.verify(capability.receiver(), &signature)?; + inner.theirs.bind(capability); + Ok(()) + } + + pub async fn get_theirs_eventually(&self, handle: CapabilityHandle) -> ReadCapability { + poll_fn(|cx| { + let mut inner = self.0.borrow_mut(); + let cap = ready!(inner.theirs.poll_get_eventually(handle, cx)); + Poll::Ready(cap.clone()) + }) + .await + } + + pub fn verify_subspace_capability( + &self, + capability: &SubspaceCapability, + signature: &UserSignature, + ) -> Result<(), Error> { + capability.validate()?; + self.0 + 
.borrow_mut() + .challenge + .verify(capability.receiver(), &signature)?; + Ok(()) + } + + pub fn reveal_commitment(&self) -> Result { + match self.0.borrow_mut().challenge { + ChallengeState::Committed { our_nonce, .. } => { + Ok(CommitmentReveal { nonce: our_nonce }) + } + _ => Err(Error::InvalidMessageInCurrentState), + } + } + + pub fn on_commitment_reveal( + &self, + our_role: Role, + their_nonce: AccessChallenge, + ) -> Result<(), Error> { + self.0.borrow_mut().challenge.reveal(our_role, their_nonce) + } + + pub fn sign_subspace_capabiltiy( + &self, + key_store: &K, + cap: &SubspaceCapability, + handle: IntersectionHandle, + ) -> Result { + let inner = self.0.borrow(); + let signable = inner.challenge.signable()?; + let signature = key_store.sign_user(&cap.receiver().id(), &signable)?; + let message = PaiReplySubspaceCapability { + handle, + capability: cap.clone(), + signature, + }; + Ok(message) + } +} diff --git a/iroh-willow/src/session/data.rs b/iroh-willow/src/session/data.rs index d067667a75..4fa15d946f 100644 --- a/iroh-willow/src/session/data.rs +++ b/iroh-willow/src/session/data.rs @@ -5,25 +5,39 @@ use crate::{ sync::{DataMessage, DataSendEntry, DataSendPayload}, willow::AuthorisedEntry, }, - session::{payload::DEFAULT_CHUNK_SIZE, Error}, + session::{ + channels::ChannelSenders, payload::DEFAULT_CHUNK_SIZE, static_tokens::StaticTokens, Error, + SessionId, + }, store::{traits::Storage, Origin, Store}, }; use super::payload::{send_payload_chunked, CurrentPayload}; -use super::Session; #[derive(derive_more::Debug)] pub struct DataSender { - session: Session, store: Store, + send: ChannelSenders, + static_tokens: StaticTokens, + session_id: SessionId, } impl DataSender { - pub fn new(session: Session, store: Store) -> Self { - Self { session, store } + pub fn new( + store: Store, + send: ChannelSenders, + static_tokens: StaticTokens, + session_id: SessionId, + ) -> Self { + Self { + store, + send, + static_tokens, + session_id, + } } pub async fn 
run(mut self) -> Result<(), Error> { - let mut stream = self.store.entries().subscribe(*self.session.id()); + let mut stream = self.store.entries().subscribe(self.session_id); loop { match stream.recv().await { Ok(entry) => { @@ -43,11 +57,10 @@ impl DataSender { let (static_token, dynamic_token) = token.into_parts(); // TODO: partial payloads // let available = entry.payload_length; - let (static_token_handle, static_token_bind_msg) = - self.session.bind_our_static_token(static_token); - if let Some(msg) = static_token_bind_msg { - self.session.send(msg).await?; - } + let static_token_handle = self + .static_tokens + .bind_and_send_ours(static_token, &self.send) + .await?; let digest = entry.payload_digest; let msg = DataSendEntry { entry, @@ -55,7 +68,7 @@ impl DataSender { dynamic_token, offset: 0, }; - self.session.send(msg).await?; + self.send.send(msg).await?; // TODO: only send payload if configured to do so and/or under size limit. let send_payloads = true; @@ -63,7 +76,7 @@ impl DataSender { send_payload_chunked( digest, self.store.payloads(), - &self.session, + &self.send, DEFAULT_CHUNK_SIZE, |bytes| DataSendPayload { bytes }.into(), ) @@ -75,16 +88,18 @@ impl DataSender { #[derive(derive_more::Debug)] pub struct DataReceiver { - session: Session, store: Store, current_payload: CurrentPayload, + static_tokens: StaticTokens, + session_id: SessionId, } impl DataReceiver { - pub fn new(session: Session, store: Store) -> Self { + pub fn new(store: Store, static_tokens: StaticTokens, session_id: SessionId) -> Self { Self { - session, store, + static_tokens, + session_id, current_payload: Default::default(), } } @@ -101,8 +116,8 @@ impl DataReceiver { async fn on_send_entry(&mut self, message: DataSendEntry) -> Result<(), Error> { self.current_payload.assert_inactive()?; let authorised_entry = self - .session - .authorise_sent_entry( + .static_tokens + .authorise_entry_eventually( message.entry, message.static_token_handle, message.dynamic_token, @@ -110,7 
+125,7 @@ impl DataReceiver { .await?; self.store .entries() - .ingest(&authorised_entry, Origin::Remote(*self.session.id()))?; + .ingest(&authorised_entry, Origin::Remote(self.session_id))?; self.current_payload .set(authorised_entry.into_entry(), None)?; Ok(()) diff --git a/iroh-willow/src/session/pai.rs b/iroh-willow/src/session/pai.rs index 92523e7cab..4e787a930f 100644 --- a/iroh-willow/src/session/pai.rs +++ b/iroh-willow/src/session/pai.rs @@ -12,7 +12,7 @@ use std::collections::{HashMap, HashSet}; use anyhow::Result; use futures_lite::{Stream, StreamExt}; -use genawaiter::GeneratorState; +use genawaiter::rc::Gen; use tracing::{debug, trace}; use crate::{ @@ -27,9 +27,8 @@ use crate::{ }, session::{ resource::{MissingResource, ResourceMap}, - Error, Scope, Session, + Error, Scope, }, - store::{traits::Storage, Store}, }; #[derive(Debug, thiserror::Error)] @@ -76,28 +75,10 @@ pub struct PaiFinder { } impl PaiFinder { - pub async fn run_with_session( - session: Session, - store: Store, + pub fn run_gen( inbox: impl Stream + Unpin, - ) -> Result<(), Error> { - let mut gen = genawaiter::rc::Gen::new(|co| PaiFinder::new(co).run(inbox)); - loop { - match gen.async_resume().await { - GeneratorState::Yielded(output) => match output { - Output::SendMessage(message) => session.send(message).await?, - Output::NewIntersection(intersection) => { - session.push_pai_intersection(intersection) - } - Output::SignAndSendSubspaceCap(handle, cap) => { - let message = - session.sign_subspace_capabiltiy(store.secrets(), &cap, handle)?; - session.send(Box::new(message)).await?; - } - }, - GeneratorState::Complete(res) => break res, - } - } + ) -> Gen>> { + Gen::new(|co| PaiFinder::new(co).run(inbox)) } #[cfg(test)] @@ -106,7 +87,9 @@ impl PaiFinder { mut outbox: impl futures_util::Sink + Unpin, ) -> Result<(), Error> { use futures_util::SinkExt; - let mut gen = genawaiter::rc::Gen::new(|co| PaiFinder::new(co).run(inbox)); + use genawaiter::GeneratorState; + + let mut gen = 
Gen::new(|co| PaiFinder::new(co).run(inbox)); loop { let y = gen.async_resume().await; match y { diff --git a/iroh-willow/src/session/payload.rs b/iroh-willow/src/session/payload.rs index 655757f01f..c85b65375c 100644 --- a/iroh-willow/src/session/payload.rs +++ b/iroh-willow/src/session/payload.rs @@ -6,19 +6,22 @@ use iroh_blobs::{ TempTag, }; -use crate::proto::{ - sync::Message, - willow::{Entry, PayloadDigest}, +use crate::{ + proto::{ + sync::Message, + willow::{Entry, PayloadDigest}, + }, + session::channels::ChannelSenders, }; -use super::{Error, Session}; +use super::{Error}; pub const DEFAULT_CHUNK_SIZE: usize = 1024 * 64; pub async fn send_payload_chunked( digest: PayloadDigest, payload_store: &P, - session: &Session, + senders: &ChannelSenders, chunk_size: usize, map: impl Fn(Bytes) -> Message, ) -> Result { @@ -37,7 +40,7 @@ pub async fn send_payload_chunked( .map_err(Error::PayloadStore)?; pos += bytes.len() as u64; let msg = map(bytes); - session.send(msg).await?; + senders.send(msg).await?; } Ok(true) } else { @@ -64,9 +67,9 @@ struct PayloadWriter { } impl CurrentPayload { - pub fn new() -> Self { - Self::default() - } + // pub fn new() -> Self { + // Self::default() + // } pub fn set(&mut self, entry: Entry, expected_length: Option) -> Result<(), Error> { if self.0.is_some() { diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index 1e4a2ea9f3..b822cdd7a4 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -1,9 +1,14 @@ +use std::{ + collections::{HashMap, HashSet, VecDeque}, + num::NonZeroU64, +}; + use futures_lite::StreamExt; use tracing::{debug, trace}; use crate::{ proto::{ - grouping::ThreeDRange, + grouping::{Area, ThreeDRange}, keys::NamespaceId, sync::{ AreaOfInterestHandle, Fingerprint, LengthyEntry, Message, @@ -13,44 +18,179 @@ use crate::{ }, }, session::{ - channels::MessageReceiver, + aoi_finder::{AoiIntersection}, + channels::{ChannelSenders, 
MessageReceiver}, payload::{send_payload_chunked, CurrentPayload}, - AoiIntersection, Error, Session, + static_tokens::StaticTokens, + Error, Role, SessionId, }, store::{ traits::{EntryReader, EntryStorage, SplitAction, SplitOpts, Storage}, Origin, Store, }, - util::{channel::WriteError, stream::Cancelable}, + util::{stream::Cancelable}, }; #[derive(derive_more::Debug)] pub struct Reconciler { - session: Session, + session_id: SessionId, + our_role: Role, + store: Store, - recv: Cancelable>, snapshot: ::Snapshot, + send: ChannelSenders, + recv: Cancelable>, + + static_tokens: StaticTokens, + targets: Targets, current_payload: CurrentPayload, + + our_range_counter: u64, + their_range_counter: u64, + pending_announced_entries: Option, +} + +type TargetId = (AreaOfInterestHandle, AreaOfInterestHandle); + +#[derive(Debug)] +struct Targets { + aoi_intersection_rx: flume::Receiver, + targets: HashMap, + init_queue: VecDeque, +} + +impl Targets { + fn new(aoi_intersection_rx: flume::Receiver) -> Self { + Self { + aoi_intersection_rx, + targets: Default::default(), + init_queue: Default::default(), + } + } + fn iter(&self) -> impl Iterator { + self.targets.values() + } + + fn get(&self, target: &TargetId) -> Result<&State, Error> { + self.targets + .get(target) + .ok_or(Error::MissingResource(target.1.into())) + } + fn get_mut(&mut self, target: &TargetId) -> Result<&mut State, Error> { + self.targets + .get_mut(target) + .ok_or(Error::MissingResource(target.1.into())) + } + + async fn init_next(&mut self) -> Option { + if let Some(target_id) = self.init_queue.pop_front() { + Some(target_id) + } else { + self.recv_next().await + } + // let target_id = self.recv_next().await?; + // Some(target_id) + } + + async fn get_eventually(&mut self, target_id: TargetId) -> Result<&mut State, Error> { + if self.targets.contains_key(&target_id) { + return Ok(self.targets.get_mut(&target_id).unwrap()); + } + + while let Some(next_target_id) = self.recv_next().await { + 
self.init_queue.push_back(next_target_id); + if next_target_id == target_id { + return Ok(self.targets.get_mut(&target_id).unwrap()); + } + } + Err(Error::InvalidState("aoi finder closed")) + } + + async fn recv_next(&mut self) -> Option { + let intersection = self.aoi_intersection_rx.recv_async().await.ok()?; + let (target_id, state) = State::new(intersection); + self.targets.insert(target_id, state); + Some(target_id) + } + + // fn init(&mut self, intersection: AoiIntersection) -> TargetId { + // let (target_id, state) = State::new(intersection); + // self.targets.insert(target_id, state); + // self.init_queue.push_back(target_id); + // target_id + // } +} + +#[derive(Debug)] +struct State { + namespace: NamespaceId, + area: Area, + our_uncovered_ranges: HashSet, + started: bool, +} + +impl State { + pub fn new(intersection: AoiIntersection) -> (TargetId, Self) { + let target_id = (intersection.our_handle, intersection.their_handle); + let state = Self { + namespace: intersection.namespace, + area: intersection.intersection, + our_uncovered_ranges: Default::default(), + started: false, + }; + (target_id, state) + } + + pub fn is_complete(&self) -> bool { + self.started && self.our_uncovered_ranges.is_empty() + } + + pub fn mark_our_range_pending(&mut self, range_count: u64) { + tracing::warn!("mark ours pending: {range_count}"); + self.started = true; + self.our_uncovered_ranges.insert(range_count); + } + + pub fn mark_our_range_covered(&mut self, range_count: u64) -> Result<(), Error> { + tracing::warn!(?self, "mark ours covered: {range_count}"); + if !self.our_uncovered_ranges.remove(&range_count) { + Err(Error::InvalidState( + "attempted to mark an unknown range as covered", + )) + } else { + Ok(()) + } + } } impl Reconciler { pub fn new( - session: Session, store: Store, recv: Cancelable>, + aoi_intersections: flume::Receiver, + static_tokens: StaticTokens, + session_id: SessionId, + send: ChannelSenders, + our_role: Role, ) -> Result { let snapshot = 
store.entries().snapshot()?; Ok(Self { - recv, + session_id, + send, + our_role, store, + recv, snapshot, - session, - current_payload: CurrentPayload::new(), + current_payload: Default::default(), + our_range_counter: 0, + their_range_counter: 0, + targets: Targets::new(aoi_intersections), + pending_announced_entries: Default::default(), + static_tokens, }) } pub async fn run(mut self) -> Result<(), Error> { - let our_role = self.session.our_role(); loop { tokio::select! { message = self.recv.try_next() => { @@ -59,16 +199,17 @@ impl Reconciler { Some(message) => self.on_message(message).await?, } } - Some(intersection) = self.session.next_aoi_intersection() => { - if self.session.mode().is_live() { - self.store.entries().watch_area(*self.session.id(), intersection.namespace, intersection.intersection.clone()); - } - if our_role.is_alfie() { - self.initiate(intersection).await?; + Some(target_id) = self.targets.init_next() => { + // // TODO: Move to another place. + // if self.session.mode().is_live() { + // self.store.entries().watch_area(*self.session.id(), intersection.namespace, intersection.intersection.clone()); + // } + if self.our_role.is_alfie() { + self.initiate(target_id).await?; } } } - if self.session.reconciliation_is_complete() && !self.current_payload.is_active() { + if self.is_complete() { debug!("reconciliation complete"); break; } @@ -76,42 +217,62 @@ impl Reconciler { Ok(()) } + fn is_complete(&self) -> bool { + if self.current_payload.is_active() { + return false; + } + if self.pending_announced_entries.is_some() { + return false; + } + self.targets.iter().all(|t| t.is_complete()) + } + async fn on_message(&mut self, message: ReconciliationMessage) -> Result<(), Error> { match message { ReconciliationMessage::SendFingerprint(message) => { - self.on_send_fingerprint(message).await? + self.received_send_fingerprint(message).await? } ReconciliationMessage::AnnounceEntries(message) => { - self.on_announce_entries(message).await? 
+ let res = self.received_announce_entries(message).await; + tracing::warn!("received_announce_entries DONE: {res:?}"); + res?; + } + ReconciliationMessage::SendEntry(message) => self.received_send_entry(message).await?, + ReconciliationMessage::SendPayload(message) => { + self.received_send_payload(message).await? } - ReconciliationMessage::SendEntry(message) => self.on_send_entry(message).await?, - ReconciliationMessage::SendPayload(message) => self.on_send_payload(message).await?, ReconciliationMessage::TerminatePayload(message) => { - self.on_terminate_payload(message).await? + self.received_terminate_payload(message).await? } }; Ok(()) } - async fn initiate(&mut self, intersection: AoiIntersection) -> Result<(), Error> { - let AoiIntersection { - our_handle, - their_handle, - intersection, - namespace, - } = intersection; - let range = intersection.into_range(); - let fingerprint = self.snapshot.fingerprint(namespace, &range)?; - self.send_fingerprint(range, fingerprint, our_handle, their_handle, None) + async fn initiate(&mut self, target_id: TargetId) -> Result<(), Error> { + let target = self.targets.get(&target_id)?; + let range = target.area.into_range(); + let fingerprint = self.snapshot.fingerprint(target.namespace, &range)?; + self.send_fingerprint(target_id, range, fingerprint, None) .await?; Ok(()) } - async fn on_send_fingerprint( + // fn mark_our_range_covered(&mut self, handle: ) + + async fn received_send_fingerprint( &mut self, message: ReconciliationSendFingerprint, ) -> Result<(), Error> { - let (namespace, range_count) = self.session.on_send_fingerprint(&message).await?; + let range_count = self.next_range_count_theirs(); + + let target_id = (message.receiver_handle, message.sender_handle); + let target = self.targets.get_eventually(target_id).await?; + let namespace = target.namespace; + + if let Some(range_count) = message.covers { + target.mark_our_range_covered(range_count)?; + } + let our_fingerprint = 
self.snapshot.fingerprint(namespace, &message.range)?; // case 1: fingerprint match. @@ -130,10 +291,9 @@ impl Reconciler { // case 2: fingerprint is empty else if message.fingerprint.is_empty() { self.announce_and_send_entries( + target_id, namespace, &message.range, - message.receiver_handle, - message.sender_handle, true, Some(range_count), None, @@ -143,67 +303,119 @@ impl Reconciler { // case 3: fingerprint doesn't match and is non-empty else { // reply by splitting the range into parts unless it is very short - self.split_range_and_send_parts( - namespace, - &message.range, - message.receiver_handle, - message.sender_handle, - range_count, - ) - .await?; + // self.split_range_and_send_parts(target_id, namespace, &message.range, range_count) + // .await?; + // TODO: Expose + let split_opts = SplitOpts::default(); + let snapshot = self.snapshot.clone(); + let mut iter = snapshot + .split_range(namespace, &message.range, &split_opts)? + .peekable(); + while let Some(res) = iter.next() { + let (subrange, action) = res?; + let is_last = iter.peek().is_none(); + let covers = is_last.then_some(range_count); + match action { + SplitAction::SendEntries(count) => { + self.announce_and_send_entries( + target_id, + namespace, + &subrange, + true, + covers, + Some(count), + ) + .await?; + } + SplitAction::SendFingerprint(fingerprint) => { + self.send_fingerprint(target_id, subrange, fingerprint, covers) + .await?; + } + } + } } + Ok(()) } - async fn on_announce_entries( + async fn received_announce_entries( &mut self, message: ReconciliationAnnounceEntries, ) -> Result<(), Error> { - trace!("on_announce_entries start"); + trace!("received_announce_entries start"); self.current_payload.assert_inactive()?; - let (namespace, range_count) = self.session.on_announce_entries(&message).await?; + if self.pending_announced_entries.is_some() { + return Err(Error::InvalidMessageInCurrentState); + } + + let target_id = (message.receiver_handle, message.sender_handle); + let target = 
self.targets.get_eventually(target_id).await?; + let namespace = target.namespace; + + if let Some(range_count) = message.covers { + target.mark_our_range_covered(range_count)?; + } + + if let Some(c) = NonZeroU64::new(message.count) { + self.pending_announced_entries = Some(c); + } + // if message.count != 0 { + // self.pending_announced_entries = Some(message.count); + // } + if message.want_response { + let range_count = self.next_range_count_theirs(); self.announce_and_send_entries( + target_id, namespace, &message.range, - message.receiver_handle, - message.sender_handle, false, - range_count, + Some(range_count), None, ) .await?; } - trace!("on_announce_entries done"); + trace!("received_announce_entries done"); + Ok(()) + } + + fn decrement_pending_announced_entries(&mut self) -> Result<(), Error> { + self.pending_announced_entries = match self.pending_announced_entries.take() { + None => return Err(Error::InvalidMessageInCurrentState), + Some(c) => NonZeroU64::new(c.get().saturating_sub(1)), + }; Ok(()) } - async fn on_send_entry(&mut self, message: ReconciliationSendEntry) -> Result<(), Error> { + async fn received_send_entry(&mut self, message: ReconciliationSendEntry) -> Result<(), Error> { self.current_payload.assert_inactive()?; - self.session.decrement_pending_announced_entries()?; + self.decrement_pending_announced_entries()?; let authorised_entry = self - .session - .authorise_sent_entry( - message.entry.entry, + .static_tokens + .authorise_entry_eventually( + message.entry.entry.clone(), message.static_token_handle, message.dynamic_token, ) .await?; self.store .entries() - .ingest(&authorised_entry, Origin::Remote(*self.session.id()))?; + .ingest(&authorised_entry, Origin::Remote(self.session_id))?; self.current_payload - .set(authorised_entry.into_entry(), Some(message.entry.available))?; + .set(message.entry.entry, Some(message.entry.available))?; Ok(()) } - async fn on_send_payload(&mut self, message: ReconciliationSendPayload) -> Result<(), 
Error> { + async fn received_send_payload( + &mut self, + message: ReconciliationSendPayload, + ) -> Result<(), Error> { self.current_payload .recv_chunk(self.store.payloads(), message.bytes) .await?; Ok(()) } - async fn on_terminate_payload( + async fn received_terminate_payload( &mut self, _message: ReconciliationTerminatePayload, ) -> Result<(), Error> { @@ -213,18 +425,16 @@ impl Reconciler { async fn send_fingerprint( &mut self, + target_id: TargetId, range: ThreeDRange, fingerprint: Fingerprint, - our_handle: AreaOfInterestHandle, - their_handle: AreaOfInterestHandle, covers: Option, ) -> anyhow::Result<()> { - self.session.mark_our_range_pending(our_handle); let msg = ReconciliationSendFingerprint { range, fingerprint, - sender_handle: our_handle, - receiver_handle: their_handle, + sender_handle: target_id.0, + receiver_handle: target_id.1, covers, }; self.send(msg).await?; @@ -234,10 +444,9 @@ impl Reconciler { #[allow(clippy::too_many_arguments)] async fn announce_and_send_entries( &mut self, + target_id: TargetId, namespace: NamespaceId, range: &ThreeDRange, - our_handle: AreaOfInterestHandle, - their_handle: AreaOfInterestHandle, want_response: bool, covers: Option, our_entry_count: Option, @@ -251,28 +460,23 @@ impl Reconciler { count: our_entry_count, want_response, will_sort: false, // todo: sorted? 
- sender_handle: our_handle, - receiver_handle: their_handle, + sender_handle: target_id.0, + receiver_handle: target_id.1, covers, }; - if want_response { - self.session.mark_our_range_pending(our_handle); - } + self.send(msg).await?; - for authorised_entry in self - .snapshot - .get_entries_with_authorisation(namespace, range) - { + let snapshot = self.snapshot.clone(); + for authorised_entry in snapshot.get_entries_with_authorisation(namespace, range) { let authorised_entry = authorised_entry?; let (entry, token) = authorised_entry.into_parts(); let (static_token, dynamic_token) = token.into_parts(); // TODO: partial payloads let available = entry.payload_length; - let (static_token_handle, static_token_bind_msg) = - self.session.bind_our_static_token(static_token); - if let Some(msg) = static_token_bind_msg { - self.send(msg).await?; - } + let static_token_handle = self + .static_tokens + .bind_and_send_ours(static_token, &self.send) + .await?; let digest = entry.payload_digest; let msg = ReconciliationSendEntry { entry: LengthyEntry::new(entry, available), @@ -288,7 +492,7 @@ impl Reconciler { && send_payload_chunked( digest, self.store.payloads(), - &self.session, + &self.send, chunk_size, |bytes| ReconciliationSendPayload { bytes }.into(), ) @@ -301,46 +505,35 @@ impl Reconciler { Ok(()) } - async fn split_range_and_send_parts( - &mut self, - namespace: NamespaceId, - range: &ThreeDRange, - our_handle: AreaOfInterestHandle, - their_handle: AreaOfInterestHandle, - range_count: u64, - ) -> Result<(), Error> { - // TODO: expose this config - let config = SplitOpts::default(); - // clone to avoid borrow checker trouble - let snapshot = self.snapshot.clone(); - let mut iter = snapshot.split_range(namespace, range, &config)?.peekable(); - while let Some(res) = iter.next() { - let (subrange, action) = res?; - let is_last = iter.peek().is_none(); - let covers = is_last.then_some(range_count); - match action { - SplitAction::SendEntries(count) => { - 
self.announce_and_send_entries( - namespace, - &subrange, - our_handle, - their_handle, - true, - covers, - Some(count), - ) - .await?; - } - SplitAction::SendFingerprint(fingerprint) => { - self.send_fingerprint(subrange, fingerprint, our_handle, their_handle, covers) - .await?; - } + async fn send(&mut self, message: impl Into) -> Result<(), Error> { + let message: Message = message.into(); + let want_response = match &message { + Message::ReconciliationSendFingerprint(msg) => { + Some((msg.sender_handle, msg.receiver_handle)) + } + Message::ReconciliationAnnounceEntries(msg) if msg.want_response => { + Some((msg.sender_handle, msg.receiver_handle)) } + _ => None, + }; + if let Some(target_id) = want_response { + let range_count = self.next_range_count_ours(); + let target = self.targets.get_mut(&target_id)?; + target.mark_our_range_pending(range_count); } + self.send.send(message).await?; Ok(()) } - async fn send(&self, message: impl Into) -> Result<(), WriteError> { - self.session.send(message).await + fn next_range_count_ours(&mut self) -> u64 { + let range_count = self.our_range_counter; + self.our_range_counter += 1; + range_count + } + + fn next_range_count_theirs(&mut self) -> u64 { + let range_count = self.their_range_counter; + self.their_range_counter += 1; + range_count } } diff --git a/iroh-willow/src/session/resource.rs b/iroh-willow/src/session/resource.rs index d0a0150b9e..029d2eb791 100644 --- a/iroh-willow/src/session/resource.rs +++ b/iroh-willow/src/session/resource.rs @@ -3,55 +3,56 @@ use std::{ task::{Context, Poll, Waker}, }; -use crate::proto::sync::{ - AreaOfInterestHandle, CapabilityHandle, IsHandle, ReadCapability, ResourceHandle, - SetupBindAreaOfInterest, StaticToken, StaticTokenHandle, +use crate::proto::{ + sync::{ + IsHandle, ResourceHandle, + }, }; use super::Error; -#[derive(Debug, Default)] -pub struct ResourceMaps { - pub capabilities: ResourceMap, - pub areas_of_interest: ResourceMap, - pub static_tokens: ResourceMap, -} -impl 
ResourceMaps { - pub fn register_waker(&mut self, handle: ResourceHandle, waker: Waker) { - tracing::trace!(?handle, "register_notify"); - match handle { - ResourceHandle::AreaOfInterest(h) => self.areas_of_interest.register_waker(h, waker), - ResourceHandle::Capability(h) => self.capabilities.register_waker(h, waker), - ResourceHandle::StaticToken(h) => self.static_tokens.register_waker(h, waker), - ResourceHandle::Intersection(_h) => unimplemented!(), - } - } - - pub fn get(&self, selector: F, handle: H) -> Result - where - H: IsHandle, - F: for<'a> Fn(&'a Self) -> &'a ResourceMap, - R: Eq + PartialEq + Clone, - { - let store = selector(self); - let res = store.try_get(&handle).cloned()?; - Ok(res) - } - - pub fn poll_get_eventually( - &mut self, - selector: F, - handle: H, - cx: &mut Context<'_>, - ) -> Poll - where - F: for<'a> Fn(&'a mut Self) -> &'a mut ResourceMap, - { - let res = selector(self); - let r = std::task::ready!(res.poll_get_eventually(handle, cx)); - Poll::Ready(r.clone()) - } -} +// #[derive(Debug, Default)] +// pub struct ResourceMaps { +// pub capabilities: ResourceMap, +// // pub areas_of_interest: ResourceMap, +// // pub static_tokens: ResourceMap, +// } +// impl ResourceMaps { +// // pub fn register_waker(&mut self, handle: ResourceHandle, waker: Waker) { +// // tracing::trace!(?handle, "register_notify"); +// // match handle { +// // ResourceHandle::AreaOfInterest(h) => self.areas_of_interest.register_waker(h, waker), +// // ResourceHandle::Capability(h) => self.capabilities.register_waker(h, waker), +// // ResourceHandle::StaticToken(h) => self.static_tokens.register_waker(h, waker), +// // ResourceHandle::Intersection(_h) => unimplemented!(), +// // } +// // } +// +// pub fn get(&self, selector: F, handle: H) -> Result +// where +// H: IsHandle, +// F: for<'a> Fn(&'a Self) -> &'a ResourceMap, +// R: Eq + PartialEq + Clone, +// { +// let store = selector(self); +// let res = store.try_get(&handle).cloned()?; +// Ok(res) +// } +// +// pub 
fn poll_get_eventually( +// &mut self, +// selector: F, +// handle: H, +// cx: &mut Context<'_>, +// ) -> Poll +// where +// F: for<'a> Fn(&'a mut Self) -> &'a mut ResourceMap, +// { +// let res = selector(self); +// let r = std::task::ready!(res.poll_get_eventually(handle, cx)); +// Poll::Ready(r.clone()) +// } +// } #[derive(Debug)] pub struct ResourceMap { @@ -73,7 +74,6 @@ impl Default for ResourceMap { impl ResourceMap where H: IsHandle, - R: Eq + PartialEq, { pub fn iter(&self) -> impl Iterator + '_ { self.map.iter().map(|(h, r)| (h, &r.value)) @@ -94,23 +94,9 @@ where handle } - pub fn register_waker(&mut self, handle: H, notifier: Waker) { - self.wakers.entry(handle).or_default().push_back(notifier) - } - - pub fn bind_if_new(&mut self, resource: R) -> (H, bool) { - // TODO: Optimize / find out if reverse index is better than find_map - if let Some(handle) = self - .map - .iter() - .find_map(|(handle, r)| (r.value == resource).then_some(handle)) - { - (*handle, false) - } else { - let handle = self.bind(resource); - (handle, true) - } - } + // fn register_waker(&mut self, handle: H, notifier: Waker) { + // self.wakers.entry(handle).or_default().push_back(notifier) + // } pub fn try_get(&self, handle: &H) -> Result<&R, MissingResource> { self.map @@ -120,25 +106,25 @@ where .ok_or_else(|| MissingResource((*handle).into())) } - pub fn get(&self, handle: &H) -> Option<&R> { - self.map.get(handle).as_ref().map(|r| &r.value) - } - - pub async fn get_eventually(&mut self, handle: H) -> &R { - std::future::poll_fn(|ctx| { - // cannot use self.get() and self.register_waker() here due to borrow checker. 
- if let Some(resource) = self.map.get(&handle).as_ref().map(|r| &r.value) { - Poll::Ready(resource) - } else { - self.wakers - .entry(handle) - .or_default() - .push_back(ctx.waker().to_owned()); - Poll::Pending - } - }) - .await - } + // pub fn get(&self, handle: &H) -> Option<&R> { + // self.map.get(handle).as_ref().map(|r| &r.value) + // } + // + // pub async fn get_eventually(&mut self, handle: H) -> &R { + // std::future::poll_fn(|ctx| { + // // cannot use self.get() and self.register_waker() here due to borrow checker. + // if let Some(resource) = self.map.get(&handle).as_ref().map(|r| &r.value) { + // Poll::Ready(resource) + // } else { + // self.wakers + // .entry(handle) + // .or_default() + // .push_back(ctx.waker().to_owned()); + // Poll::Pending + // } + // }) + // .await + // } pub fn poll_get_eventually(&mut self, handle: H, cx: &mut Context<'_>) -> Poll<&R> { // cannot use self.get() and self.register_waker() here due to borrow checker. @@ -163,6 +149,47 @@ where } } } +impl ResourceMap +where + H: IsHandle, + R: Eq + PartialEq, +{ + pub fn bind_if_new(&mut self, resource: R) -> (H, bool) { + // TODO: Optimize / find out if reverse index is better than find_map + if let Some(handle) = self + .map + .iter() + .find_map(|(handle, r)| (r.value == resource).then_some(handle)) + { + (*handle, false) + } else { + let handle = self.bind(resource); + (handle, true) + } + } +} + +// #[derive(Debug, Clone)] +// pub struct SharedResourceMap(Rc>>); +// +// impl SharedResourceMap +// where +// H: IsHandle, +// { +// pub fn bind(&mut self, resource: R) -> H { +// self.0.borrow_mut().bind(resource) +// } +// } +// +// impl SharedResourceMap +// where +// H: IsHandle, +// R: Eq + PartialEq, +// { +// pub fn bind_if_new(&mut self, resource: R) -> (H, bool) { +// self.0.borrow_mut().bind_if_new(resource) +// } +// } #[derive(Debug, thiserror::Error)] #[error("missing resource {0:?}")] diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 
4f3b16e6a1..789551767b 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -1,18 +1,27 @@ +use std::rc::Rc; + use futures_concurrency::stream::StreamExt as _; use futures_lite::StreamExt as _; use strum::IntoEnumIterator; use tokio_util::sync::CancellationToken; -use tracing::{debug, error_span, trace, warn}; +use tracing::{debug, error_span, trace, warn, Span}; use crate::{ + auth::InterestMap, proto::sync::{ControlIssueGuarantee, LogicalChannel, Message}, session::{ - channels::LogicalChannelReceivers, - pai::{self, PaiFinder}, - Error, Session, + aoi_finder::AoiFinder, + capabilities::Capabilities, + channels::{ChannelSenders, LogicalChannelReceivers}, + pai::{self, PaiFinder, PaiIntersection}, + static_tokens::StaticTokens, + Channels, Error, InitialTransmission, Role, Session, SessionId, SessionInit, + }, + store::{ + traits::{SecretStorage, Storage}, + Store, }, - store::{traits::Storage, Store}, - util::{channel::Receiver, stream::Cancelable}, + util::{channel::Receiver, stream::Cancelable, task::SharedJoinMap}, }; use super::{ @@ -26,11 +35,15 @@ const INITIAL_GUARANTEES: u64 = u64::MAX; impl Session { pub async fn run( - self, store: Store, - recv: ChannelReceivers, + channels: Channels, cancel_token: CancellationToken, + session_id: SessionId, + our_role: Role, + init: SessionInit, + initial_transmission: InitialTransmission, ) -> Result<(), Error> { + let Channels { send, recv } = channels; let ChannelReceivers { control_recv, logical_recv: @@ -53,52 +66,121 @@ impl Session { let mut aoi_recv = Cancelable::new(aoi_recv, cancel_token.clone()); let mut data_recv = Cancelable::new(data_recv, cancel_token.clone()); + let caps = Capabilities::new( + initial_transmission.our_nonce, + initial_transmission.received_commitment, + ); + let tokens = StaticTokens::default(); + let aoi_finder = AoiFinder::default(); + + let tasks = Tasks::default(); + + let interests = store.auth().find_read_caps_for_interests(init.interests)?; + let 
interests = Rc::new(interests); + // Setup the private area intersection finder. let (pai_inbox_tx, pai_inbox_rx) = flume::bounded(128); - self.spawn(error_span!("pai"), { + tasks.spawn(error_span!("pai"), { let store = store.clone(); + let send = send.clone(); + let caps = caps.clone(); let inbox = pai_inbox_rx .into_stream() .merge(intersection_recv.map(pai::Input::ReceivedMessage)); - move |session| async move { PaiFinder::run_with_session(session, store, inbox).await } + let interests = Rc::clone(&interests); + let aoi_finder = aoi_finder.clone(); + async move { + use genawaiter::GeneratorState; + use pai::Output; + let mut gen = PaiFinder::run_gen(inbox); + loop { + match gen.async_resume().await { + GeneratorState::Yielded(output) => match output { + Output::SendMessage(message) => send.send(message).await?, + Output::NewIntersection(intersection) => { + on_pai_intersection( + &interests, + store.secrets(), + &aoi_finder, + &caps, + &send, + intersection, + ) + .await?; + } + Output::SignAndSendSubspaceCap(handle, cap) => { + let message = + caps.sign_subspace_capabiltiy(store.secrets(), &cap, handle)?; + send.send(Box::new(message)).await?; + } + }, + GeneratorState::Complete(res) => { + return res; + } + } + } + } }); // Spawn a task to handle incoming static tokens. - self.spawn(error_span!("stt"), move |session| async move { - while let Some(message) = static_tokens_recv.try_next().await? { - session.on_setup_bind_static_token(message); + tasks.spawn(error_span!("stt"), { + let tokens = tokens.clone(); + async move { + while let Some(message) = static_tokens_recv.try_next().await? { + tokens.bind_theirs(message.static_token); + } + Ok(()) } - Ok(()) }); // Only setup data receiver if session is configured in live mode. 
- if self.mode() == SessionMode::Live { - self.spawn(error_span!("dat:r"), { + if init.mode == SessionMode::Live { + tasks.spawn(error_span!("data-recv"), { let store = store.clone(); - move |session| async move { - let mut data_receiver = DataReceiver::new(session, store); + let tokens = tokens.clone(); + async move { + let mut data_receiver = DataReceiver::new(store, tokens, session_id); while let Some(message) = data_recv.try_next().await? { data_receiver.on_message(message).await?; } Ok(()) } }); - self.spawn(error_span!("dat:s"), { + tasks.spawn(error_span!("data-send"), { + let store = store.clone(); + let tokens = tokens.clone(); + let send = send.clone(); + async move { + DataSender::new(store, send, tokens, session_id) + .run() + .await?; + Ok(()) + } + }); + tasks.spawn(error_span!("aoi-watch"), { let store = store.clone(); - move |session| async move { - DataSender::new(session, store).run().await?; + let aoi_intersections = aoi_finder.subscribe(); + async move { + while let Ok(intersection) = aoi_intersections.recv_async().await { + store.entries().watch_area( + session_id, + intersection.namespace, + intersection.intersection.clone(), + ); + } Ok(()) } }); } // Spawn a task to handle incoming capabilities. - self.spawn(error_span!("cap"), { + tasks.spawn(error_span!("cap-recv"), { let to_pai = pai_inbox_tx.clone(); - move |session| async move { + let caps = caps.clone(); + async move { while let Some(message) = capability_recv.try_next().await? { let handle = message.handle; - session.on_setup_bind_read_capability(message)?; + caps.bind_and_validate_theirs(message.capability, message.signature)?; to_pai .send_async(pai::Input::ReceivedReadCapForIntersection(handle)) .await @@ -109,48 +191,60 @@ impl Session { }); // Spawn a task to handle incoming areas of interest. - self.spawn(error_span!("aoi"), move |session| async move { - while let Some(message) = aoi_recv.try_next().await? 
{ - session.on_bind_area_of_interest(message).await?; + tasks.spawn(error_span!("aoi-recv"), { + let aoi_finder = aoi_finder.clone(); + let caps = caps.clone(); + async move { + while let Some(message) = aoi_recv.try_next().await? { + let cap = caps.get_theirs_eventually(message.authorisation).await; + aoi_finder.validate_and_bind_theirs(&cap, message.area_of_interest)?; + } + aoi_finder.close(); + Ok(()) } - Ok(()) }); // Spawn a task to handle reconciliation messages - self.spawn(error_span!("rec"), { + tasks.spawn(error_span!("rec"), { let cancel_token = cancel_token.clone(); - let store = store.clone(); - move |session| async move { - let res = Reconciler::new(session.clone(), store, reconciliation_recv)? - .run() - .await; - if !session.mode().is_live() { - debug!("reconciliation complete and not in live mode: close session"); + let aoi_intersections = aoi_finder.subscribe(); + let reconciler = Reconciler::new( + store.clone(), + reconciliation_recv, + aoi_intersections, + tokens.clone(), + session_id, + send.clone(), + our_role, + )?; + async move { + let res = reconciler.run().await; + if res.is_ok() && !init.mode.is_live() { + debug!("reconciliation complete and not in live mode: trigger cancel"); cancel_token.cancel(); } res } }); - // Spawn a task to react to found PAI intersections. 
- let pai_intersections = self.pai_intersection_stream(); - let mut pai_intersections = Cancelable::new(pai_intersections, cancel_token.clone()); - self.spawn(error_span!("pai:intersections"), { - let store = store.clone(); - move |session| async move { - while let Some(intersection) = pai_intersections.next().await { - session.on_pai_intersection(&store, intersection).await?; - } - Ok(()) - } - }); - // Spawn a task to handle control messages - self.spawn(error_span!("ctl"), { + tasks.spawn(error_span!("ctl-recv"), { let cancel_token = cancel_token.clone(); - move |session| async move { - let res = control_loop(session, control_recv, pai_inbox_tx).await; - cancel_token.cancel(); + let fut = control_loop( + our_role, + interests, + caps, + send.clone(), + tasks.clone(), + control_recv, + pai_inbox_tx, + ); + async move { + let res = fut.await; + if res.is_ok() { + debug!("control channel closed: trigger cancel"); + cancel_token.cancel(); + } res } }); @@ -159,61 +253,80 @@ impl Session { let result = loop { tokio::select! { _ = cancel_token.cancelled() => { + debug!("cancel token triggered: close session"); break Ok(()); }, - Some((span, result)) = self.join_next_task() => { + Some((span, result)) = tasks.join_next() => { let _guard = span.enter(); - trace!(?result, remaining = self.remaining_tasks(), "task complete"); - if let Err(err) = result { - warn!(?err, "session task failed: abort session"); - break Err(err); + trace!(?result, remaining = tasks.remaining_tasks(), "task complete"); + match result { + Err(err) => { + warn!(?err, "session task paniced: abort session"); + break Err(Error::TaskFailed(err)); + }, + Ok(Err(err)) => { + warn!(?err, "session task failed: abort session"); + break Err(err); + } + Ok(Ok(())) => {} } }, } }; if result.is_err() { - self.abort_all_tasks(); + debug!("aborting session"); + tasks.abort_all(); } else { debug!("closing session"); } // Unsubscribe from the store. This stops the data send task. 
- store.entries().unsubscribe(self.id()); + store.entries().unsubscribe(&session_id); // Wait for remaining tasks to terminate to catch any panics. // TODO: Add timeout? - while let Some((span, result)) = self.join_next_task().await { + while let Some((span, result)) = tasks.join_next().await { let _guard = span.enter(); - trace!(?result, remaining = self.remaining_tasks(), "task complete"); - if let Err(err) = result { - match err { - Error::TaskFailed(err) if err.is_cancelled() => {} - err => warn!("task failed: {err:?}"), - } + trace!( + ?result, + remaining = tasks.remaining_tasks(), + "task complete" + ); + match result { + Err(err) if err.is_cancelled() => {} + Err(err) => warn!("task paniced: {err:?}"), + Ok(Err(err)) => warn!("task failed: {err:?}"), + Ok(Ok(())) => {} } } // Close our channel senders. // This will stop the network send loop after all pending data has been sent. - self.close_senders(); + send.close_all(); debug!(success = result.is_ok(), "session complete"); result } } +pub type Tasks = SharedJoinMap>; + async fn control_loop( - session: Session, + our_role: Role, + our_interests: Rc, + caps: Capabilities, + sender: ChannelSenders, + tasks: Tasks, mut control_recv: Cancelable>, to_pai: flume::Sender, ) -> Result<(), Error> { - debug!(role = ?session.our_role(), "start session"); + debug!(role = ?our_role, "start session"); let mut commitment_revealed = false; // Reveal our nonce. - let reveal_message = session.reveal_commitment()?; - session.send(reveal_message).await?; + let reveal_message = caps.reveal_commitment()?; + sender.send(reveal_message).await?; // Issue guarantees for all logical channels. for channel in LogicalChannel::iter() { @@ -221,26 +334,37 @@ async fn control_loop( amount: INITIAL_GUARANTEES, channel, }; - session.send(msg).await?; + sender.send(msg).await?; } while let Some(message) = control_recv.try_next().await? 
{ match message { Message::CommitmentReveal(msg) => { - session.on_commitment_reveal(msg)?; + caps.on_commitment_reveal(our_role, msg.nonce)?; if commitment_revealed { return Err(Error::InvalidMessageInCurrentState)?; } commitment_revealed = true; - let to_pai = to_pai.clone(); - session.spawn(error_span!("setup-pai"), move |session| { - setup_pai(session, to_pai) - }); + + let submit_interests_fut = { + let to_pai = to_pai.clone(); + let our_interests = Rc::clone(&our_interests); + async move { + for authorisation in our_interests.keys() { + to_pai + .send_async(pai::Input::SubmitAuthorisation(authorisation.clone())) + .await + .map_err(|_| Error::InvalidState("PAI actor dead"))?; + } + Ok(()) + } + }; + tasks.spawn(error_span!("setup-pai"), submit_interests_fut); } Message::ControlIssueGuarantee(msg) => { let ControlIssueGuarantee { amount, channel } = msg; // trace!(?channel, %amount, "add guarantees"); - session.add_guarantees(channel, amount); + sender.get_logical(channel).add_guarantees(amount); } Message::PaiRequestSubspaceCapability(msg) => { to_pai @@ -249,7 +373,7 @@ async fn control_loop( .map_err(|_| Error::InvalidState("PAI actor dead"))?; } Message::PaiReplySubspaceCapability(msg) => { - session.verify_subspace_capability(&msg)?; + caps.verify_subspace_capability(&msg.capability, &msg.signature)?; to_pai .send_async(pai::Input::ReceivedVerifiedSubspaceCapReply( msg.handle, @@ -265,12 +389,30 @@ async fn control_loop( Ok(()) } -async fn setup_pai(session: Session, to_pai: flume::Sender) -> Result<(), Error> { - for authorisation in session.interests().keys() { - to_pai - .send_async(pai::Input::SubmitAuthorisation(authorisation.clone())) - .await - .map_err(|_| Error::InvalidState("PAI actor dead"))?; +async fn on_pai_intersection( + interests: &InterestMap, + secrets: &S, + aoi_finder: &AoiFinder, + capabilities: &Capabilities, + sender: &ChannelSenders, + intersection: PaiIntersection, +) -> Result<(), Error> { + let PaiIntersection { + 
authorisation, + handle, + } = intersection; + let aois = interests + .get(&authorisation) + .ok_or(Error::NoKnownInterestsForCapability)?; + let namespace = authorisation.namespace(); + let capability_handle = capabilities + .bind_and_send_ours(secrets, sender, handle, authorisation.read_cap().clone()) + .await?; + + for aoi in aois.iter().cloned() { + aoi_finder + .bind_and_send_ours(sender, namespace, aoi, capability_handle) + .await?; } Ok(()) } diff --git a/iroh-willow/src/session/state.rs b/iroh-willow/src/session/state.rs deleted file mode 100644 index 1ef26e9d31..0000000000 --- a/iroh-willow/src/session/state.rs +++ /dev/null @@ -1,722 +0,0 @@ -use std::{ - cell::{Ref, RefCell, RefMut}, - collections::HashSet, - future::poll_fn, - pin::Pin, - rc::Rc, - task::Poll, -}; - -use futures_lite::Stream; -use tracing::{debug, trace, Instrument, Span}; - -use crate::{ - auth::InterestMap, - proto::{ - challenge::ChallengeState, - grouping::ThreeDRange, - keys::NamespaceId, - sync::{ - AreaOfInterestHandle, CapabilityHandle, Channel, CommitmentReveal, DynamicToken, - IntersectionHandle, IsHandle, LogicalChannel, Message, PaiReplySubspaceCapability, - ReadCapability, ReconciliationAnnounceEntries, ReconciliationSendFingerprint, - SetupBindAreaOfInterest, SetupBindReadCapability, SetupBindStaticToken, StaticToken, - StaticTokenHandle, SubspaceCapability, - }, - willow::{AuthorisedEntry, Entry}, - }, - session::{pai::PaiIntersection, InitialTransmission, SessionInit}, - store::{ - traits::{SecretStorage, Storage}, - Store, - }, - util::{channel::WriteError, queue::Queue, task::JoinMap}, -}; - -use super::{ - channels::ChannelSenders, - resource::{ResourceMap, ResourceMaps}, - AoiIntersection, Error, Role, Scope, SessionId, SessionMode, -}; - -#[derive(Debug, Clone)] -pub struct Session(Rc); - -#[derive(derive_more::Debug)] -struct SessionInner { - id: SessionId, - our_role: Role, - mode: SessionMode, - interests: InterestMap, - state: RefCell, - send: ChannelSenders, - 
tasks: RefCell>>, -} - -impl Session { - pub fn new( - store: &Store, - id: SessionId, - our_role: Role, - send: ChannelSenders, - init: SessionInit, - initial_transmission: InitialTransmission, - ) -> Result { - let state = SessionState::new(initial_transmission); - let interests = store.auth().find_read_caps_for_interests(init.interests)?; - Ok(Self(Rc::new(SessionInner { - mode: init.mode, - id, - our_role, - interests, - state: RefCell::new(state), - send, - tasks: Default::default(), - }))) - } - - pub fn id(&self) -> &SessionId { - &self.0.id - } - - pub fn mode(&self) -> SessionMode { - self.0.mode - } - - pub fn interests(&self) -> &InterestMap { - &self.0.interests - } - - pub fn spawn(&self, span: Span, f: F) - where - F: FnOnce(Session) -> Fut, - Fut: std::future::Future> + 'static, - { - let state = self.clone(); - let fut = f(state); - let fut = fut.instrument(span.clone()); - self.0.tasks.borrow_mut().spawn_local(span, fut); - } - - pub async fn join_next_task(&self) -> Option<(Span, Result<(), Error>)> { - poll_fn(|cx| { - let mut tasks = self.0.tasks.borrow_mut(); - let res = std::task::ready!(Pin::new(&mut tasks).poll_join_next(cx)); - let res = match res { - None => None, - Some((key, Ok(r))) => Some((key, r)), - Some((key, Err(r))) => Some((key, Err(r.into()))), - }; - Poll::Ready(res) - }) - .await - } - - pub fn abort_all_tasks(&self) { - self.0.tasks.borrow_mut().abort_all(); - } - - // pub fn remaining_tasks(&self) -> usize { - // let tasks = self.0.tasks.borrow(); - // tasks.len() - // } - - pub fn remaining_tasks(&self) -> String { - let tasks = self.0.tasks.borrow(); - let mut out = vec![]; - for (span, _k) in tasks.iter() { - let name = span.metadata().unwrap().name(); - out.push(name.to_string()); - } - out.join(",") - } - - pub fn log_remaining_tasks(&self) { - let tasks = self.0.tasks.borrow(); - let names = tasks - .iter() - .map(|t| t.0.metadata().unwrap().name()) - .collect::>(); - debug!(tasks=?names, "active_tasks"); - } - - pub 
async fn send(&self, message: impl Into) -> Result<(), WriteError> { - let message: Message = message.into(); - if let Some((their_handle, range_count)) = message.covers_region() { - if let Err(err) = self - .state_mut() - .mark_their_range_covered(their_handle, range_count) - { - // TODO: Is this really unreachable? I think so, as this would indicate a logic - // error purely on our side. - unreachable!("mark_their_range_covered: {err:?}"); - } - } - self.0.send.send(message).await - } - - pub fn close_senders(&self) { - self.0.send.close_all(); - } - - pub fn add_guarantees(&self, channel: LogicalChannel, amount: u64) { - self.0 - .send - .get(Channel::Logical(channel)) - .add_guarantees(amount); - } - - pub fn our_role(&self) -> Role { - self.0.our_role - } - - pub async fn next_aoi_intersection(&self) -> Option { - poll_fn(|cx| { - let mut queue = &mut self.0.state.borrow_mut().aoi_intersection_queue; - Pin::new(&mut queue).poll_next(cx) - }) - .await - } - - pub fn get_our_resource( - &self, - selector: F, - handle: H, - ) -> Result - where - F: for<'a> Fn(&'a ResourceMaps) -> &'a ResourceMap, - { - let state = self.0.state.borrow_mut(); - state.our_resources.get(&selector, handle) - } - - pub async fn get_their_resource_eventually( - &self, - selector: F, - handle: H, - ) -> R - where - F: for<'a> Fn(&'a mut ResourceMaps) -> &'a mut ResourceMap, - { - let inner = &self.clone().0; - poll_fn(move |cx| { - let mut state = inner.state.borrow_mut(); - state - .their_resources - .poll_get_eventually(&selector, handle, cx) - }) - .await - } - - pub fn sign_subspace_capabiltiy( - &self, - key_store: &K, - cap: &SubspaceCapability, - handle: IntersectionHandle, - ) -> Result { - let inner = self.state(); - let signable = inner.challenge.signable()?; - let signature = key_store.sign_user(&cap.receiver().id(), &signable)?; - let message = PaiReplySubspaceCapability { - handle, - capability: cap.clone(), - signature, - }; - Ok(message) - } - - pub fn 
bind_and_sign_capability( - &self, - key_store: &K, - our_intersection_handle: IntersectionHandle, - capability: ReadCapability, - ) -> Result<(CapabilityHandle, Option), Error> { - let mut inner = self.0.state.borrow_mut(); - let signable = inner.challenge.signable()?; - let signature = key_store.sign_user(&capability.receiver().id(), &signable)?; - - let (our_handle, is_new) = inner - .our_resources - .capabilities - .bind_if_new(capability.clone()); - let maybe_message = is_new.then(|| SetupBindReadCapability { - capability, - handle: our_intersection_handle, - signature, - }); - Ok((our_handle, maybe_message)) - } - - pub fn mark_our_range_pending(&self, our_handle: AreaOfInterestHandle) { - let mut state = self.state_mut(); - state.reconciliation_started = true; - let range_count = state.our_range_counter; - state.our_uncovered_ranges.insert((our_handle, range_count)); - state.our_range_counter += 1; - } - - pub async fn on_announce_entries( - &self, - message: &ReconciliationAnnounceEntries, - ) -> Result<(NamespaceId, Option), Error> { - let range_count = { - let mut state = self.state_mut(); - if let Some(range_count) = message.covers { - state.mark_our_range_covered(message.receiver_handle, range_count)?; - } - if state.pending_announced_entries.is_some() { - return Err(Error::InvalidMessageInCurrentState); - } - if message.count != 0 { - state.pending_announced_entries = Some(message.count); - } - if message.want_response { - let range_count = state.add_pending_range_theirs(message.sender_handle); - Some(range_count) - } else { - None - } - }; - let namespace = self - .range_is_authorised_eventually( - &message.range, - message.receiver_handle, - message.sender_handle, - ) - .await?; - Ok((namespace, range_count)) - } - - pub async fn on_send_fingerprint( - &self, - message: &ReconciliationSendFingerprint, - ) -> Result<(NamespaceId, u64), Error> { - let range_count = { - let mut state = self.state_mut(); - state.reconciliation_started = true; - if let 
Some(range_count) = message.covers { - state.mark_our_range_covered(message.receiver_handle, range_count)?; - } - state.add_pending_range_theirs(message.sender_handle) - }; - - let namespace = self - .range_is_authorised_eventually( - &message.range, - message.receiver_handle, - message.sender_handle, - ) - .await?; - Ok((namespace, range_count)) - } - - async fn range_is_authorised_eventually( - &self, - range: &ThreeDRange, - receiver_handle: AreaOfInterestHandle, - sender_handle: AreaOfInterestHandle, - ) -> Result { - let our_namespace = self.our_aoi_to_namespace(&receiver_handle)?; - let their_namespace = self - .their_aoi_to_namespace_eventually(sender_handle) - .await?; - if our_namespace != their_namespace { - return Err(Error::AreaOfInterestNamespaceMismatch); - } - let our_aoi = self.get_our_resource(|r| &r.areas_of_interest, receiver_handle)?; - let their_aoi = self - .get_their_resource_eventually(|r| &mut r.areas_of_interest, sender_handle) - .await; - - if !our_aoi.area().includes_range(range) || !their_aoi.area().includes_range(range) { - return Err(Error::RangeOutsideCapability); - } - Ok(our_namespace) - } - - pub fn on_setup_bind_static_token(&self, msg: SetupBindStaticToken) { - self.state_mut() - .their_resources - .static_tokens - .bind(msg.static_token); - } - - pub fn on_setup_bind_read_capability(&self, msg: SetupBindReadCapability) -> Result<(), Error> { - // TODO: verify intersection handle - trace!("received capability {msg:?}"); - msg.capability.validate()?; - let mut state = self.state_mut(); - state - .challenge - .verify(msg.capability.receiver(), &msg.signature)?; - state.their_resources.capabilities.bind(msg.capability); - Ok(()) - } - - pub fn verify_subspace_capability( - &self, - msg: &PaiReplySubspaceCapability, - ) -> Result<(), Error> { - msg.capability.validate()?; - self.state() - .challenge - .verify(msg.capability.receiver(), &msg.signature)?; - Ok(()) - } - - pub fn reconciliation_is_complete(&self) -> bool { - let state 
= self.state(); - // tracing::debug!( - // "reconciliation_is_complete started {} our_pending_ranges {}, their_pending_ranges {}, pending_entries {:?} mode {:?}", - // state.reconciliation_started, - // state.our_uncovered_ranges.len(), - // state.their_uncovered_ranges.len(), - // state.pending_announced_entries, - // self.mode(), - // ); - state.reconciliation_started - && state.our_uncovered_ranges.is_empty() - && state.their_uncovered_ranges.is_empty() - && state.pending_announced_entries.is_none() - } - - pub fn reveal_commitment(&self) -> Result { - let state = self.state(); - match state.challenge { - ChallengeState::Committed { our_nonce, .. } => { - Ok(CommitmentReveal { nonce: our_nonce }) - } - _ => Err(Error::InvalidMessageInCurrentState), - } - } - - pub fn on_commitment_reveal(&self, msg: CommitmentReveal) -> Result<(), Error> { - let our_role = self.our_role(); - let mut state = self.state_mut(); - state.challenge.reveal(our_role, msg.nonce) - } - - /// Bind a area of interest, and start reconciliation if this area of interest has an - /// intersection with a remote area of interest. - /// - /// Will fail if the capability is missing. Await [`Self::get_our_resource_eventually`] or - /// [`Self::get_their_resource_eventually`] before calling this. - /// - /// Returns `true` if the capability was newly bound, and `false` if not. 
- pub fn bind_area_of_interest( - &self, - scope: Scope, - message: SetupBindAreaOfInterest, - capability: &ReadCapability, - ) -> Result<(), Error> { - self.state_mut() - .bind_area_of_interest(scope, message, capability) - } - - pub async fn on_bind_area_of_interest( - &self, - message: SetupBindAreaOfInterest, - ) -> Result<(), Error> { - let capability = self - .get_their_resource_eventually(|r| &mut r.capabilities, message.authorisation) - .await; - self.state_mut() - .bind_area_of_interest(Scope::Theirs, message, &capability)?; - Ok(()) - } - - pub async fn authorise_sent_entry( - &self, - entry: Entry, - static_token_handle: StaticTokenHandle, - dynamic_token: DynamicToken, - ) -> Result { - let static_token = self - .get_their_resource_eventually(|r| &mut r.static_tokens, static_token_handle) - .await; - - let authorised_entry = AuthorisedEntry::try_from_parts(entry, static_token, dynamic_token)?; - - Ok(authorised_entry) - } - - // pub async fn on_send_entry2(&self, entry: Entry, static_token_handle: StaticTokenHandle, dynamic_token: DynamicToken) -> Result<(), Error> { - // let static_token = self - // .get_their_resource_eventually(|r| &mut r.static_tokens, message.static_token_handle) - // .await; - // - // let authorised_entry = AuthorisedEntry::try_from_parts( - // message.entry.entry, - // static_token, - // message.dynamic_token, - // )?; - // - // self.state_mut().decrement_pending_announced_entries(); - // - // Ok(authorised_entry) - // } - - pub fn decrement_pending_announced_entries(&self) -> Result<(), Error> { - self.state_mut().decrement_pending_announced_entries() - } - - // pub fn prepare_entry_for_send(&self, entry: AuthorisedEntry) -> Result< - - pub fn bind_our_static_token( - &self, - static_token: StaticToken, - ) -> (StaticTokenHandle, Option) { - let mut state = self.state_mut(); - let (handle, is_new) = state - .our_resources - .static_tokens - .bind_if_new(static_token.clone()); - let msg = is_new.then(|| SetupBindStaticToken { 
static_token }); - (handle, msg) - } - - pub fn push_pai_intersection(&self, intersection: PaiIntersection) { - self.state_mut() - .pai_intersection_queue - .push_back(intersection) - } - - pub async fn next_pai_intersection(&self) -> Option { - poll_fn(|cx| { - let mut queue = &mut self.0.state.borrow_mut().pai_intersection_queue; - Pin::new(&mut queue).poll_next(cx) - }) - .await - } - - pub fn pai_intersection_stream(&self) -> PaiIntersectionStream { - PaiIntersectionStream { - session: self.clone(), - } - } - - pub async fn on_pai_intersection( - &self, - store: &Store, - intersection: PaiIntersection, - ) -> Result<(), Error> { - // TODO: Somehow getting from the BTreeMap is not working, even though the equality check - // below works as exepcted. - // let aois = self - // .0 - // .interests - // .get(&intersection.authorisation) - // .ok_or(Error::NoKnownInterestsForCapability)?; - for (authorisation, aois) in self.0.interests.iter() { - if *authorisation != intersection.authorisation { - continue; - } - let read_cap = authorisation.read_cap(); - let (our_capability_handle, message) = self.bind_and_sign_capability( - store.secrets(), - intersection.handle, - read_cap.clone(), - )?; - if let Some(message) = message { - self.send(message).await?; - } - - for area_of_interest in aois.iter().cloned() { - let msg = SetupBindAreaOfInterest { - area_of_interest, - authorisation: our_capability_handle, - }; - self.bind_area_of_interest(Scope::Ours, msg.clone(), read_cap)?; - self.send(msg).await?; - } - } - Ok(()) - } - - async fn their_aoi_to_namespace_eventually( - &self, - handle: AreaOfInterestHandle, - ) -> Result { - let aoi = self - .get_their_resource_eventually(|r| &mut r.areas_of_interest, handle) - .await; - let capability = self - .get_their_resource_eventually(|r| &mut r.capabilities, aoi.authorisation) - .await; - let namespace_id = capability.granted_namespace().into(); - Ok(namespace_id) - } - - fn our_aoi_to_namespace(&self, handle: 
&AreaOfInterestHandle) -> Result { - let state = self.state_mut(); - let aoi = state.our_resources.areas_of_interest.try_get(handle)?; - let capability = state - .our_resources - .capabilities - .try_get(&aoi.authorisation)?; - let namespace_id = capability.granted_namespace().into(); - Ok(namespace_id) - } - - fn state(&self) -> Ref { - self.0.state.borrow() - } - - fn state_mut(&self) -> RefMut { - self.0.state.borrow_mut() - } -} - -#[derive(Debug)] -struct SessionState { - challenge: ChallengeState, - our_resources: ResourceMaps, - their_resources: ResourceMaps, - reconciliation_started: bool, - our_range_counter: u64, - their_range_counter: u64, - our_uncovered_ranges: HashSet<(AreaOfInterestHandle, u64)>, - their_uncovered_ranges: HashSet<(AreaOfInterestHandle, u64)>, - pending_announced_entries: Option, - aoi_intersection_queue: Queue, - pai_intersection_queue: Queue, -} - -impl SessionState { - fn new(initial_transmission: InitialTransmission) -> Self { - let challenge_state = ChallengeState::Committed { - our_nonce: initial_transmission.our_nonce, - received_commitment: initial_transmission.received_commitment, - }; - // TODO: make use of initial_transmission.their_max_payload_size. 
- Self { - challenge: challenge_state, - reconciliation_started: false, - our_resources: Default::default(), - their_resources: Default::default(), - our_range_counter: 0, - their_range_counter: 0, - our_uncovered_ranges: Default::default(), - their_uncovered_ranges: Default::default(), - pending_announced_entries: Default::default(), - aoi_intersection_queue: Default::default(), - pai_intersection_queue: Default::default(), - } - } - - fn bind_area_of_interest( - &mut self, - scope: Scope, - msg: SetupBindAreaOfInterest, - capability: &ReadCapability, - ) -> Result<(), Error> { - capability.try_granted_area(&msg.area_of_interest.area)?; - - let namespace = *capability.granted_namespace(); - let area = msg.area_of_interest.area.clone(); - let handle = match scope { - Scope::Ours => self.our_resources.areas_of_interest.bind(msg), - Scope::Theirs => self.their_resources.areas_of_interest.bind(msg), - }; - - let other_resources = match scope { - Scope::Ours => &self.their_resources, - Scope::Theirs => &self.our_resources, - }; - - // TODO: If we stored the AoIs by namespace we would need to iterate less. - for (candidate_handle, candidate) in other_resources.areas_of_interest.iter() { - let candidate_handle = *candidate_handle; - // Ignore areas without a capability. - let Some(cap) = other_resources.capabilities.get(&candidate.authorisation) else { - continue; - }; - // Ignore areas for a different namespace. - if *cap.granted_namespace() != namespace { - continue; - } - // Check if we have an intersection. - if let Some(intersection) = candidate.area().intersection(&area) { - // We found an intersection! 
- let (our_handle, their_handle) = match scope { - Scope::Ours => (handle, candidate_handle), - Scope::Theirs => (candidate_handle, handle), - }; - let info = AoiIntersection { - our_handle, - their_handle, - intersection, - namespace: namespace.into(), - }; - self.aoi_intersection_queue.push_back(info); - } - } - Ok(()) - } - - fn decrement_pending_announced_entries(&mut self) -> Result<(), Error> { - let remaining = self - .pending_announced_entries - .as_mut() - .ok_or(Error::InvalidMessageInCurrentState)?; - *remaining -= 1; - if *remaining == 0 { - self.pending_announced_entries = None; - } - Ok(()) - } - - fn mark_our_range_covered( - &mut self, - our_handle: AreaOfInterestHandle, - range_count: u64, - ) -> Result<(), Error> { - if !self.our_uncovered_ranges.remove(&(our_handle, range_count)) { - Err(Error::InvalidMessageInCurrentState) - } else { - Ok(()) - } - } - - fn mark_their_range_covered( - &mut self, - their_handle: AreaOfInterestHandle, - range_count: u64, - ) -> Result<(), Error> { - // trace!(?their_handle, ?range_count, "mark_their_range_covered"); - if !self - .their_uncovered_ranges - .remove(&(their_handle, range_count)) - { - Err(Error::InvalidMessageInCurrentState) - } else { - Ok(()) - } - } - - fn add_pending_range_theirs(&mut self, their_handle: AreaOfInterestHandle) -> u64 { - let range_count = self.their_range_counter; - self.their_range_counter += 1; - // debug!(?their_handle, ?range_count, "add_pending_range_theirs"); - self.their_uncovered_ranges - .insert((their_handle, range_count)); - range_count - } -} - -#[derive(Debug)] -pub struct PaiIntersectionStream { - session: Session, -} - -impl Stream for PaiIntersectionStream { - type Item = PaiIntersection; - - fn poll_next( - self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> Poll> { - let mut queue = &mut self.session.0.state.borrow_mut().pai_intersection_queue; - Pin::new(&mut queue).poll_next(cx) - } -} diff --git a/iroh-willow/src/session/static_tokens.rs 
b/iroh-willow/src/session/static_tokens.rs new file mode 100644 index 0000000000..db954df86a --- /dev/null +++ b/iroh-willow/src/session/static_tokens.rs @@ -0,0 +1,62 @@ +use std::{ + cell::RefCell, + future::poll_fn, + rc::Rc, + task::{ready, Poll}, +}; + +use crate::{ + proto::{ + sync::{DynamicToken, SetupBindStaticToken, StaticToken, StaticTokenHandle}, + willow::{AuthorisedEntry, Entry}, + }, + session::{channels::ChannelSenders, resource::ResourceMap, Error}, +}; + +#[derive(Debug, Clone, Default)] +pub struct StaticTokens(Rc>); + +#[derive(Debug, Default)] +struct Inner { + ours: ResourceMap, + theirs: ResourceMap, +} + +impl StaticTokens { + pub fn bind_theirs(&self, token: StaticToken) { + self.0.borrow_mut().theirs.bind(token); + } + + pub async fn bind_and_send_ours( + &self, + static_token: StaticToken, + send: &ChannelSenders, + ) -> Result { + let (handle, is_new) = { self.0.borrow_mut().ours.bind_if_new(static_token.clone()) }; + if is_new { + let msg = SetupBindStaticToken { static_token }; + send.send(msg).await?; + } + Ok(handle) + } + + pub async fn authorise_entry_eventually( + &self, + entry: Entry, + static_token_handle: StaticTokenHandle, + dynamic_token: DynamicToken, + ) -> Result { + let inner = self.0.clone(); + let static_token = poll_fn(move |cx| { + let mut inner = inner.borrow_mut(); + let token = ready!(inner.theirs.poll_get_eventually(static_token_handle, cx)); + Poll::Ready(token.clone()) + }) + .await; + + let authorised_entry = + AuthorisedEntry::try_from_parts(entry, static_token.clone(), dynamic_token)?; + + Ok(authorised_entry) + } +} diff --git a/iroh-willow/src/util/task.rs b/iroh-willow/src/util/task.rs index f6394303a0..826038a492 100644 --- a/iroh-willow/src/util/task.rs +++ b/iroh-willow/src/util/task.rs @@ -1,9 +1,11 @@ //! Utilities for working with tokio tasks. 
use std::{ + cell::RefCell, collections::HashMap, - future::Future, + future::{poll_fn, Future}, pin::Pin, + rc::Rc, task::{Context, Poll}, }; @@ -11,6 +13,7 @@ use futures_concurrency::future::{future_group, FutureGroup}; use futures_lite::{Stream, StreamExt}; use tokio::task::AbortHandle; use tokio::task::JoinError; +use tracing::{Instrument, Span}; #[derive(derive_more::Debug, Clone, Copy, Hash, Eq, PartialEq)] #[debug("{:?}", _0)] @@ -120,3 +123,71 @@ impl Stream for JoinMap { Self::poll_join_next(self.get_mut(), cx) } } + +#[derive(Debug)] +pub struct SharedJoinMap(Rc>>); + +impl Clone for SharedJoinMap { + fn clone(&self) -> Self { + Self(Rc::clone(&self.0)) + } +} + +impl Default for SharedJoinMap { + fn default() -> Self { + Self(Default::default()) + } +} + +impl SharedJoinMap +where + K: Unpin, + T: 'static, +{ + pub async fn join_next(&self) -> Option<(K, Result)> { + poll_fn(|cx| { + let mut tasks = self.0.borrow_mut(); + let res = std::task::ready!(Pin::new(&mut tasks).poll_join_next(cx)); + Poll::Ready(res) + }) + .await + } + + pub fn abort_all(&self) { + self.0.borrow_mut().abort_all(); + } + + pub async fn shutdown(&self) { + self.abort_all(); + while let Some(_) = self.join_next().await {} + } +} + +impl SharedJoinMap { + pub fn spawn(&self, span: Span, fut: Fut) + where + Fut: std::future::Future + 'static, + { + let fut = fut.instrument(span.clone()); + self.0.borrow_mut().spawn_local(span, fut); + } + + pub fn remaining_tasks(&self) -> String { + let tasks = self.0.borrow(); + let mut out = vec![]; + for (span, _k) in tasks.iter() { + let name = span.metadata().unwrap().name(); + out.push(name.to_string()); + } + out.join(",") + } + + pub fn log_remaining_tasks(&self) { + let tasks = self.0.borrow(); + let names = tasks + .iter() + .map(|t| t.0.metadata().unwrap().name()) + .collect::>(); + tracing::debug!(tasks=?names, "active_tasks"); + } +} From e9d50fbf76a40638be4103997d59b1b8f62b77b4 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann 
(Frando)" Date: Sat, 6 Jul 2024 00:31:54 +0200 Subject: [PATCH 081/198] chore: cleanup refactor --- iroh-willow/src/actor.rs | 5 +- iroh-willow/src/net.rs | 6 +- iroh-willow/src/proto/sync.rs | 13 + iroh-willow/src/session.rs | 39 +- iroh-willow/src/session/aoi_finder.rs | 149 ++---- iroh-willow/src/session/capabilities.rs | 59 ++- iroh-willow/src/session/data.rs | 36 +- iroh-willow/src/session/error.rs | 2 +- .../src/session/{pai.rs => pai_finder.rs} | 2 +- iroh-willow/src/session/payload.rs | 2 +- iroh-willow/src/session/reconciler.rs | 49 +- iroh-willow/src/session/resource.rs | 95 +--- iroh-willow/src/session/run.rs | 481 +++++++++--------- 13 files changed, 366 insertions(+), 572 deletions(-) rename iroh-willow/src/session/{pai.rs => pai_finder.rs} (99%) diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/actor.rs index c0cf6c96ce..52ccc8e7f9 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -15,9 +15,10 @@ use crate::{ grouping::ThreeDRange, keys::{NamespaceId, NamespaceKind, UserId, UserSecretKey}, meadowcap::{self, AccessMode}, + sync::InitialTransmission, willow::{AuthorisedEntry, Entry}, }, - session::{Channels, Error, InitialTransmission, Role, Session, SessionId, SessionInit}, + session::{run_session, Channels, Error, Role, SessionId, SessionInit}, store::{ traits::{EntryReader, SecretStorage, Storage}, Origin, Store, @@ -399,7 +400,7 @@ impl Actor { let store = self.store.clone(); let cancel_token = CancellationToken::new(); - let future = Session::run( + let future = run_session( store, channels, cancel_token.clone(), diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index ced6404ef2..2c4680f6c1 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -12,15 +12,15 @@ use tracing::{debug, error_span, field::Empty, instrument, trace, warn, Instrume use crate::{ actor::{self, ActorHandle}, proto::sync::{ - AccessChallenge, Channel, LogicalChannel, Message, CHALLENGE_HASH_LENGTH, - MAX_PAYLOAD_SIZE_POWER, + 
AccessChallenge, Channel, InitialTransmission, LogicalChannel, Message, + CHALLENGE_HASH_LENGTH, MAX_PAYLOAD_SIZE_POWER, }, session::{ channels::{ ChannelReceivers, ChannelSenders, Channels, LogicalChannelReceivers, LogicalChannelSenders, }, - InitialTransmission, Role, SessionInit, + Role, SessionInit, }, util::channel::{ inbound_channel, outbound_channel, Guarantees, Reader, Receiver, Sender, Writer, diff --git a/iroh-willow/src/proto/sync.rs b/iroh-willow/src/proto/sync.rs index 1ef7b8422c..8bb8884169 100644 --- a/iroh-willow/src/proto/sync.rs +++ b/iroh-willow/src/proto/sync.rs @@ -58,6 +58,19 @@ pub type SyncSignature = meadowcap::UserSignature; pub type Receiver = meadowcap::UserPublicKey; +/// Data from the initial transmission +/// +/// This happens before the session is initialized. +#[derive(Debug)] +pub struct InitialTransmission { + /// The [`AccessChallenge`] nonce, whose hash we sent to the remote. + pub our_nonce: AccessChallenge, + /// The [`ChallengeHash`] we received from the remote. + pub received_commitment: ChallengeHash, + /// The maximum payload size we received from the remote. + pub their_max_payload_size: u64, +} + /// Represents an authorisation to read an area of data in a Namespace. 
#[derive(Debug, Clone, Serialize, Deserialize, Hash, Eq, PartialEq, Ord, PartialOrd)] pub struct ReadAuthorisation(ReadCapability, Option); diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 35eeb518b4..1d30aa4e02 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -1,20 +1,13 @@ use std::collections::{BTreeMap, BTreeSet}; -use crate::{ - auth::CapSelector, - proto::{ - grouping::{Area, AreaOfInterest}, - keys::NamespaceId, - sync::{AccessChallenge, AreaOfInterestHandle, ChallengeHash}, - }, -}; +use crate::{auth::CapSelector, proto::grouping::AreaOfInterest}; mod aoi_finder; mod capabilities; pub mod channels; mod data; mod error; -mod pai; +mod pai_finder; mod payload; mod reconciler; mod resource; @@ -23,25 +16,10 @@ mod static_tokens; pub use self::channels::Channels; pub use self::error::Error; +pub use self::run::run_session; pub type SessionId = u64; -#[derive(Debug)] -pub struct Session; - -/// Data from the initial transmission -/// -/// This happens before the session is initialized. -#[derive(Debug)] -pub struct InitialTransmission { - /// The [`AccessChallenge`] nonce, whose hash we sent to the remote. - pub our_nonce: AccessChallenge, - /// The [`ChallengeHash`] we received from the remote. - pub received_commitment: ChallengeHash, - /// The maximum payload size we received from the remote. - pub their_max_payload_size: u64, -} - /// To break symmetry, we refer to the peer that initiated the synchronisation session as Alfie, /// and the other peer as Betty. #[derive(Debug, Clone, Copy, Eq, PartialEq)] @@ -73,7 +51,7 @@ pub enum SessionMode { impl SessionMode { pub fn is_live(&self) -> bool { - *self == Self::Live + matches!(self, Self::Live) } } @@ -115,12 +93,3 @@ pub enum Scope { /// Resources bound by the other peer. Theirs, } - -/// Intersection between two areas of interest. 
-#[derive(Debug, Clone)] -pub struct AoiIntersection { - pub our_handle: AreaOfInterestHandle, - pub their_handle: AreaOfInterestHandle, - pub intersection: Area, - pub namespace: NamespaceId, -} diff --git a/iroh-willow/src/session/aoi_finder.rs b/iroh-willow/src/session/aoi_finder.rs index 9bd2e81d28..b6f7fc6edc 100644 --- a/iroh-willow/src/session/aoi_finder.rs +++ b/iroh-willow/src/session/aoi_finder.rs @@ -1,9 +1,4 @@ -use std::{ - cell::RefCell, - rc::Rc, -}; - - +use std::{cell::RefCell, rc::Rc}; use crate::{ proto::{ @@ -23,51 +18,19 @@ pub struct AoiIntersection { pub namespace: NamespaceId, } -#[derive(Debug)] -struct AoiInfo { - aoi: AreaOfInterest, - namespace: NamespaceId, - // authorisation: CapabilityHandle, - // state: State, -} - -impl AoiInfo { - fn area(&self) -> &Area { - &self.aoi.area - } -} - -// #[derive(Debug, Default)] -// enum State { -// #[default] -// Submitted, -// Started { -// pending_ranges: HashSet, -// }, -// Complete, -// } - #[derive(Debug, Default, Clone)] pub struct AoiFinder(Rc>); +pub type AoiIntersectionQueue = flume::Receiver; + #[derive(Debug, Default)] struct Inner { our_handles: ResourceMap, their_handles: ResourceMap, - // queue: Queue, subscribers: Vec>, } impl AoiFinder { - pub fn close(&self) { - let mut inner = self.0.borrow_mut(); - inner.subscribers.drain(..); - } - pub fn subscribe(&self) -> flume::Receiver { - let (tx, rx) = flume::bounded(128); - self.0.borrow_mut().subscribers.push(tx); - rx - } pub async fn bind_and_send_ours( &self, sender: &ChannelSenders, @@ -75,7 +38,7 @@ impl AoiFinder { aoi: AreaOfInterest, authorisation: CapabilityHandle, ) -> Result<(), Error> { - self.bind_ours(namespace, aoi.clone())?; + self.bind(Scope::Ours, namespace, aoi.clone())?; let msg = SetupBindAreaOfInterest { area_of_interest: aoi, authorisation, @@ -84,61 +47,42 @@ impl AoiFinder { Ok(()) } - pub fn bind_ours(&self, namespace: NamespaceId, aoi: AreaOfInterest) -> Result<(), Error> { - self.0 - .borrow_mut() - 
.bind_validated_area_of_interest(Scope::Ours, namespace, aoi) - } - pub fn validate_and_bind_theirs( &self, their_cap: &ReadCapability, aoi: AreaOfInterest, ) -> Result<(), Error> { their_cap.try_granted_area(&aoi.area)?; - self.0.borrow_mut().bind_validated_area_of_interest( - Scope::Theirs, - their_cap.granted_namespace().id(), - aoi, - )?; + self.bind(Scope::Theirs, their_cap.granted_namespace().id(), aoi)?; Ok(()) } - // pub async fn authorise_range_eventually( - // &self, - // range: &ThreeDRange, - // receiver_handle: AreaOfInterestHandle, - // sender_handle: AreaOfInterestHandle, - // ) -> Result { - // poll_fn(|cx| { - // let mut inner = self.0.borrow_mut(); - // Pin::new(&mut inner).poll_authorise_range_eventually( - // range, - // receiver_handle, - // sender_handle, - // cx, - // ) - // }) - // .await - // } + pub fn subscribe(&self) -> flume::Receiver { + let (tx, rx) = flume::bounded(128); + self.0.borrow_mut().subscribers.push(tx); + rx + } + + pub fn close(&self) { + let mut inner = self.0.borrow_mut(); + inner.subscribers.drain(..); + } + + fn bind(&self, scope: Scope, namespace: NamespaceId, aoi: AreaOfInterest) -> Result<(), Error> { + let mut inner = self.0.borrow_mut(); + inner.bind_validated_aoi(scope, namespace, aoi) + } } impl Inner { - pub fn bind_validated_area_of_interest( + pub fn bind_validated_aoi( &mut self, scope: Scope, namespace: NamespaceId, aoi: AreaOfInterest, ) -> Result<(), Error> { - // capability.try_granted_area(&msg.area_of_interest.area)?; - // let namespace = *capability.granted_namespace(); let area = aoi.area.clone(); - let info = AoiInfo { - aoi, - // authorisation: msg.authorisation, - namespace, - // state: State::Submitted, - }; + let info = AoiInfo { aoi, namespace }; let handle = match scope { Scope::Ours => self.our_handles.bind(info), Scope::Theirs => self.their_handles.bind(info), @@ -152,14 +96,6 @@ impl Inner { // TODO: If we stored the AoIs by namespace we would need to iterate less. 
for (candidate_handle, candidate) in other_resources.iter() { let candidate_handle = *candidate_handle; - // Ignore areas without a capability. - // let Some(cap) = other_resources.capabilities.get(&candidate.authorisation) else { - // continue; - // }; - // Ignore areas for a different namespace. - // if *cap.granted_namespace() != namespace { - // continue; - // } if candidate.namespace != namespace { continue; } @@ -178,41 +114,20 @@ impl Inner { }; self.subscribers .retain(|sender| sender.send(intersection.clone()).is_ok()); - // for subscriber in self.subscribers { - // // TODO: async, no panic - // subscriber.send(intersection).unwrap(); - // } - // self.queue.push_back(intersection); } } Ok(()) } +} - // pub fn poll_authorise_range_eventually( - // &mut self, - // range: &ThreeDRange, - // receiver_handle: AreaOfInterestHandle, - // sender_handle: AreaOfInterestHandle, - // cx: &mut Context<'_>, - // ) -> Poll> { - // let their_aoi = ready!(self.their_handles.poll_get_eventually(sender_handle, cx)); - // let our_aoi = self.our_handles.try_get(&receiver_handle)?; - // let res = if our_aoi.namespace != their_aoi.namespace { - // Err(Error::AreaOfInterestNamespaceMismatch) - // } else if !our_aoi.area().includes_range(range) || !their_aoi.area().includes_range(range) { - // Err(Error::RangeOutsideCapability) - // } else { - // Ok(our_aoi.namespace) - // }; - // Poll::Ready(res) - // } +#[derive(Debug)] +struct AoiInfo { + aoi: AreaOfInterest, + namespace: NamespaceId, } -// impl Stream for AoiFinder { -// type Item = AoiIntersection; -// -// fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { -// let mut queue = &mut self.0.borrow_mut().queue; -// Pin::new(&mut queue).poll_next(cx) -// } -// } +impl AoiInfo { + fn area(&self) -> &Area { + &self.aoi.area + } +} diff --git a/iroh-willow/src/session/capabilities.rs b/iroh-willow/src/session/capabilities.rs index 9830b02ea5..c30ef9b81f 100644 --- a/iroh-willow/src/session/capabilities.rs +++ 
b/iroh-willow/src/session/capabilities.rs @@ -10,9 +10,8 @@ use crate::{ challenge::ChallengeState, keys::UserSignature, sync::{ - AccessChallenge, CapabilityHandle, ChallengeHash, CommitmentReveal, - IntersectionHandle, PaiReplySubspaceCapability, ReadCapability, - SetupBindReadCapability, + AccessChallenge, CapabilityHandle, ChallengeHash, CommitmentReveal, IntersectionHandle, + PaiReplySubspaceCapability, ReadCapability, SetupBindReadCapability, SubspaceCapability, }, }, @@ -47,26 +46,40 @@ impl Capabilities { &self, secret_store: &S, sender: &ChannelSenders, - our_intersection_handle: IntersectionHandle, + intersection_handle: IntersectionHandle, capability: ReadCapability, ) -> Result { - let mut inner = self.0.borrow_mut(); - let signable = inner.challenge.signable()?; - let signature = secret_store.sign_user(&capability.receiver().id(), &signable)?; + let (handle, message) = + self.bind_and_sign_ours(secret_store, intersection_handle, capability)?; + if let Some(message) = message { + sender.send(message).await?; + } + Ok(handle) + } - let (our_handle, is_new) = inner.ours.bind_if_new(capability.clone()); - if is_new { - let msg = SetupBindReadCapability { + pub fn bind_and_sign_ours( + &self, + secret_store: &S, + intersection_handle: IntersectionHandle, + capability: ReadCapability, + ) -> Result<(CapabilityHandle, Option), Error> { + let mut inner = self.0.borrow_mut(); + let (handle, is_new) = inner.ours.bind_if_new(capability.clone()); + let message = if is_new { + let signable = inner.challenge.signable()?; + let signature = secret_store.sign_user(&capability.receiver().id(), &signable)?; + Some(SetupBindReadCapability { capability, - handle: our_intersection_handle, + handle: intersection_handle, signature, - }; - sender.send(msg).await?; - } - Ok(our_handle) + }) + } else { + None + }; + Ok((handle, message)) } - pub fn bind_and_validate_theirs( + pub fn validate_and_bind_theirs( &self, capability: ReadCapability, signature: UserSignature, @@ -87,7 
+100,7 @@ impl Capabilities { .await } - pub fn verify_subspace_capability( + pub fn verify_subspace_cap( &self, capability: &SubspaceCapability, signature: &UserSignature, @@ -96,7 +109,7 @@ impl Capabilities { self.0 .borrow_mut() .challenge - .verify(capability.receiver(), &signature)?; + .verify(capability.receiver(), signature)?; Ok(()) } @@ -109,7 +122,7 @@ impl Capabilities { } } - pub fn on_commitment_reveal( + pub fn received_commitment_reveal( &self, our_role: Role, their_nonce: AccessChallenge, @@ -117,15 +130,15 @@ impl Capabilities { self.0.borrow_mut().challenge.reveal(our_role, their_nonce) } - pub fn sign_subspace_capabiltiy( + pub fn sign_subspace_capabiltiy( &self, - key_store: &K, - cap: &SubspaceCapability, + secrets: &S, + cap: SubspaceCapability, handle: IntersectionHandle, ) -> Result { let inner = self.0.borrow(); let signable = inner.challenge.signable()?; - let signature = key_store.sign_user(&cap.receiver().id(), &signable)?; + let signature = secrets.sign_user(&cap.receiver().id(), &signable)?; let message = PaiReplySubspaceCapability { handle, capability: cap.clone(), diff --git a/iroh-willow/src/session/data.rs b/iroh-willow/src/session/data.rs index 4fa15d946f..72ec498d72 100644 --- a/iroh-willow/src/session/data.rs +++ b/iroh-willow/src/session/data.rs @@ -6,8 +6,8 @@ use crate::{ willow::AuthorisedEntry, }, session::{ - channels::ChannelSenders, payload::DEFAULT_CHUNK_SIZE, static_tokens::StaticTokens, Error, - SessionId, + aoi_finder::AoiIntersectionQueue, channels::ChannelSenders, payload::DEFAULT_CHUNK_SIZE, + static_tokens::StaticTokens, Error, SessionId, }, store::{traits::Storage, Origin, Store}, }; @@ -18,6 +18,7 @@ use super::payload::{send_payload_chunked, CurrentPayload}; pub struct DataSender { store: Store, send: ChannelSenders, + aoi_queue: AoiIntersectionQueue, static_tokens: StaticTokens, session_id: SessionId, } @@ -26,26 +27,41 @@ impl DataSender { pub fn new( store: Store, send: ChannelSenders, + aoi_queue: 
AoiIntersectionQueue, static_tokens: StaticTokens, session_id: SessionId, ) -> Self { Self { store, send, + aoi_queue, static_tokens, session_id, } } pub async fn run(mut self) -> Result<(), Error> { - let mut stream = self.store.entries().subscribe(self.session_id); + let mut entry_stream = self.store.entries().subscribe(self.session_id); loop { - match stream.recv().await { - Ok(entry) => { - self.send_entry(entry).await?; - } - Err(broadcast::error::RecvError::Closed) => break, - Err(broadcast::error::RecvError::Lagged(_count)) => { - // TODO: Queue another reconciliation + tokio::select! { + intersection = self.aoi_queue.recv_async() => { + let Ok(intersection) = intersection else { + break; + }; + self.store.entries().watch_area( + self.session_id, + intersection.namespace, + intersection.intersection.clone(), + ); + }, + entry = entry_stream.recv() => { + match entry { + Ok(entry) => self.send_entry(entry).await?, + Err(broadcast::error::RecvError::Closed) => break, + Err(broadcast::error::RecvError::Lagged(_count)) => { + // TODO: Queue another reconciliation + } + } + } } } diff --git a/iroh-willow/src/session/error.rs b/iroh-willow/src/session/error.rs index c5478fb5bb..eb6cb4914b 100644 --- a/iroh-willow/src/session/error.rs +++ b/iroh-willow/src/session/error.rs @@ -6,7 +6,7 @@ use crate::{ sync::ResourceHandle, willow::Unauthorised, }, - session::{pai::PaiError, resource::MissingResource}, + session::{pai_finder::PaiError, resource::MissingResource}, store::traits::SecretStoreError, util::channel::{ReadError, WriteError}, }; diff --git a/iroh-willow/src/session/pai.rs b/iroh-willow/src/session/pai_finder.rs similarity index 99% rename from iroh-willow/src/session/pai.rs rename to iroh-willow/src/session/pai_finder.rs index 4e787a930f..5dca869114 100644 --- a/iroh-willow/src/session/pai.rs +++ b/iroh-willow/src/session/pai_finder.rs @@ -487,7 +487,7 @@ mod tests { }, willow::Path, }, - session::{pai::PaiIntersection, Error}, + 
session::{pai_finder::PaiIntersection, Error}, }; use super::{Input, Output, PaiFinder}; diff --git a/iroh-willow/src/session/payload.rs b/iroh-willow/src/session/payload.rs index c85b65375c..6dbc5e7fbd 100644 --- a/iroh-willow/src/session/payload.rs +++ b/iroh-willow/src/session/payload.rs @@ -14,7 +14,7 @@ use crate::{ session::channels::ChannelSenders, }; -use super::{Error}; +use super::Error; pub const DEFAULT_CHUNK_SIZE: usize = 1024 * 64; diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index b822cdd7a4..66016c2ce6 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -11,14 +11,13 @@ use crate::{ grouping::{Area, ThreeDRange}, keys::NamespaceId, sync::{ - AreaOfInterestHandle, Fingerprint, LengthyEntry, Message, - ReconciliationAnnounceEntries, ReconciliationMessage, ReconciliationSendEntry, - ReconciliationSendFingerprint, ReconciliationSendPayload, - ReconciliationTerminatePayload, + AreaOfInterestHandle, Fingerprint, LengthyEntry, ReconciliationAnnounceEntries, + ReconciliationMessage, ReconciliationSendEntry, ReconciliationSendFingerprint, + ReconciliationSendPayload, ReconciliationTerminatePayload, }, }, session::{ - aoi_finder::{AoiIntersection}, + aoi_finder::{AoiIntersection, AoiIntersectionQueue}, channels::{ChannelSenders, MessageReceiver}, payload::{send_payload_chunked, CurrentPayload}, static_tokens::StaticTokens, @@ -28,7 +27,7 @@ use crate::{ traits::{EntryReader, EntryStorage, SplitAction, SplitOpts, Storage}, Origin, Store, }, - util::{stream::Cancelable}, + util::stream::Cancelable, }; #[derive(derive_more::Debug)] @@ -54,15 +53,15 @@ type TargetId = (AreaOfInterestHandle, AreaOfInterestHandle); #[derive(Debug)] struct Targets { - aoi_intersection_rx: flume::Receiver, + intersection_queue: AoiIntersectionQueue, targets: HashMap, init_queue: VecDeque, } impl Targets { - fn new(aoi_intersection_rx: flume::Receiver) -> Self { + fn new(intersection_queue: 
AoiIntersectionQueue) -> Self { Self { - aoi_intersection_rx, + intersection_queue, targets: Default::default(), init_queue: Default::default(), } @@ -88,8 +87,6 @@ impl Targets { } else { self.recv_next().await } - // let target_id = self.recv_next().await?; - // Some(target_id) } async fn get_eventually(&mut self, target_id: TargetId) -> Result<&mut State, Error> { @@ -107,18 +104,11 @@ impl Targets { } async fn recv_next(&mut self) -> Option { - let intersection = self.aoi_intersection_rx.recv_async().await.ok()?; + let intersection = self.intersection_queue.recv_async().await.ok()?; let (target_id, state) = State::new(intersection); self.targets.insert(target_id, state); Some(target_id) } - - // fn init(&mut self, intersection: AoiIntersection) -> TargetId { - // let (target_id, state) = State::new(intersection); - // self.targets.insert(target_id, state); - // self.init_queue.push_back(target_id); - // target_id - // } } #[derive(Debug)] @@ -167,7 +157,7 @@ impl Reconciler { pub fn new( store: Store, recv: Cancelable>, - aoi_intersections: flume::Receiver, + aoi_intersection_queue: AoiIntersectionQueue, static_tokens: StaticTokens, session_id: SessionId, send: ChannelSenders, @@ -184,7 +174,7 @@ impl Reconciler { current_payload: Default::default(), our_range_counter: 0, their_range_counter: 0, - targets: Targets::new(aoi_intersections), + targets: Targets::new(aoi_intersection_queue), pending_announced_entries: Default::default(), static_tokens, }) @@ -200,10 +190,6 @@ impl Reconciler { } } Some(target_id) = self.targets.init_next() => { - // // TODO: Move to another place. 
- // if self.session.mode().is_live() { - // self.store.entries().watch_area(*self.session.id(), intersection.namespace, intersection.intersection.clone()); - // } if self.our_role.is_alfie() { self.initiate(target_id).await?; } @@ -257,8 +243,6 @@ impl Reconciler { Ok(()) } - // fn mark_our_range_covered(&mut self, handle: ) - async fn received_send_fingerprint( &mut self, message: ReconciliationSendFingerprint, @@ -358,9 +342,6 @@ impl Reconciler { if let Some(c) = NonZeroU64::new(message.count) { self.pending_announced_entries = Some(c); } - // if message.count != 0 { - // self.pending_announced_entries = Some(message.count); - // } if message.want_response { let range_count = self.next_range_count_theirs(); @@ -505,13 +486,13 @@ impl Reconciler { Ok(()) } - async fn send(&mut self, message: impl Into) -> Result<(), Error> { - let message: Message = message.into(); + async fn send(&mut self, message: impl Into) -> Result<(), Error> { + let message: ReconciliationMessage = message.into(); let want_response = match &message { - Message::ReconciliationSendFingerprint(msg) => { + ReconciliationMessage::SendFingerprint(msg) => { Some((msg.sender_handle, msg.receiver_handle)) } - Message::ReconciliationAnnounceEntries(msg) if msg.want_response => { + ReconciliationMessage::AnnounceEntries(msg) if msg.want_response => { Some((msg.sender_handle, msg.receiver_handle)) } _ => None, diff --git a/iroh-willow/src/session/resource.rs b/iroh-willow/src/session/resource.rs index 029d2eb791..b699ef9fb4 100644 --- a/iroh-willow/src/session/resource.rs +++ b/iroh-willow/src/session/resource.rs @@ -3,57 +3,10 @@ use std::{ task::{Context, Poll, Waker}, }; -use crate::proto::{ - sync::{ - IsHandle, ResourceHandle, - }, -}; +use crate::proto::sync::{IsHandle, ResourceHandle}; use super::Error; -// #[derive(Debug, Default)] -// pub struct ResourceMaps { -// pub capabilities: ResourceMap, -// // pub areas_of_interest: ResourceMap, -// // pub static_tokens: ResourceMap, -// } -// impl 
ResourceMaps { -// // pub fn register_waker(&mut self, handle: ResourceHandle, waker: Waker) { -// // tracing::trace!(?handle, "register_notify"); -// // match handle { -// // ResourceHandle::AreaOfInterest(h) => self.areas_of_interest.register_waker(h, waker), -// // ResourceHandle::Capability(h) => self.capabilities.register_waker(h, waker), -// // ResourceHandle::StaticToken(h) => self.static_tokens.register_waker(h, waker), -// // ResourceHandle::Intersection(_h) => unimplemented!(), -// // } -// // } -// -// pub fn get(&self, selector: F, handle: H) -> Result -// where -// H: IsHandle, -// F: for<'a> Fn(&'a Self) -> &'a ResourceMap, -// R: Eq + PartialEq + Clone, -// { -// let store = selector(self); -// let res = store.try_get(&handle).cloned()?; -// Ok(res) -// } -// -// pub fn poll_get_eventually( -// &mut self, -// selector: F, -// handle: H, -// cx: &mut Context<'_>, -// ) -> Poll -// where -// F: for<'a> Fn(&'a mut Self) -> &'a mut ResourceMap, -// { -// let res = selector(self); -// let r = std::task::ready!(res.poll_get_eventually(handle, cx)); -// Poll::Ready(r.clone()) -// } -// } - #[derive(Debug)] pub struct ResourceMap { next_handle: u64, @@ -94,10 +47,6 @@ where handle } - // fn register_waker(&mut self, handle: H, notifier: Waker) { - // self.wakers.entry(handle).or_default().push_back(notifier) - // } - pub fn try_get(&self, handle: &H) -> Result<&R, MissingResource> { self.map .get(handle) @@ -106,26 +55,6 @@ where .ok_or_else(|| MissingResource((*handle).into())) } - // pub fn get(&self, handle: &H) -> Option<&R> { - // self.map.get(handle).as_ref().map(|r| &r.value) - // } - // - // pub async fn get_eventually(&mut self, handle: H) -> &R { - // std::future::poll_fn(|ctx| { - // // cannot use self.get() and self.register_waker() here due to borrow checker. 
- // if let Some(resource) = self.map.get(&handle).as_ref().map(|r| &r.value) { - // Poll::Ready(resource) - // } else { - // self.wakers - // .entry(handle) - // .or_default() - // .push_back(ctx.waker().to_owned()); - // Poll::Pending - // } - // }) - // .await - // } - pub fn poll_get_eventually(&mut self, handle: H, cx: &mut Context<'_>) -> Poll<&R> { // cannot use self.get() and self.register_waker() here due to borrow checker. if let Some(resource) = self.map.get(&handle).as_ref().map(|r| &r.value) { @@ -169,28 +98,6 @@ where } } -// #[derive(Debug, Clone)] -// pub struct SharedResourceMap(Rc>>); -// -// impl SharedResourceMap -// where -// H: IsHandle, -// { -// pub fn bind(&mut self, resource: R) -> H { -// self.0.borrow_mut().bind(resource) -// } -// } -// -// impl SharedResourceMap -// where -// H: IsHandle, -// R: Eq + PartialEq, -// { -// pub fn bind_if_new(&mut self, resource: R) -> (H, bool) { -// self.0.borrow_mut().bind_if_new(resource) -// } -// } - #[derive(Debug, thiserror::Error)] #[error("missing resource {0:?}")] pub struct MissingResource(pub ResourceHandle); diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 789551767b..7a7d609270 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -2,20 +2,21 @@ use std::rc::Rc; use futures_concurrency::stream::StreamExt as _; use futures_lite::StreamExt as _; +use genawaiter::GeneratorState; use strum::IntoEnumIterator; use tokio_util::sync::CancellationToken; use tracing::{debug, error_span, trace, warn, Span}; use crate::{ auth::InterestMap, - proto::sync::{ControlIssueGuarantee, LogicalChannel, Message}, + proto::sync::{ControlIssueGuarantee, InitialTransmission, LogicalChannel, Message}, session::{ aoi_finder::AoiFinder, capabilities::Capabilities, channels::{ChannelSenders, LogicalChannelReceivers}, - pai::{self, PaiFinder, PaiIntersection}, + pai_finder::{self as pai, PaiFinder, PaiIntersection}, static_tokens::StaticTokens, - Channels, 
Error, InitialTransmission, Role, Session, SessionId, SessionInit, + Channels, Error, Role, SessionId, SessionInit, }, store::{ traits::{SecretStorage, Storage}, @@ -33,281 +34,264 @@ use super::{ const INITIAL_GUARANTEES: u64 = u64::MAX; -impl Session { - pub async fn run( - store: Store, - channels: Channels, - cancel_token: CancellationToken, - session_id: SessionId, - our_role: Role, - init: SessionInit, - initial_transmission: InitialTransmission, - ) -> Result<(), Error> { - let Channels { send, recv } = channels; - let ChannelReceivers { - control_recv, - logical_recv: - LogicalChannelReceivers { - reconciliation_recv, - static_tokens_recv, - capability_recv, - aoi_recv, - data_recv, - intersection_recv, - }, - } = recv; +pub async fn run_session( + store: Store, + channels: Channels, + cancel_token: CancellationToken, + session_id: SessionId, + our_role: Role, + init: SessionInit, + initial_transmission: InitialTransmission, +) -> Result<(), Error> { + let Channels { send, recv } = channels; + let ChannelReceivers { + control_recv, + logical_recv: + LogicalChannelReceivers { + reconciliation_recv, + static_tokens_recv, + capability_recv, + aoi_recv, + data_recv, + intersection_recv, + }, + } = recv; - // Make all our receivers close once the cancel_token is triggered. - let control_recv = Cancelable::new(control_recv, cancel_token.clone()); - let reconciliation_recv = Cancelable::new(reconciliation_recv, cancel_token.clone()); - let intersection_recv = Cancelable::new(intersection_recv, cancel_token.clone()); - let mut static_tokens_recv = Cancelable::new(static_tokens_recv, cancel_token.clone()); - let mut capability_recv = Cancelable::new(capability_recv, cancel_token.clone()); - let mut aoi_recv = Cancelable::new(aoi_recv, cancel_token.clone()); - let mut data_recv = Cancelable::new(data_recv, cancel_token.clone()); + // Make all our receivers close once the cancel_token is triggered. 
+ let control_recv = Cancelable::new(control_recv, cancel_token.clone()); + let reconciliation_recv = Cancelable::new(reconciliation_recv, cancel_token.clone()); + let intersection_recv = Cancelable::new(intersection_recv, cancel_token.clone()); + let mut static_tokens_recv = Cancelable::new(static_tokens_recv, cancel_token.clone()); + let mut capability_recv = Cancelable::new(capability_recv, cancel_token.clone()); + let mut aoi_recv = Cancelable::new(aoi_recv, cancel_token.clone()); + let mut data_recv = Cancelable::new(data_recv, cancel_token.clone()); - let caps = Capabilities::new( - initial_transmission.our_nonce, - initial_transmission.received_commitment, - ); - let tokens = StaticTokens::default(); - let aoi_finder = AoiFinder::default(); + let caps = Capabilities::new( + initial_transmission.our_nonce, + initial_transmission.received_commitment, + ); + let tokens = StaticTokens::default(); + let aoi_finder = AoiFinder::default(); - let tasks = Tasks::default(); + let tasks = Tasks::default(); - let interests = store.auth().find_read_caps_for_interests(init.interests)?; - let interests = Rc::new(interests); + let interests = store.auth().find_read_caps_for_interests(init.interests)?; + let interests = Rc::new(interests); - // Setup the private area intersection finder. 
- let (pai_inbox_tx, pai_inbox_rx) = flume::bounded(128); - tasks.spawn(error_span!("pai"), { - let store = store.clone(); - let send = send.clone(); - let caps = caps.clone(); - let inbox = pai_inbox_rx - .into_stream() - .merge(intersection_recv.map(pai::Input::ReceivedMessage)); - let interests = Rc::clone(&interests); - let aoi_finder = aoi_finder.clone(); - async move { - use genawaiter::GeneratorState; - use pai::Output; - let mut gen = PaiFinder::run_gen(inbox); - loop { - match gen.async_resume().await { - GeneratorState::Yielded(output) => match output { - Output::SendMessage(message) => send.send(message).await?, - Output::NewIntersection(intersection) => { - on_pai_intersection( - &interests, - store.secrets(), - &aoi_finder, - &caps, - &send, - intersection, - ) - .await?; - } - Output::SignAndSendSubspaceCap(handle, cap) => { - let message = - caps.sign_subspace_capabiltiy(store.secrets(), &cap, handle)?; - send.send(Box::new(message)).await?; - } - }, - GeneratorState::Complete(res) => { - return res; + // Setup the private area intersection finder. 
+ let (pai_inbox_tx, pai_inbox_rx) = flume::bounded(128); + tasks.spawn(error_span!("pai"), { + let store = store.clone(); + let send = send.clone(); + let caps = caps.clone(); + let inbox = pai_inbox_rx + .into_stream() + .merge(intersection_recv.map(pai::Input::ReceivedMessage)); + let interests = Rc::clone(&interests); + let aoi_finder = aoi_finder.clone(); + async move { + let mut gen = PaiFinder::run_gen(inbox); + loop { + match gen.async_resume().await { + GeneratorState::Yielded(output) => match output { + pai::Output::SendMessage(message) => send.send(message).await?, + pai::Output::NewIntersection(intersection) => { + on_pai_intersection( + &interests, + store.secrets(), + &aoi_finder, + &caps, + &send, + intersection, + ) + .await?; } + pai::Output::SignAndSendSubspaceCap(handle, cap) => { + let message = + caps.sign_subspace_capabiltiy(store.secrets(), cap, handle)?; + send.send(Box::new(message)).await?; + } + }, + GeneratorState::Complete(res) => { + return res; } } } - }); + } + }); - // Spawn a task to handle incoming static tokens. - tasks.spawn(error_span!("stt"), { - let tokens = tokens.clone(); - async move { - while let Some(message) = static_tokens_recv.try_next().await? { - tokens.bind_theirs(message.static_token); - } - Ok(()) + // Spawn a task to handle incoming static tokens. + tasks.spawn(error_span!("stt"), { + let tokens = tokens.clone(); + async move { + while let Some(message) = static_tokens_recv.try_next().await? { + tokens.bind_theirs(message.static_token); } - }); - - // Only setup data receiver if session is configured in live mode. - if init.mode == SessionMode::Live { - tasks.spawn(error_span!("data-recv"), { - let store = store.clone(); - let tokens = tokens.clone(); - async move { - let mut data_receiver = DataReceiver::new(store, tokens, session_id); - while let Some(message) = data_recv.try_next().await? 
{ - data_receiver.on_message(message).await?; - } - Ok(()) - } - }); - tasks.spawn(error_span!("data-send"), { - let store = store.clone(); - let tokens = tokens.clone(); - let send = send.clone(); - async move { - DataSender::new(store, send, tokens, session_id) - .run() - .await?; - Ok(()) - } - }); - tasks.spawn(error_span!("aoi-watch"), { - let store = store.clone(); - let aoi_intersections = aoi_finder.subscribe(); - async move { - while let Ok(intersection) = aoi_intersections.recv_async().await { - store.entries().watch_area( - session_id, - intersection.namespace, - intersection.intersection.clone(), - ); - } - Ok(()) - } - }); + Ok(()) } + }); - // Spawn a task to handle incoming capabilities. - tasks.spawn(error_span!("cap-recv"), { - let to_pai = pai_inbox_tx.clone(); - let caps = caps.clone(); + // Only setup data receiver if session is configured in live mode. + if init.mode == SessionMode::Live { + tasks.spawn(error_span!("data-recv"), { + let store = store.clone(); + let tokens = tokens.clone(); async move { - while let Some(message) = capability_recv.try_next().await? { - let handle = message.handle; - caps.bind_and_validate_theirs(message.capability, message.signature)?; - to_pai - .send_async(pai::Input::ReceivedReadCapForIntersection(handle)) - .await - .map_err(|_| Error::InvalidState("PAI actor dead"))?; + let mut data_receiver = DataReceiver::new(store, tokens, session_id); + while let Some(message) = data_recv.try_next().await? { + data_receiver.on_message(message).await?; } Ok(()) } }); - - // Spawn a task to handle incoming areas of interest. - tasks.spawn(error_span!("aoi-recv"), { - let aoi_finder = aoi_finder.clone(); - let caps = caps.clone(); + tasks.spawn(error_span!("data-send"), { + let store = store.clone(); + let tokens = tokens.clone(); + let send = send.clone(); + let aoi_intersections = aoi_finder.subscribe(); async move { - while let Some(message) = aoi_recv.try_next().await? 
{ - let cap = caps.get_theirs_eventually(message.authorisation).await; - aoi_finder.validate_and_bind_theirs(&cap, message.area_of_interest)?; - } - aoi_finder.close(); + DataSender::new(store, send, aoi_intersections, tokens, session_id) + .run() + .await?; Ok(()) } }); + } - // Spawn a task to handle reconciliation messages - tasks.spawn(error_span!("rec"), { - let cancel_token = cancel_token.clone(); - let aoi_intersections = aoi_finder.subscribe(); - let reconciler = Reconciler::new( - store.clone(), - reconciliation_recv, - aoi_intersections, - tokens.clone(), - session_id, - send.clone(), - our_role, - )?; - async move { - let res = reconciler.run().await; - if res.is_ok() && !init.mode.is_live() { - debug!("reconciliation complete and not in live mode: trigger cancel"); - cancel_token.cancel(); - } - res + // Spawn a task to handle incoming capabilities. + tasks.spawn(error_span!("cap-recv"), { + let to_pai = pai_inbox_tx.clone(); + let caps = caps.clone(); + async move { + while let Some(message) = capability_recv.try_next().await? { + let handle = message.handle; + caps.validate_and_bind_theirs(message.capability, message.signature)?; + to_pai + .send_async(pai::Input::ReceivedReadCapForIntersection(handle)) + .await + .map_err(|_| Error::InvalidState("PAI actor dead"))?; } - }); + Ok(()) + } + }); - // Spawn a task to handle control messages - tasks.spawn(error_span!("ctl-recv"), { - let cancel_token = cancel_token.clone(); - let fut = control_loop( - our_role, - interests, - caps, - send.clone(), - tasks.clone(), - control_recv, - pai_inbox_tx, - ); - async move { - let res = fut.await; - if res.is_ok() { - debug!("control channel closed: trigger cancel"); - cancel_token.cancel(); - } - res + // Spawn a task to handle incoming areas of interest. + tasks.spawn(error_span!("aoi-recv"), { + let aoi_finder = aoi_finder.clone(); + let caps = caps.clone(); + async move { + while let Some(message) = aoi_recv.try_next().await? 
{ + let cap = caps.get_theirs_eventually(message.authorisation).await; + aoi_finder.validate_and_bind_theirs(&cap, message.area_of_interest)?; } - }); + aoi_finder.close(); + Ok(()) + } + }); - // Wait until the session is cancelled, or until a task fails. - let result = loop { - tokio::select! { - _ = cancel_token.cancelled() => { - debug!("cancel token triggered: close session"); - break Ok(()); - }, - Some((span, result)) = tasks.join_next() => { - let _guard = span.enter(); - trace!(?result, remaining = tasks.remaining_tasks(), "task complete"); - match result { - Err(err) => { - warn!(?err, "session task paniced: abort session"); - break Err(Error::TaskFailed(err)); - }, - Ok(Err(err)) => { - warn!(?err, "session task failed: abort session"); - break Err(err); - } - Ok(Ok(())) => {} - } - }, + // Spawn a task to handle reconciliation messages + tasks.spawn(error_span!("rec"), { + let cancel_token = cancel_token.clone(); + let aoi_intersections = aoi_finder.subscribe(); + let reconciler = Reconciler::new( + store.clone(), + reconciliation_recv, + aoi_intersections, + tokens.clone(), + session_id, + send.clone(), + our_role, + )?; + async move { + let res = reconciler.run().await; + if res.is_ok() && !init.mode.is_live() { + debug!("reconciliation complete and not in live mode: trigger cancel"); + cancel_token.cancel(); } - }; - - if result.is_err() { - debug!("aborting session"); - tasks.abort_all(); - } else { - debug!("closing session"); + res } + }); - // Unsubscribe from the store. This stops the data send task. - store.entries().unsubscribe(&session_id); - - // Wait for remaining tasks to terminate to catch any panics. - // TODO: Add timeout? 
- while let Some((span, result)) = tasks.join_next().await { - let _guard = span.enter(); - trace!( - ?result, - remaining = tasks.remaining_tasks(), - "task complete" - ); - match result { - Err(err) if err.is_cancelled() => {} - Err(err) => warn!("task paniced: {err:?}"), - Ok(Err(err)) => warn!("task failed: {err:?}"), - Ok(Ok(())) => {} + // Spawn a task to handle control messages + tasks.spawn(error_span!("ctl-recv"), { + let cancel_token = cancel_token.clone(); + let fut = control_loop( + our_role, + interests, + caps, + send.clone(), + tasks.clone(), + control_recv, + pai_inbox_tx, + ); + async move { + let res = fut.await; + if res.is_ok() { + debug!("control channel closed: trigger cancel"); + cancel_token.cancel(); } + res } + }); - // Close our channel senders. - // This will stop the network send loop after all pending data has been sent. - send.close_all(); + // Wait until the session is cancelled, or until a task fails. + let result = loop { + tokio::select! { + _ = cancel_token.cancelled() => { + debug!("cancel token triggered: close session"); + break Ok(()); + }, + Some((span, result)) = tasks.join_next() => { + let _guard = span.enter(); + trace!(?result, remaining = tasks.remaining_tasks(), "task complete"); + match result { + Err(err) => { + warn!(?err, "session task paniced: abort session"); + break Err(Error::TaskFailed(err)); + }, + Ok(Err(err)) => { + warn!(?err, "session task failed: abort session"); + break Err(err); + } + Ok(Ok(())) => {} + } + }, + } + }; - debug!(success = result.is_ok(), "session complete"); - result + if result.is_err() { + debug!("aborting session"); + tasks.abort_all(); + } else { + debug!("closing session"); } + + // Unsubscribe from the store. This stops the data send task. + store.entries().unsubscribe(&session_id); + + // Wait for remaining tasks to terminate to catch any panics. + // TODO: Add timeout? 
+ while let Some((span, result)) = tasks.join_next().await { + let _guard = span.enter(); + trace!( + ?result, + remaining = tasks.remaining_tasks(), + "task complete" + ); + match result { + Err(err) if err.is_cancelled() => {} + Err(err) => warn!("task paniced: {err:?}"), + Ok(Err(err)) => warn!("task failed: {err:?}"), + Ok(Ok(())) => {} + } + } + + // Close our channel senders. + // This will stop the network send loop after all pending data has been sent. + send.close_all(); + + debug!(success = result.is_ok(), "session complete"); + result } pub type Tasks = SharedJoinMap>; @@ -322,8 +306,6 @@ async fn control_loop( to_pai: flume::Sender, ) -> Result<(), Error> { debug!(role = ?our_role, "start session"); - let mut commitment_revealed = false; - // Reveal our nonce. let reveal_message = caps.reveal_commitment()?; sender.send(reveal_message).await?; @@ -337,14 +319,11 @@ async fn control_loop( sender.send(msg).await?; } + // Handle incoming messages on the control channel. while let Some(message) = control_recv.try_next().await? 
{ match message { Message::CommitmentReveal(msg) => { - caps.on_commitment_reveal(our_role, msg.nonce)?; - if commitment_revealed { - return Err(Error::InvalidMessageInCurrentState)?; - } - commitment_revealed = true; + caps.received_commitment_reveal(our_role, msg.nonce)?; let submit_interests_fut = { let to_pai = to_pai.clone(); @@ -373,7 +352,7 @@ async fn control_loop( .map_err(|_| Error::InvalidState("PAI actor dead"))?; } Message::PaiReplySubspaceCapability(msg) => { - caps.verify_subspace_capability(&msg.capability, &msg.signature)?; + caps.verify_subspace_cap(&msg.capability, &msg.signature)?; to_pai .send_async(pai::Input::ReceivedVerifiedSubspaceCapReply( msg.handle, From c6adf66d9ad5f2c785f368885885970e0b9b06d5 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Mon, 8 Jul 2024 23:43:09 +0200 Subject: [PATCH 082/198] wip: refactor reconciler and add event bus --- iroh-willow/src/proto/sync.rs | 12 + iroh-willow/src/session.rs | 1 + iroh-willow/src/session/data.rs | 6 +- iroh-willow/src/session/payload.rs | 22 +- iroh-willow/src/session/reconciler.rs | 645 ++++++++++++++------------ iroh-willow/src/session/run.rs | 10 + iroh-willow/src/store/traits.rs | 10 +- 7 files changed, 398 insertions(+), 308 deletions(-) diff --git a/iroh-willow/src/proto/sync.rs b/iroh-willow/src/proto/sync.rs index 8bb8884169..043f60f15c 100644 --- a/iroh-willow/src/proto/sync.rs +++ b/iroh-willow/src/proto/sync.rs @@ -704,6 +704,12 @@ pub struct ReconciliationSendFingerprint { pub covers: Option, } +impl ReconciliationSendFingerprint { + pub fn handles(&self) -> (AreaOfInterestHandle, AreaOfInterestHandle) { + (self.receiver_handle, self.sender_handle) + } +} + /// Prepare transmission of the LengthyEntries a peer has in a 3dRange as part of 3d range-based set reconciliation. 
#[derive(Debug, Serialize, Deserialize, Clone)] pub struct ReconciliationAnnounceEntries { @@ -725,6 +731,12 @@ pub struct ReconciliationAnnounceEntries { pub covers: Option, } +impl ReconciliationAnnounceEntries { + pub fn handles(&self) -> (AreaOfInterestHandle, AreaOfInterestHandle) { + (self.receiver_handle, self.sender_handle) + } +} + /// Transmit a [`LengthyEntry`] as part of 3d range-based set reconciliation. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct ReconciliationSendEntry { diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 1d30aa4e02..8ffeefaaba 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -7,6 +7,7 @@ mod capabilities; pub mod channels; mod data; mod error; +mod events; mod pai_finder; mod payload; mod reconciler; diff --git a/iroh-willow/src/session/data.rs b/iroh-willow/src/session/data.rs index 72ec498d72..a02d91176c 100644 --- a/iroh-willow/src/session/data.rs +++ b/iroh-willow/src/session/data.rs @@ -130,7 +130,7 @@ impl DataReceiver { } async fn on_send_entry(&mut self, message: DataSendEntry) -> Result<(), Error> { - self.current_payload.assert_inactive()?; + self.current_payload.ensure_none()?; let authorised_entry = self .static_tokens .authorise_entry_eventually( @@ -142,8 +142,10 @@ impl DataReceiver { self.store .entries() .ingest(&authorised_entry, Origin::Remote(self.session_id))?; + let entry = authorised_entry.into_entry(); + // TODO: handle offset self.current_payload - .set(authorised_entry.into_entry(), None)?; + .set(entry.payload_digest, entry.payload_length)?; Ok(()) } diff --git a/iroh-willow/src/session/payload.rs b/iroh-willow/src/session/payload.rs index 6dbc5e7fbd..c29c6045f6 100644 --- a/iroh-willow/src/session/payload.rs +++ b/iroh-willow/src/session/payload.rs @@ -7,10 +7,7 @@ use iroh_blobs::{ }; use crate::{ - proto::{ - sync::Message, - willow::{Entry, PayloadDigest}, - }, + proto::{sync::Message, willow::PayloadDigest}, 
session::channels::ChannelSenders, }; @@ -53,7 +50,7 @@ pub struct CurrentPayload(Option); #[derive(Debug)] struct CurrentPayloadInner { - entry: Entry, + payload_digest: PayloadDigest, expected_length: u64, received_length: u64, writer: Option, @@ -71,13 +68,16 @@ impl CurrentPayload { // Self::default() // } - pub fn set(&mut self, entry: Entry, expected_length: Option) -> Result<(), Error> { + pub fn set( + &mut self, + payload_digest: PayloadDigest, + expected_length: u64, + ) -> Result<(), Error> { if self.0.is_some() { return Err(Error::InvalidMessageInCurrentState); } - let expected_length = expected_length.unwrap_or(entry.payload_length); self.0 = Some(CurrentPayloadInner { - entry, + payload_digest, writer: None, expected_length, received_length: 0, @@ -129,10 +129,10 @@ impl CurrentPayload { .ok_or_else(|| Error::InvalidMessageInCurrentState)?; drop(writer.sender); let (tag, len) = writer.fut.await.map_err(Error::PayloadStore)?; - if *tag.hash() != state.entry.payload_digest { + if *tag.hash() != state.payload_digest { return Err(Error::PayloadDigestMismatch); } - if len != state.entry.payload_length { + if len != state.expected_length { return Err(Error::PayloadDigestMismatch); } // TODO: protect from gc @@ -148,7 +148,7 @@ impl CurrentPayload { pub fn is_active(&self) -> bool { self.0.as_ref().map(|s| s.writer.is_some()).unwrap_or(false) } - pub fn assert_inactive(&self) -> Result<(), Error> { + pub fn ensure_none(&self) -> Result<(), Error> { if self.is_active() { Err(Error::InvalidMessageInCurrentState) } else { diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index 66016c2ce6..0a71a3531e 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -1,9 +1,12 @@ use std::{ - collections::{HashMap, HashSet, VecDeque}, + collections::{HashMap, HashSet}, num::NonZeroU64, + ops::ControlFlow, }; +use bytes::Bytes; use futures_lite::StreamExt; +use iroh_blobs::store::Store as 
PayloadStore; use tracing::{debug, trace}; use crate::{ @@ -15,10 +18,12 @@ use crate::{ ReconciliationMessage, ReconciliationSendEntry, ReconciliationSendFingerprint, ReconciliationSendPayload, ReconciliationTerminatePayload, }, + willow::PayloadDigest, }, session::{ aoi_finder::{AoiIntersection, AoiIntersectionQueue}, channels::{ChannelSenders, MessageReceiver}, + events::{Event, EventEmitter}, payload::{send_payload_chunked, CurrentPayload}, static_tokens::StaticTokens, Error, Role, SessionId, @@ -32,127 +37,15 @@ use crate::{ #[derive(derive_more::Debug)] pub struct Reconciler { - session_id: SessionId, - our_role: Role, - - store: Store, - snapshot: ::Snapshot, - send: ChannelSenders, + shared: Shared, recv: Cancelable>, - - static_tokens: StaticTokens, - targets: Targets, - current_payload: CurrentPayload, - - our_range_counter: u64, - their_range_counter: u64, - pending_announced_entries: Option, + events: EventEmitter, + targets: TargetMap, + current_entry: CurrentEntry, } type TargetId = (AreaOfInterestHandle, AreaOfInterestHandle); -#[derive(Debug)] -struct Targets { - intersection_queue: AoiIntersectionQueue, - targets: HashMap, - init_queue: VecDeque, -} - -impl Targets { - fn new(intersection_queue: AoiIntersectionQueue) -> Self { - Self { - intersection_queue, - targets: Default::default(), - init_queue: Default::default(), - } - } - fn iter(&self) -> impl Iterator { - self.targets.values() - } - - fn get(&self, target: &TargetId) -> Result<&State, Error> { - self.targets - .get(target) - .ok_or(Error::MissingResource(target.1.into())) - } - fn get_mut(&mut self, target: &TargetId) -> Result<&mut State, Error> { - self.targets - .get_mut(target) - .ok_or(Error::MissingResource(target.1.into())) - } - - async fn init_next(&mut self) -> Option { - if let Some(target_id) = self.init_queue.pop_front() { - Some(target_id) - } else { - self.recv_next().await - } - } - - async fn get_eventually(&mut self, target_id: TargetId) -> Result<&mut State, Error> { - 
if self.targets.contains_key(&target_id) { - return Ok(self.targets.get_mut(&target_id).unwrap()); - } - - while let Some(next_target_id) = self.recv_next().await { - self.init_queue.push_back(next_target_id); - if next_target_id == target_id { - return Ok(self.targets.get_mut(&target_id).unwrap()); - } - } - Err(Error::InvalidState("aoi finder closed")) - } - - async fn recv_next(&mut self) -> Option { - let intersection = self.intersection_queue.recv_async().await.ok()?; - let (target_id, state) = State::new(intersection); - self.targets.insert(target_id, state); - Some(target_id) - } -} - -#[derive(Debug)] -struct State { - namespace: NamespaceId, - area: Area, - our_uncovered_ranges: HashSet, - started: bool, -} - -impl State { - pub fn new(intersection: AoiIntersection) -> (TargetId, Self) { - let target_id = (intersection.our_handle, intersection.their_handle); - let state = Self { - namespace: intersection.namespace, - area: intersection.intersection, - our_uncovered_ranges: Default::default(), - started: false, - }; - (target_id, state) - } - - pub fn is_complete(&self) -> bool { - self.started && self.our_uncovered_ranges.is_empty() - } - - pub fn mark_our_range_pending(&mut self, range_count: u64) { - tracing::warn!("mark ours pending: {range_count}"); - self.started = true; - self.our_uncovered_ranges.insert(range_count); - } - - pub fn mark_our_range_covered(&mut self, range_count: u64) -> Result<(), Error> { - tracing::warn!(?self, "mark ours covered: {range_count}"); - if !self.our_uncovered_ranges.remove(&range_count) { - Err(Error::InvalidState( - "attempted to mark an unknown range as covered", - )) - } else { - Ok(()) - } - } -} - impl Reconciler { pub fn new( store: Store, @@ -162,21 +55,22 @@ impl Reconciler { session_id: SessionId, send: ChannelSenders, our_role: Role, + events: EventEmitter, ) -> Result { - let snapshot = store.entries().snapshot()?; - Ok(Self { - session_id, - send, - our_role, + let shared = Shared { store, - recv, - 
snapshot, - current_payload: Default::default(), - our_range_counter: 0, - their_range_counter: 0, - targets: Targets::new(aoi_intersection_queue), - pending_announced_entries: Default::default(), + our_role, + send, static_tokens, + session_id, + }; + Ok(Self { + shared, + recv, + targets: TargetMap::new(aoi_intersection_queue), + current_entry: Default::default(), + + events, }) } @@ -186,78 +80,321 @@ impl Reconciler { message = self.recv.try_next() => { match message? { None => break, - Some(message) => self.on_message(message).await?, + Some(message) => match self.received_message(message).await? { + ControlFlow::Continue(_) => {} + ControlFlow::Break(_) => { + debug!("reconciliation complete"); + break; + } + } } } - Some(target_id) = self.targets.init_next() => { - if self.our_role.is_alfie() { - self.initiate(target_id).await?; - } + Ok(intersection) = self.targets.aoi_intersection_queue.recv_async() => { + let intersection = intersection; + let area = intersection.intersection.clone(); + self.targets.init_target(&self.shared, intersection).await?; + self.events.send(Event::AreaIntersection(area)).await?; } } - if self.is_complete() { - debug!("reconciliation complete"); - break; - } } Ok(()) } - fn is_complete(&self) -> bool { - if self.current_payload.is_active() { - return false; - } - if self.pending_announced_entries.is_some() { - return false; - } - self.targets.iter().all(|t| t.is_complete()) - } - - async fn on_message(&mut self, message: ReconciliationMessage) -> Result<(), Error> { + async fn received_message( + &mut self, + message: ReconciliationMessage, + ) -> Result, Error> { match message { ReconciliationMessage::SendFingerprint(message) => { - self.received_send_fingerprint(message).await? + self.targets + .get_eventually(&self.shared, &message.handles()) + .await? 
+ .received_send_fingerprint(&self.shared, message) + .await?; } ReconciliationMessage::AnnounceEntries(message) => { - let res = self.received_announce_entries(message).await; - tracing::warn!("received_announce_entries DONE: {res:?}"); - res?; + let target_id = message.handles(); + self.current_entry + .received_announce_entries(target_id, message.count)?; + let target = self + .targets + .get_eventually(&self.shared, &target_id) + .await?; + target + .received_announce_entries(&self.shared, message) + .await?; + if target.is_complete() && self.current_entry.is_none() { + return self.complete_target(target_id).await; + } + } + ReconciliationMessage::SendEntry(message) => { + let authorised_entry = self + .shared + .static_tokens + .authorise_entry_eventually( + message.entry.entry, + message.static_token_handle, + message.dynamic_token, + ) + .await?; + self.current_entry.received_entry( + authorised_entry.entry().payload_digest, + message.entry.available, + )?; + self.shared + .store + .entries() + .ingest(&authorised_entry, Origin::Remote(self.shared.session_id))?; } - ReconciliationMessage::SendEntry(message) => self.received_send_entry(message).await?, ReconciliationMessage::SendPayload(message) => { - self.received_send_payload(message).await? + self.current_entry + .received_send_payload(self.shared.store.payloads(), message.bytes) + .await?; } - ReconciliationMessage::TerminatePayload(message) => { - self.received_terminate_payload(message).await? + ReconciliationMessage::TerminatePayload(_message) => { + if let Some(completed_target) = + self.current_entry.received_terminate_payload().await? 
+ { + let target = self + .targets + .map + .get(&completed_target) + .expect("target to exist"); + if target.is_complete() { + return self.complete_target(target.id()).await; + } + } } }; + Ok(ControlFlow::Continue(())) + } + + pub async fn complete_target(&mut self, id: TargetId) -> Result, Error> { + let target = self + .targets + .map + .remove(&id) + .ok_or(Error::InvalidMessageInCurrentState)?; + let event = Event::Reconciled(target.area); + self.events.send(event).await?; + if self.targets.map.is_empty() { + Ok(ControlFlow::Break(())) + } else { + Ok(ControlFlow::Continue(())) + } + } +} + +#[derive(Debug)] +struct TargetMap { + map: HashMap>, + aoi_intersection_queue: AoiIntersectionQueue, +} + +impl TargetMap { + pub fn new(aoi_intersection_queue: AoiIntersectionQueue) -> Self { + Self { + map: Default::default(), + aoi_intersection_queue, + } + } + pub async fn get_eventually( + &mut self, + shared: &Shared, + requested_id: &TargetId, + ) -> Result<&mut Target, Error> { + tracing::info!("aoi wait: {requested_id:?}"); + if !self.map.contains_key(requested_id) { + self.wait_for_target(shared, requested_id).await?; + } + return Ok(self.map.get_mut(requested_id).unwrap()); + } + + async fn wait_for_target( + &mut self, + shared: &Shared, + requested_id: &TargetId, + ) -> Result<(), Error> { + loop { + let intersection = self + .aoi_intersection_queue + .recv_async() + .await + .map_err(|_| Error::InvalidState("aoi finder closed"))?; + let id = self.init_target(shared, intersection).await?; + if id == *requested_id { + break Ok(()); + } + } + } + + async fn init_target( + &mut self, + shared: &Shared, + intersection: AoiIntersection, + ) -> Result { + let snapshot = shared.store.entries().snapshot()?; + let target = Target::init(snapshot, shared, intersection).await?; + let id = target.id(); + tracing::info!("init {id:?}"); + self.map.insert(id, target); + Ok(id) + } +} + +#[derive(Debug, Default)] +struct CurrentEntry(Option); + +impl CurrentEntry { + pub fn 
is_none(&self) -> bool { + self.0.is_none() + } + + pub fn received_announce_entries( + &mut self, + target: TargetId, + count: u64, + ) -> Result, Error> { + if self.0.is_some() { + return Err(Error::InvalidMessageInCurrentState); + } + if let Some(count) = NonZeroU64::new(count) { + self.0 = Some(EntryState { + target, + remaining: Some(count), + payload: CurrentPayload::default(), + }); + Ok(None) + } else { + Ok(Some(target)) + } + } + + pub fn received_entry( + &mut self, + payload_digest: PayloadDigest, + expected_length: u64, + ) -> Result<(), Error> { + let state = self.get_mut()?; + state.payload.ensure_none()?; + state.remaining = match state.remaining.take() { + None => return Err(Error::InvalidMessageInCurrentState), + Some(c) => NonZeroU64::new(c.get().saturating_sub(1)), + }; + state.payload.set(payload_digest, expected_length)?; Ok(()) } - async fn initiate(&mut self, target_id: TargetId) -> Result<(), Error> { - let target = self.targets.get(&target_id)?; - let range = target.area.into_range(); - let fingerprint = self.snapshot.fingerprint(target.namespace, &range)?; - self.send_fingerprint(target_id, range, fingerprint, None) + pub async fn received_send_payload( + &mut self, + store: &P, + bytes: Bytes, + ) -> Result<(), Error> { + self.get_mut()?.payload.recv_chunk(store, bytes).await?; + Ok(()) + } + + pub async fn received_terminate_payload(&mut self) -> Result, Error> { + let s = self.get_mut()?; + s.payload.finalize().await?; + if s.remaining.is_none() { + let target_id = s.target; + self.0 = None; + Ok(Some(target_id)) + } else { + Ok(None) + } + } + + pub fn get_mut(&mut self) -> Result<&mut EntryState, Error> { + match self.0.as_mut() { + Some(s) => Ok(s), + None => Err(Error::InvalidMessageInCurrentState), + } + } +} + +#[derive(Debug)] +struct EntryState { + target: TargetId, + remaining: Option, + payload: CurrentPayload, +} + +#[derive(Debug)] +struct Shared { + store: Store, + our_role: Role, + send: ChannelSenders, + static_tokens: 
StaticTokens, + session_id: SessionId, +} + +#[derive(Debug)] +struct Target { + snapshot: ::Snapshot, + + our_handle: AreaOfInterestHandle, + their_handle: AreaOfInterestHandle, + namespace: NamespaceId, + area: Area, + + our_uncovered_ranges: HashSet, + started: bool, + + our_range_counter: u64, + their_range_counter: u64, +} + +impl Target { + fn id(&self) -> TargetId { + (self.our_handle, self.their_handle) + } + async fn init( + snapshot: ::Snapshot, + shared: &Shared, + intersection: AoiIntersection, + ) -> Result { + let mut this = Target { + snapshot, + our_handle: intersection.our_handle, + their_handle: intersection.their_handle, + namespace: intersection.namespace, + area: intersection.intersection, + our_uncovered_ranges: Default::default(), + started: false, + our_range_counter: 0, + their_range_counter: 0, + }; + if shared.our_role == Role::Alfie { + this.initiate(shared).await?; + } + Ok(this) + } + + async fn initiate(&mut self, shared: &Shared) -> Result<(), Error> { + let range = self.area.into_range(); + let fingerprint = self.snapshot.fingerprint(self.namespace, &range)?; + self.send_fingerprint(shared, range, fingerprint, None) .await?; Ok(()) } + pub fn is_complete(&self) -> bool { + self.started && self.our_uncovered_ranges.is_empty() + } + async fn received_send_fingerprint( &mut self, + shared: &Shared, message: ReconciliationSendFingerprint, ) -> Result<(), Error> { - let range_count = self.next_range_count_theirs(); - - let target_id = (message.receiver_handle, message.sender_handle); - let target = self.targets.get_eventually(target_id).await?; - let namespace = target.namespace; - if let Some(range_count) = message.covers { - target.mark_our_range_covered(range_count)?; + self.mark_our_range_covered(range_count)?; } + let range_count = self.next_range_count_theirs(); - let our_fingerprint = self.snapshot.fingerprint(namespace, &message.range)?; + let our_fingerprint = self.snapshot.fingerprint(self.namespace, &message.range)?; // case 
1: fingerprint match. if our_fingerprint == message.fingerprint { @@ -270,30 +407,21 @@ impl Reconciler { receiver_handle: message.sender_handle, covers: Some(range_count), }; - self.send(reply).await?; + shared.send.send(reply).await?; } // case 2: fingerprint is empty else if message.fingerprint.is_empty() { - self.announce_and_send_entries( - target_id, - namespace, - &message.range, - true, - Some(range_count), - None, - ) - .await?; + self.announce_and_send_entries(shared, &message.range, true, Some(range_count), None) + .await?; } // case 3: fingerprint doesn't match and is non-empty else { // reply by splitting the range into parts unless it is very short - // self.split_range_and_send_parts(target_id, namespace, &message.range, range_count) - // .await?; // TODO: Expose let split_opts = SplitOpts::default(); let snapshot = self.snapshot.clone(); let mut iter = snapshot - .split_range(namespace, &message.range, &split_opts)? + .split_range(self.namespace, &message.range, &split_opts)? 
.peekable(); while let Some(res) = iter.next() { let (subrange, action) = res?; @@ -302,8 +430,7 @@ impl Reconciler { match action { SplitAction::SendEntries(count) => { self.announce_and_send_entries( - target_id, - namespace, + shared, &subrange, true, covers, @@ -312,7 +439,7 @@ impl Reconciler { .await?; } SplitAction::SendFingerprint(fingerprint) => { - self.send_fingerprint(target_id, subrange, fingerprint, covers) + self.send_fingerprint(shared, subrange, fingerprint, covers) .await?; } } @@ -321,112 +448,47 @@ impl Reconciler { Ok(()) } + async fn received_announce_entries( &mut self, + shared: &Shared, message: ReconciliationAnnounceEntries, ) -> Result<(), Error> { - trace!("received_announce_entries start"); - self.current_payload.assert_inactive()?; - if self.pending_announced_entries.is_some() { - return Err(Error::InvalidMessageInCurrentState); - } - - let target_id = (message.receiver_handle, message.sender_handle); - let target = self.targets.get_eventually(target_id).await?; - let namespace = target.namespace; - if let Some(range_count) = message.covers { - target.mark_our_range_covered(range_count)?; - } - - if let Some(c) = NonZeroU64::new(message.count) { - self.pending_announced_entries = Some(c); + self.mark_our_range_covered(range_count)?; } if message.want_response { let range_count = self.next_range_count_theirs(); - self.announce_and_send_entries( - target_id, - namespace, - &message.range, - false, - Some(range_count), - None, - ) - .await?; + self.announce_and_send_entries(shared, &message.range, false, Some(range_count), None) + .await?; } trace!("received_announce_entries done"); Ok(()) } - fn decrement_pending_announced_entries(&mut self) -> Result<(), Error> { - self.pending_announced_entries = match self.pending_announced_entries.take() { - None => return Err(Error::InvalidMessageInCurrentState), - Some(c) => NonZeroU64::new(c.get().saturating_sub(1)), - }; - Ok(()) - } - - async fn received_send_entry(&mut self, message: 
ReconciliationSendEntry) -> Result<(), Error> { - self.current_payload.assert_inactive()?; - self.decrement_pending_announced_entries()?; - let authorised_entry = self - .static_tokens - .authorise_entry_eventually( - message.entry.entry.clone(), - message.static_token_handle, - message.dynamic_token, - ) - .await?; - self.store - .entries() - .ingest(&authorised_entry, Origin::Remote(self.session_id))?; - self.current_payload - .set(message.entry.entry, Some(message.entry.available))?; - Ok(()) - } - - async fn received_send_payload( - &mut self, - message: ReconciliationSendPayload, - ) -> Result<(), Error> { - self.current_payload - .recv_chunk(self.store.payloads(), message.bytes) - .await?; - Ok(()) - } - - async fn received_terminate_payload( - &mut self, - _message: ReconciliationTerminatePayload, - ) -> Result<(), Error> { - self.current_payload.finalize().await?; - Ok(()) - } - async fn send_fingerprint( &mut self, - target_id: TargetId, + shared: &Shared, range: ThreeDRange, fingerprint: Fingerprint, covers: Option, ) -> anyhow::Result<()> { + self.mark_our_next_range_pending(); let msg = ReconciliationSendFingerprint { range, fingerprint, - sender_handle: target_id.0, - receiver_handle: target_id.1, + sender_handle: self.our_handle, + receiver_handle: self.their_handle, covers, }; - self.send(msg).await?; + shared.send.send(msg).await?; Ok(()) } - #[allow(clippy::too_many_arguments)] async fn announce_and_send_entries( &mut self, - target_id: TargetId, - namespace: NamespaceId, + shared: &Shared, range: &ThreeDRange, want_response: bool, covers: Option, @@ -434,29 +496,34 @@ impl Reconciler { ) -> Result<(), Error> { let our_entry_count = match our_entry_count { Some(count) => count, - None => self.snapshot.count(namespace, range)?, + None => self.snapshot.count(self.namespace, range)?, }; let msg = ReconciliationAnnounceEntries { range: range.clone(), count: our_entry_count, want_response, will_sort: false, // todo: sorted? 
- sender_handle: target_id.0, - receiver_handle: target_id.1, + sender_handle: self.our_handle, + receiver_handle: self.their_handle, covers, }; + if want_response { + self.mark_our_next_range_pending(); + } + shared.send.send(msg).await?; - self.send(msg).await?; - let snapshot = self.snapshot.clone(); - for authorised_entry in snapshot.get_entries_with_authorisation(namespace, range) { + for authorised_entry in self + .snapshot + .get_entries_with_authorisation(self.namespace, range) + { let authorised_entry = authorised_entry?; let (entry, token) = authorised_entry.into_parts(); let (static_token, dynamic_token) = token.into_parts(); // TODO: partial payloads let available = entry.payload_length; - let static_token_handle = self + let static_token_handle = shared .static_tokens - .bind_and_send_ours(static_token, &self.send) + .bind_and_send_ours(static_token, &shared.send) .await?; let digest = entry.payload_digest; let msg = ReconciliationSendEntry { @@ -464,7 +531,7 @@ impl Reconciler { static_token_handle, dynamic_token, }; - self.send(msg).await?; + shared.send.send(msg).await?; // TODO: only send payload if configured to do so and/or under size limit. let send_payloads = true; @@ -472,38 +539,34 @@ impl Reconciler { if send_payloads && send_payload_chunked( digest, - self.store.payloads(), - &self.send, + shared.store.payloads(), + &shared.send, chunk_size, |bytes| ReconciliationSendPayload { bytes }.into(), ) .await? 
{ let msg = ReconciliationTerminatePayload; - self.send(msg).await?; + shared.send.send(msg).await?; } } Ok(()) } - async fn send(&mut self, message: impl Into) -> Result<(), Error> { - let message: ReconciliationMessage = message.into(); - let want_response = match &message { - ReconciliationMessage::SendFingerprint(msg) => { - Some((msg.sender_handle, msg.receiver_handle)) - } - ReconciliationMessage::AnnounceEntries(msg) if msg.want_response => { - Some((msg.sender_handle, msg.receiver_handle)) - } - _ => None, - }; - if let Some(target_id) = want_response { - let range_count = self.next_range_count_ours(); - let target = self.targets.get_mut(&target_id)?; - target.mark_our_range_pending(range_count); + fn mark_our_next_range_pending(&mut self) { + let range_count = self.next_range_count_ours(); + self.started = true; + self.our_uncovered_ranges.insert(range_count); + } + + fn mark_our_range_covered(&mut self, range_count: u64) -> Result<(), Error> { + if !self.our_uncovered_ranges.remove(&range_count) { + Err(Error::InvalidState( + "attempted to mark an unknown range as covered", + )) + } else { + Ok(()) } - self.send.send(message).await?; - Ok(()) } fn next_range_count_ours(&mut self) -> u64 { diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 7a7d609270..406a5a92e5 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -14,6 +14,7 @@ use crate::{ aoi_finder::AoiFinder, capabilities::Capabilities, channels::{ChannelSenders, LogicalChannelReceivers}, + events::{Event, EventEmitter}, pai_finder::{self as pai, PaiFinder, PaiIntersection}, static_tokens::StaticTokens, Channels, Error, Role, SessionId, SessionInit, @@ -66,6 +67,8 @@ pub async fn run_session( let mut aoi_recv = Cancelable::new(aoi_recv, cancel_token.clone()); let mut data_recv = Cancelable::new(data_recv, cancel_token.clone()); + let events = EventEmitter::default(); + let caps = Capabilities::new( initial_transmission.our_nonce, 
initial_transmission.received_commitment, @@ -89,6 +92,7 @@ pub async fn run_session( .merge(intersection_recv.map(pai::Input::ReceivedMessage)); let interests = Rc::clone(&interests); let aoi_finder = aoi_finder.clone(); + let events = events.clone(); async move { let mut gen = PaiFinder::run_gen(inbox); loop { @@ -96,6 +100,11 @@ pub async fn run_session( GeneratorState::Yielded(output) => match output { pai::Output::SendMessage(message) => send.send(message).await?, pai::Output::NewIntersection(intersection) => { + events + .send(Event::CapabilityIntersection( + intersection.authorisation.clone(), + )) + .await?; on_pai_intersection( &interests, store.secrets(), @@ -201,6 +210,7 @@ pub async fn run_session( session_id, send.clone(), our_role, + events, )?; async move { let res = reconciler.run().await; diff --git a/iroh-willow/src/store/traits.rs b/iroh-willow/src/store/traits.rs index 52b615c300..8a70d9c71a 100644 --- a/iroh-willow/src/store/traits.rs +++ b/iroh-willow/src/store/traits.rs @@ -1,3 +1,5 @@ +use std::fmt::Debug; + use anyhow::Result; use crate::proto::{ @@ -8,7 +10,7 @@ use crate::proto::{ willow::{AuthorisedEntry, Entry, NamespaceId}, }; -pub trait Storage: Clone + 'static { +pub trait Storage: Debug + Clone + 'static { type Entries: EntryStorage; type Secrets: SecretStorage; type Payloads: iroh_blobs::store::Store; @@ -17,7 +19,7 @@ pub trait Storage: Clone + 'static { fn payloads(&self) -> &Self::Payloads; } -pub trait SecretStorage: std::fmt::Debug + Clone + 'static { +pub trait SecretStorage: Debug + Clone + 'static { fn insert(&self, secret: meadowcap::SecretKey) -> Result<(), SecretStoreError>; fn get_user(&self, id: &UserId) -> Option; fn get_namespace(&self, id: &NamespaceId) -> Option; @@ -62,7 +64,7 @@ pub trait SecretStorage: std::fmt::Debug + Clone + 'static { } } -pub trait EntryStorage: EntryReader + Clone + std::fmt::Debug + 'static { +pub trait EntryStorage: EntryReader + Clone + Debug + 'static { type Reader: EntryReader; type 
Snapshot: EntryReader + Clone; @@ -71,7 +73,7 @@ pub trait EntryStorage: EntryReader + Clone + std::fmt::Debug + 'static { fn ingest_entry(&self, entry: &AuthorisedEntry) -> Result; } -pub trait EntryReader: 'static { +pub trait EntryReader: Debug + 'static { fn fingerprint(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result; fn split_range( From 2a40da74e8dc23d3f0920322863fbc713b1f0e7b Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 11 Jul 2024 17:42:21 +0200 Subject: [PATCH 083/198] wip: add peer manager and make intents work --- Cargo.lock | 1 + iroh-willow/Cargo.toml | 1 + iroh-willow/src/actor.rs | 111 ++- iroh-willow/src/auth.rs | 22 +- iroh-willow/src/form.rs | 9 + iroh-willow/src/lib.rs | 2 +- iroh-willow/src/net.rs | 24 +- iroh-willow/src/proto/challenge.rs | 4 + iroh-willow/src/proto/grouping.rs | 52 +- iroh-willow/src/proto/willow.rs | 11 +- iroh-willow/src/session.rs | 80 +- iroh-willow/src/session/aoi_finder.rs | 31 +- iroh-willow/src/session/capabilities.rs | 31 +- iroh-willow/src/session/data.rs | 18 +- iroh-willow/src/session/error.rs | 2 + iroh-willow/src/session/events.rs | 1119 +++++++++++++++++++++++ iroh-willow/src/session/reconciler.rs | 72 +- iroh-willow/src/session/run.rs | 125 ++- iroh-willow/src/store.rs | 6 +- iroh-willow/src/store/entry.rs | 35 +- 20 files changed, 1604 insertions(+), 152 deletions(-) create mode 100644 iroh-willow/src/session/events.rs diff --git a/Cargo.lock b/Cargo.lock index 94c67f1bd1..c52d751947 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2933,6 +2933,7 @@ dependencies = [ "derive_more", "ed25519-dalek", "flume", + "futures-buffered", "futures-concurrency", "futures-lite 2.3.0", "futures-util", diff --git a/iroh-willow/Cargo.toml b/iroh-willow/Cargo.toml index 1be238597f..8bc4d5997f 100644 --- a/iroh-willow/Cargo.toml +++ b/iroh-willow/Cargo.toml @@ -42,6 +42,7 @@ zerocopy = { version = "0.8.0-alpha.9", features = ["derive"] } hex = "0.4.3" curve25519-dalek = { version = "4.1.3", 
features = ["digest", "rand_core", "serde"] } sha2 = "0.10.8" +futures-buffered = "0.2.6" [dev-dependencies] iroh-test = { path = "../iroh-test" } diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/actor.rs index 52ccc8e7f9..656aac6d83 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -9,7 +9,7 @@ use tokio_util::sync::CancellationToken; use tracing::{debug, error, error_span, trace, warn, Instrument}; use crate::{ - auth::{CapSelector, CapabilityPack, DelegateTo}, + auth::{CapSelector, CapabilityPack, DelegateTo, InterestMap}, form::{AuthForm, EntryForm, EntryOrForm}, proto::{ grouping::ThreeDRange, @@ -18,15 +18,20 @@ use crate::{ sync::InitialTransmission, willow::{AuthorisedEntry, Entry}, }, - session::{run_session, Channels, Error, Role, SessionId, SessionInit}, + session::{ + events::{EventKind, EventSender, SessionEvent}, + run_session, Channels, Error, Interests, Role, SessionId, SessionInit, SessionUpdate, + }, store::{ + entry::EntryOrigin, traits::{EntryReader, SecretStorage, Storage}, - Origin, Store, + Store, }, util::task::{JoinMap, TaskKey}, }; pub const INBOX_CAP: usize = 1024; +pub const SESSION_EVENT_CAP: usize = 1024; #[derive(Debug, Clone)] pub struct ActorHandle { @@ -43,7 +48,18 @@ impl ActorHandle { create_store: impl 'static + Send + FnOnce() -> S, me: NodeId, ) -> ActorHandle { + let (handle, events_rx) = Self::spawn_with_events(create_store, me); + // drop all events + tokio::task::spawn(async move { while events_rx.recv_async().await.is_ok() {} }); + handle + } + + pub fn spawn_with_events( + create_store: impl 'static + Send + FnOnce() -> S, + me: NodeId, + ) -> (ActorHandle, flume::Receiver) { let (tx, rx) = flume::bounded(INBOX_CAP); + let (session_event_tx, session_event_rx) = flume::bounded(SESSION_EVENT_CAP); let join_handle = std::thread::Builder::new() .name("willow-actor".to_string()) .spawn(move || { @@ -59,6 +75,7 @@ impl ActorHandle { next_session_id: 0, session_tasks: Default::default(), tasks: 
Default::default(), + session_event_tx, }; if let Err(error) = actor.run() { error!(?error, "storage thread failed"); @@ -66,7 +83,7 @@ impl ActorHandle { }) .expect("failed to spawn thread"); let join_handle = Arc::new(Some(join_handle)); - ActorHandle { tx, join_handle } + (ActorHandle { tx, join_handle }, session_event_rx) } pub async fn send(&self, action: ToActor) -> Result<()> { self.tx.send_async(action).await?; @@ -80,7 +97,7 @@ impl ActorHandle { let (reply, reply_rx) = oneshot::channel(); self.send(ToActor::IngestEntry { authorised_entry, - origin: Origin::Local, + origin: EntryOrigin::Local, reply, }) .await?; @@ -99,7 +116,7 @@ impl ActorHandle { Ok(()) } - pub async fn insert_form( + pub async fn insert( &self, form: EntryForm, authorisation: impl Into, @@ -158,6 +175,9 @@ impl ActorHandle { .await?; reply_rx.await? } + + // pub async fn subscribe_namespace(&self, namespace: NamespaceId) -> Result {} + pub async fn create_namespace( &self, kind: NamespaceKind, @@ -198,6 +218,13 @@ impl ActorHandle { self.send(ToActor::ImportCaps { caps, reply }).await?; reply_rx.await? } + + pub async fn resolve_interests(&self, interests: Interests) -> Result { + let (reply, reply_rx) = oneshot::channel(); + self.send(ToActor::ResolveInterests { interests, reply }) + .await?; + reply_rx.await? 
+ } } impl Drop for ActorHandle { @@ -215,28 +242,37 @@ impl Drop for ActorHandle { #[derive(Debug)] pub struct SessionHandle { + session_id: SessionId, on_finish: future::Shared>>>, cancel_token: CancellationToken, + update_tx: flume::Sender, } impl SessionHandle { fn new( + session_id: SessionId, cancel_token: CancellationToken, - on_finish: oneshot::Receiver>, + on_finish: oneshot::Receiver>>, + update_tx: flume::Sender, ) -> Self { let on_finish = on_finish .map(|r| match r { - Ok(Ok(())) => Ok(()), - Ok(Err(err)) => Err(Arc::new(err)), + Ok(res) => res, Err(_) => Err(Arc::new(Error::ActorFailed)), }) .boxed() .shared(); SessionHandle { + session_id, on_finish, cancel_token, + update_tx, } } + + pub fn session_id(&self) -> SessionId { + self.session_id + } /// Wait for the session to finish. /// /// Returns an error if the session failed to complete. @@ -244,6 +280,11 @@ impl SessionHandle { self.on_finish.clone().await } + pub async fn send_update(&self, update: SessionUpdate) -> Result<()> { + self.update_tx.send_async(update).await?; + Ok(()) + } + /// Finish the session gracefully. /// /// After calling this, no further protocol messages will be sent from this node. 
@@ -265,6 +306,11 @@ pub enum ToActor { init: SessionInit, reply: oneshot::Sender>, }, + // UpdateSession { + // session_id: SessionId, + // interests: Interests, + // reply: oneshot::Sender>, + // }, GetEntries { namespace: NamespaceId, range: ThreeDRange, @@ -273,7 +319,7 @@ pub enum ToActor { }, IngestEntry { authorised_entry: AuthorisedEntry, - origin: Origin, + origin: EntryOrigin, reply: oneshot::Sender>, }, InsertEntry { @@ -297,6 +343,10 @@ pub enum ToActor { caps: Vec, reply: oneshot::Sender>, }, + ResolveInterests { + interests: Interests, + reply: oneshot::Sender>, + }, DelegateCaps { from: CapSelector, access_mode: AccessMode, @@ -314,7 +364,7 @@ pub enum ToActor { struct ActiveSession { #[allow(unused)] peer: NodeId, - on_finish: oneshot::Sender>, + on_finish: oneshot::Sender>>, task_key: TaskKey, // state: SharedSessionState } @@ -326,6 +376,7 @@ pub struct Actor { sessions: HashMap, session_tasks: JoinMap>, tasks: JoinSet<()>, + session_event_tx: flume::Sender, } impl Actor { @@ -363,7 +414,7 @@ impl Actor { Ok(res) => res, Err(err) => Err(err.into()) }; - self.complete_session(&id, res); + self.complete_session(&id, res).await; } }; } @@ -388,17 +439,11 @@ impl Actor { init, reply, } => { - // let Channels { send, recv } = channels; let id = self.next_session_id(); - // let session = - // Session::new(&self.store, id, our_role, send, init, initial_transmission); - // let session = match session { - // Ok(session) => session, - // Err(err) => return send_reply(reply, Err(err.into())), - // }; - let store = self.store.clone(); let cancel_token = CancellationToken::new(); + let event_sender = EventSender::new(id, self.session_event_tx.clone()); + let (update_tx, update_rx) = flume::bounded(16); let future = run_session( store, @@ -408,11 +453,11 @@ impl Actor { our_role, init, initial_transmission, + event_sender, + update_rx, ) .instrument(error_span!("session", peer = %peer.fmt_short())); - // let future = session - // .run(store, recv, 
cancel_token.clone()) - // .instrument(error_span!("session", peer = %peer.fmt_short())); + let task_key = self.session_tasks.spawn_local(id, future); let (on_finish_tx, on_finish_rx) = oneshot::channel(); @@ -422,7 +467,7 @@ impl Actor { peer, }; self.sessions.insert(id, active_session); - let handle = SessionHandle::new(cancel_token, on_finish_rx); + let handle = SessionHandle::new(id, cancel_token, on_finish_rx, update_tx); send_reply(reply, Ok(handle)) } ToActor::GetEntries { @@ -491,14 +536,28 @@ impl Actor { .delegate_full_caps(from, access_mode, to, store); send_reply(reply, res.map_err(anyhow::Error::from)) } + ToActor::ResolveInterests { interests, reply } => { + let res = self.store.auth().resolve_interests(interests); + send_reply(reply, res.map_err(anyhow::Error::from)) + } } } - fn complete_session(&mut self, session_id: &SessionId, result: Result<(), Error>) { + async fn complete_session(&mut self, session_id: &SessionId, result: Result<(), Error>) { let session = self.sessions.remove(session_id); if let Some(session) = session { debug!(?session, ?result, "complete session"); - session.on_finish.send(result).ok(); + let result = match result { + Ok(()) => Ok(()), + Err(err) => Err(Arc::new(err)), + }; + // TODO: remove + session.on_finish.send(result.clone()).ok(); + self.session_event_tx + .send_async(SessionEvent::new(*session_id, EventKind::Closed { result })) + .await + .ok(); + self.session_tasks.remove(&session.task_key); } else { warn!("remove_session called for unknown session"); diff --git a/iroh-willow/src/auth.rs b/iroh-willow/src/auth.rs index ca2dee9f62..69a84fd409 100644 --- a/iroh-willow/src/auth.rs +++ b/iroh-willow/src/auth.rs @@ -36,13 +36,19 @@ impl DelegateTo { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Hash, Eq, PartialEq)] pub struct CapSelector { pub namespace_id: NamespaceId, pub user: UserSelector, pub area: AreaSelector, } +impl From for CapSelector { + fn from(value: NamespaceId) -> Self { + Self::widest(value) + } 
+} + impl CapSelector { pub fn matches(&self, cap: &McCapability) -> bool { self.namespace_id == cap.granted_namespace().id() @@ -71,7 +77,9 @@ impl CapSelector { } } -#[derive(Debug, Default, Clone, Copy, Eq, PartialEq, derive_more::From, Serialize, Deserialize)] +#[derive( + Debug, Default, Clone, Copy, Eq, PartialEq, derive_more::From, Serialize, Deserialize, Hash, +)] pub enum UserSelector { #[default] Any, @@ -87,7 +95,7 @@ impl UserSelector { } } -#[derive(Debug, Clone, Default)] +#[derive(Debug, Clone, Default, Hash, Eq, PartialEq)] pub enum AreaSelector { #[default] Widest, @@ -219,10 +227,7 @@ impl Auth { } } - pub fn find_read_caps_for_interests( - &self, - interests: Interests, - ) -> Result { + pub fn resolve_interests(&self, interests: Interests) -> Result { match interests { Interests::All => { let out = self @@ -235,7 +240,7 @@ impl Auth { .collect::>(); Ok(out) } - Interests::Some(interests) => { + Interests::Select(interests) => { let mut out: HashMap> = HashMap::new(); for (cap_selector, aoi_selector) in interests { let cap = self.get_read_cap(&cap_selector)?; @@ -257,6 +262,7 @@ impl Auth { } Ok(out) } + Interests::Exact(interests) => Ok(interests), } } diff --git a/iroh-willow/src/form.rs b/iroh-willow/src/form.rs index 6c59857222..6b4db56f10 100644 --- a/iroh-willow/src/form.rs +++ b/iroh-willow/src/form.rs @@ -94,6 +94,15 @@ pub struct EntryForm { } impl EntryForm { + pub fn new_bytes(namespace_id: NamespaceId, path: Path, payload: impl Into) -> Self { + EntryForm { + namespace_id, + subspace_id: SubspaceForm::User, + path, + timestamp: TimestampForm::Now, + payload: PayloadForm::Bytes(payload.into()), + } + } pub async fn into_entry( self, store: &Store, diff --git a/iroh-willow/src/lib.rs b/iroh-willow/src/lib.rs index c7ea94a586..fcf82156e1 100644 --- a/iroh-willow/src/lib.rs +++ b/iroh-willow/src/lib.rs @@ -1,6 +1,6 @@ //! 
Implementation of willow -#![allow(missing_docs)] +#![allow(missing_docs, unused_imports)] pub mod actor; pub mod auth; diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 2c4680f6c1..b42068094b 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -28,15 +28,14 @@ use crate::{ }; pub const CHANNEL_CAP: usize = 1024 * 64; +pub const ALPN: &[u8] = b"iroh-willow/0"; #[instrument(skip_all, name = "willow_net", fields(me=%me.fmt_short(), peer=Empty))] -pub async fn run( - me: NodeId, - actor: ActorHandle, +pub async fn setup( conn: Connection, + me: NodeId, our_role: Role, - init: SessionInit, -) -> anyhow::Result { +) -> anyhow::Result<(InitialTransmission, Channels, JoinSet>)> { let peer = iroh_net::endpoint::get_remote_node_id(&conn)?; Span::current().record("peer", tracing::field::display(peer.fmt_short())); debug!(?our_role, "connected"); @@ -76,6 +75,19 @@ pub async fn run( logical_recv, }, }; + Ok((initial_transmission, channels, tasks)) +} + +#[instrument(skip_all, name = "willow_net", fields(me=%me.fmt_short(), peer=Empty))] +pub async fn run( + me: NodeId, + actor: ActorHandle, + conn: Connection, + our_role: Role, + init: SessionInit, +) -> anyhow::Result { + let peer = iroh_net::endpoint::get_remote_node_id(&conn)?; + let (initial_transmission, channels, tasks) = setup(conn, me, our_role).await?; let handle = actor .init_session(peer, our_role, initial_transmission, channels, init) .await?; @@ -612,7 +624,7 @@ mod tests { timestamp: TimestampForm::Now, payload: PayloadForm::Bytes(payload.into()), }; - let (entry, inserted) = handle.insert_form(entry, AuthForm::Any(user_id)).await?; + let (entry, inserted) = handle.insert(entry, AuthForm::Any(user_id)).await?; assert!(inserted); track_entries.extend([entry]); } diff --git a/iroh-willow/src/proto/challenge.rs b/iroh-willow/src/proto/challenge.rs index 0e0128f5cf..3ff6f77afa 100644 --- a/iroh-willow/src/proto/challenge.rs +++ b/iroh-willow/src/proto/challenge.rs @@ -41,6 +41,10 @@ 
impl ChallengeState { } } + pub fn is_revealed(&self) -> bool { + matches!(self, Self::Revealed { .. }) + } + pub fn sign(&self, secret_key: &UserSecretKey) -> Result { let signable = self.signable()?; let signature = secret_key.sign(&signable); diff --git a/iroh-willow/src/proto/grouping.rs b/iroh-willow/src/proto/grouping.rs index 454afd09af..fe48703139 100644 --- a/iroh-willow/src/proto/grouping.rs +++ b/iroh-willow/src/proto/grouping.rs @@ -269,6 +269,12 @@ pub struct AreaOfInterest { pub max_size: u64, } +impl From for AreaOfInterest { + fn from(value: Area) -> Self { + Self::new(value) + } +} + impl AreaOfInterest { pub fn new(area: Area) -> Self { Self { @@ -285,6 +291,25 @@ impl AreaOfInterest { max_size: 0, } } + + pub fn intersection(&self, other: &AreaOfInterest) -> Option { + let area = self.area.intersection(&other.area)?; + let max_count = match (self.max_count, other.max_count) { + (0, count) => count, + (count, 0) => count, + (a, b) => a.min(b), + }; + let max_size = match (self.max_size, other.max_size) { + (0, size) => size, + (size, 0) => size, + (a, b) => a.min(b), + }; + Some(Self { + area, + max_count, + max_size, + }) + } } /// A grouping of Entries. 
@@ -315,6 +340,10 @@ impl Area { Self::new(SubspaceArea::Any, Path::empty(), Range::::EMPTY) } + pub fn path(path: Path) -> Self { + Self::new(SubspaceArea::Any, path, Default::default()) + } + pub fn subspace(subspace_id: SubspaceId) -> Self { Self::new( SubspaceArea::Id(subspace_id), @@ -343,6 +372,10 @@ impl Area { && self.times.includes_range(&other.times) } + pub fn has_intersection(&self, other: &Area) -> bool { + self.includes_area(other) || other.includes_area(self) + } + pub fn includes_range(&self, range: &ThreeDRange) -> bool { let path_start = self.path.is_prefix_of(&range.paths.start); let path_end = match &range.paths.end { @@ -399,8 +432,7 @@ pub fn path_range_end(path: &Path) -> RangeEnd { for component in path.iter().rev() { // component can be incremented if out.is_empty() && component.iter().any(|x| *x != 0xff) { - let mut bytes = Vec::with_capacity(component.len()); - bytes.copy_from_slice(component); + let mut bytes = component.to_vec(); let incremented = increment_by_one(&mut bytes); debug_assert!(incremented, "checked above"); out.push(Bytes::from(bytes)); @@ -496,7 +528,7 @@ impl SubspaceArea { /// A single point in the 3D range space. /// /// I.e. an entry. 
-#[derive(Debug, Clone)] +#[derive(Debug, Clone, Hash, Eq, PartialEq)] pub struct Point { pub path: Path, pub timestamp: Timestamp, @@ -637,3 +669,17 @@ impl<'a> Encoder for AreaInArea<'a> { Ok(()) } } + +#[cfg(test)] +mod tests { + use crate::proto::{grouping::Area, willow::Path}; + + #[test] + fn area_eq() { + let p1 = Path::new(&[b"foo", b"bar"]).unwrap(); + let a1 = Area::path(p1); + let p2 = Path::new(&[b"foo", b"bar"]).unwrap(); + let a2 = Area::path(p2); + assert_eq!(a1, a2); + } +} diff --git a/iroh-willow/src/proto/willow.rs b/iroh-willow/src/proto/willow.rs index ebc9463f3b..29f2e729c6 100644 --- a/iroh-willow/src/proto/willow.rs +++ b/iroh-willow/src/proto/willow.rs @@ -128,12 +128,19 @@ impl Path { pub fn intersection(&self, other: &Path) -> Option { if self.is_prefix_of(other) { - Some(self.clone()) - } else if other.is_prefix_of(self) { Some(other.clone()) + } else if other.is_prefix_of(self) { + Some(self.clone()) } else { None } + // if self.is_prefix_of(other) { + // Some(self.clone()) + // } else if other.is_prefix_of(self) { + // Some(other.clone()) + // } else { + // None + // } } pub fn common_prefix(&self, other: &Path) -> &[Component] { diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 8ffeefaaba..63cd3256cf 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -1,13 +1,16 @@ -use std::collections::{BTreeMap, BTreeSet}; +use std::collections::{hash_map, BTreeMap, BTreeSet, HashMap}; -use crate::{auth::CapSelector, proto::grouping::AreaOfInterest}; +use crate::{ + auth::CapSelector, + proto::{grouping::AreaOfInterest, sync::ReadAuthorisation}, +}; mod aoi_finder; mod capabilities; pub mod channels; mod data; mod error; -mod events; +pub mod events; mod pai_finder; mod payload; mod reconciler; @@ -60,9 +63,75 @@ impl SessionMode { pub enum Interests { #[default] All, - Some(BTreeMap), + Select(HashMap), + Exact(HashMap>), } +impl Interests { + pub fn select() -> SelectBuilder { + 
SelectBuilder::default() + } +} + +#[derive(Default, Debug)] +pub struct SelectBuilder(HashMap); + +impl SelectBuilder { + pub fn add_full(mut self, cap: impl Into) -> Self { + let cap = cap.into(); + self.0.insert(cap, AreaOfInterestSelector::Widest); + self + } + + pub fn area( + mut self, + cap: impl Into, + aois: impl IntoIterator>, + ) -> Self { + let cap = cap.into(); + let aois = aois.into_iter(); + let aois = aois.map(|aoi| aoi.into()); + match self.0.entry(cap) { + hash_map::Entry::Vacant(entry) => { + entry.insert(AreaOfInterestSelector::Exact(aois.collect())); + } + hash_map::Entry::Occupied(mut entry) => match entry.get_mut() { + AreaOfInterestSelector::Widest => {} + AreaOfInterestSelector::Exact(existing) => existing.extend(aois), + }, + } + self + } + + pub fn build(self) -> Interests { + Interests::Select(self.0) + } +} + +impl From for Interests { + fn from(builder: SelectBuilder) -> Self { + builder.build() + } +} + +#[derive(Debug, Clone)] +pub enum SessionUpdate { + AddInterests(Interests), +} + +// impl Interest { +// pub fn merge(&self, other: &Interests) -> Self { +// match (self, other) { +// (Self::All, _) => Self::All, +// (_, Self::All) => Self::All, +// (Self::Some(a), Self::Some(b)) => { +// +// } +// +// } +// } +// } + #[derive(Debug, Default, Clone)] pub enum AreaOfInterestSelector { #[default] @@ -79,7 +148,8 @@ pub struct SessionInit { } impl SessionInit { - pub fn new(interests: Interests, mode: SessionMode) -> Self { + pub fn new(interests: impl Into, mode: SessionMode) -> Self { + let interests = interests.into(); Self { interests, mode } } } diff --git a/iroh-willow/src/session/aoi_finder.rs b/iroh-willow/src/session/aoi_finder.rs index b6f7fc6edc..73676b16f8 100644 --- a/iroh-willow/src/session/aoi_finder.rs +++ b/iroh-willow/src/session/aoi_finder.rs @@ -14,10 +14,20 @@ use crate::{ pub struct AoiIntersection { pub our_handle: AreaOfInterestHandle, pub their_handle: AreaOfInterestHandle, - pub intersection: Area, + pub 
intersection: AreaOfInterest, pub namespace: NamespaceId, } +impl AoiIntersection { + pub fn id(&self) -> (AreaOfInterestHandle, AreaOfInterestHandle) { + (self.our_handle, self.their_handle) + } + + pub fn area(&self) -> &Area { + &self.intersection.area + } +} + #[derive(Debug, Default, Clone)] pub struct AoiFinder(Rc>); @@ -81,8 +91,11 @@ impl Inner { namespace: NamespaceId, aoi: AreaOfInterest, ) -> Result<(), Error> { - let area = aoi.area.clone(); - let info = AoiInfo { aoi, namespace }; + // let area = aoi.area.clone(); + let info = AoiInfo { + aoi: aoi.clone(), + namespace, + }; let handle = match scope { Scope::Ours => self.our_handles.bind(info), Scope::Theirs => self.their_handles.bind(info), @@ -95,12 +108,13 @@ impl Inner { // TODO: If we stored the AoIs by namespace we would need to iterate less. for (candidate_handle, candidate) in other_resources.iter() { - let candidate_handle = *candidate_handle; if candidate.namespace != namespace { continue; } + let candidate_handle = *candidate_handle; // Check if we have an intersection. - if let Some(intersection) = candidate.area().intersection(&area) { + if let Some(intersection) = candidate.aoi.intersection(&aoi) { + tracing::warn!(a=?aoi, b=?candidate.aoi, ?intersection, "AOI INTERSECTION"); // We found an intersection! let (our_handle, their_handle) = match scope { Scope::Ours => (handle, candidate_handle), @@ -112,6 +126,7 @@ impl Inner { intersection, namespace, }; + // TODO: This can block... 
self.subscribers .retain(|sender| sender.send(intersection.clone()).is_ok()); } @@ -127,7 +142,7 @@ struct AoiInfo { } impl AoiInfo { - fn area(&self) -> &Area { - &self.aoi.area - } + // fn area(&self) -> &Area { + // &self.aoi.area + // } } diff --git a/iroh-willow/src/session/capabilities.rs b/iroh-willow/src/session/capabilities.rs index c30ef9b81f..b4ecd7d892 100644 --- a/iroh-willow/src/session/capabilities.rs +++ b/iroh-willow/src/session/capabilities.rs @@ -1,10 +1,12 @@ use std::{ cell::RefCell, - future::poll_fn, + future::{poll_fn, Future}, rc::Rc, - task::{ready, Poll}, + task::{ready, Poll, Waker}, }; +use tokio::sync::Notify; + use crate::{ proto::{ challenge::ChallengeState, @@ -27,6 +29,7 @@ struct Inner { challenge: ChallengeState, ours: ResourceMap, theirs: ResourceMap, + on_reveal_wakers: Vec, } impl Capabilities { @@ -39,9 +42,22 @@ impl Capabilities { challenge, ours: Default::default(), theirs: Default::default(), + on_reveal_wakers: Default::default(), }))) } + pub fn revealed(&self) -> impl Future + '_ { + std::future::poll_fn(|cx| { + let mut inner = self.0.borrow_mut(); + if inner.challenge.is_revealed() { + Poll::Ready(()) + } else { + inner.on_reveal_wakers.push(cx.waker().to_owned()); + Poll::Pending + } + }) + } + pub async fn bind_and_send_ours( &self, secret_store: &S, @@ -127,9 +143,18 @@ impl Capabilities { our_role: Role, their_nonce: AccessChallenge, ) -> Result<(), Error> { - self.0.borrow_mut().challenge.reveal(our_role, their_nonce) + let mut inner = self.0.borrow_mut(); + inner.challenge.reveal(our_role, their_nonce)?; + for waker in inner.on_reveal_wakers.drain(..) 
{ + waker.wake(); + } + Ok(()) } + // pub fn is_revealed(&self) -> bool { + // self.0.borrow().challenge.is_revealed() + // } + pub fn sign_subspace_capabiltiy( &self, secrets: &S, diff --git a/iroh-willow/src/session/data.rs b/iroh-willow/src/session/data.rs index a02d91176c..bda0b1327d 100644 --- a/iroh-willow/src/session/data.rs +++ b/iroh-willow/src/session/data.rs @@ -9,7 +9,11 @@ use crate::{ aoi_finder::AoiIntersectionQueue, channels::ChannelSenders, payload::DEFAULT_CHUNK_SIZE, static_tokens::StaticTokens, Error, SessionId, }, - store::{traits::Storage, Origin, Store}, + store::{ + entry::{EntryChannel, EntryOrigin}, + traits::Storage, + Store, + }, }; use super::payload::{send_payload_chunked, CurrentPayload}; @@ -50,7 +54,7 @@ impl DataSender { self.store.entries().watch_area( self.session_id, intersection.namespace, - intersection.intersection.clone(), + intersection.intersection.area.clone(), ); }, entry = entry_stream.recv() => { @@ -139,9 +143,13 @@ impl DataReceiver { message.dynamic_token, ) .await?; - self.store - .entries() - .ingest(&authorised_entry, Origin::Remote(self.session_id))?; + self.store.entries().ingest( + &authorised_entry, + EntryOrigin::Remote { + session: self.session_id, + channel: EntryChannel::Data, + }, + )?; let entry = authorised_entry.into_entry(); // TODO: handle offset self.current_payload diff --git a/iroh-willow/src/session/error.rs b/iroh-willow/src/session/error.rs index eb6cb4914b..2f08c8e542 100644 --- a/iroh-willow/src/session/error.rs +++ b/iroh-willow/src/session/error.rs @@ -69,6 +69,8 @@ pub enum Error { NoKnownInterestsForCapability, #[error("private area intersection error: {0}")] Pai(#[from] PaiError), + #[error("net failed: {0}")] + Net(anyhow::Error), } impl From for Error { diff --git a/iroh-willow/src/session/events.rs b/iroh-willow/src/session/events.rs new file mode 100644 index 0000000000..8885077593 --- /dev/null +++ b/iroh-willow/src/session/events.rs @@ -0,0 +1,1119 @@ +use std::{ + 
collections::{hash_map, BTreeSet, HashMap}, + sync::Arc, +}; + +use anyhow::{anyhow, Context, Result}; +use futures_lite::StreamExt; +use futures_util::FutureExt; +use iroh_net::{ + dialer::Dialer, endpoint::Connection, util::SharedAbortingJoinHandle, Endpoint, NodeId, +}; +use tokio::{ + io::Interest, + sync::oneshot, + task::{AbortHandle, JoinHandle, JoinSet}, +}; +use tracing::{error_span, Instrument}; + +use crate::{ + actor::{Actor, ActorHandle, SessionHandle}, + auth::{Auth, InterestMap}, + net::{setup, ALPN}, + proto::{ + grouping::{Area, AreaOfInterest}, + keys::NamespaceId, + sync::{ReadAuthorisation, ReadCapability}, + }, + session::{Error, Interests, Role, SessionId, SessionInit, SessionMode}, + store::traits::Storage, +}; + +use super::SessionUpdate::AddInterests; + +type NamespaceInterests = HashMap>; + +const COMMAND_CHANNEL_CAP: usize = 128; + +// type Names +// type NamespaceAuthInterests = HashMap; +// +// #[derive(Debug, Clone, Default)] +// struct InterestsByNamespace { +// auths: BTreeSet, +// aois: BTreeSet, +// } + +// impl InterestsByNamespace { +// fn add(&mut self, other: &InterestsByNamespace) { +// self.auths.extend(other.auths); +// self.aois.extend(other.aois); +// } +// } + +#[derive(Debug, Clone)] +pub struct EventSender { + session_id: SessionId, + sender: flume::Sender, +} + +impl EventSender { + pub fn new(session_id: SessionId, sender: flume::Sender) -> Self { + Self { session_id, sender } + } + pub async fn send(&self, event: EventKind) -> Result<(), Error> { + self.sender + .send_async(SessionEvent::new(self.session_id, event)) + .await + .map_err(|_| Error::InvalidState("session event receiver dropped"))?; + Ok(()) + } +} + +#[derive(Debug, Clone)] +pub struct SessionEvent { + session_id: SessionId, + event: EventKind, +} + +impl SessionEvent { + pub fn new(session_id: SessionId, event: EventKind) -> Self { + Self { session_id, event } + } +} + +#[derive(Debug, Clone)] +pub enum EventKind { + CapabilityIntersection { + 
capability: ReadCapability, + }, + // TODO: AoI + AoiIntersection { + namespace: NamespaceId, + area: AreaOfInterest, + }, + // TODO: AoI + Reconciled { + namespace: NamespaceId, + area: AreaOfInterest, + }, + ReconciledAll, + Closed { + result: Result<(), Arc>, + }, // ReconciledAll, +} + +impl EventKind { + pub fn namespace(&self) -> Option { + match self { + EventKind::CapabilityIntersection { capability } => { + Some(capability.granted_namespace().id()) + } + EventKind::AoiIntersection { namespace, .. } => Some(*namespace), + EventKind::Reconciled { namespace, .. } => Some(*namespace), + _ => None, + } + } +} + +// #[derive(Debug, Clone)] +// pub struct SyncEvent { +// peer: NodeId, +// event: EventKind, +// } + +#[derive(Debug)] +pub enum Command { + SyncWithPeer { + peer: NodeId, + init: SessionInit, + reply: oneshot::Sender>, + }, + UpdateIntent { + peer: NodeId, + intent_id: u64, + add_interests: Interests, + reply: oneshot::Sender>, + }, + CancelIntent { + peer: NodeId, + intent_id: u64, + }, + HandleConnection { + conn: Connection, + }, +} + +#[derive(Debug, Clone)] +pub struct ManagedHandle { + actor: ActorHandle, + command_tx: flume::Sender, + _task_handle: SharedAbortingJoinHandle>, +} + +impl ManagedHandle { + pub fn spawn( + endpoint: Endpoint, + create_store: impl 'static + Send + FnOnce() -> S, + ) -> Self { + let me = endpoint.node_id(); + let (actor, event_rx) = ActorHandle::spawn_with_events(create_store, me); + let (command_tx, command_rx) = flume::bounded(COMMAND_CHANNEL_CAP); + let peer_manager = PeerManager { + event_rx, + command_rx, + command_tx: command_tx.clone(), + establish_tasks: Default::default(), + actor: actor.clone(), + peers: Default::default(), + sessions: Default::default(), + endpoint: endpoint.clone(), + dialer: Dialer::new(endpoint), + next_intent_id: 0, + }; + let task_handle = tokio::task::spawn( + async move { peer_manager.run().await.map_err(|err| format!("{err:?}")) } + .instrument(error_span!("peer_manager", me = 
me.fmt_short())), + ); + ManagedHandle { + actor, + command_tx, + _task_handle: task_handle.into(), + } + } + + pub async fn handle_connection(&self, conn: Connection) -> Result<()> { + self.command_tx + .send_async(Command::HandleConnection { conn }) + .await?; + Ok(()) + } + + pub async fn sync_with_peer(&self, peer: NodeId, init: SessionInit) -> Result { + let (reply, reply_rx) = oneshot::channel(); + self.command_tx + .send_async(Command::SyncWithPeer { peer, init, reply }) + .await?; + reply_rx.await? + } +} + +impl std::ops::Deref for ManagedHandle { + type Target = ActorHandle; + + fn deref(&self) -> &Self::Target { + &self.actor + } +} + +type EstablishRes = (NodeId, Result<(JoinSet>, SessionHandle)>); + +#[derive(Debug)] +pub struct PeerManager { + event_rx: flume::Receiver, + command_rx: flume::Receiver, + command_tx: flume::Sender, + establish_tasks: JoinSet, + + actor: ActorHandle, + peers: HashMap, + // auth: Auth, + sessions: HashMap, + // intents: HashMap, + endpoint: Endpoint, + dialer: Dialer, + next_intent_id: u64, +} + +#[derive(Debug)] +struct SessionInfo { + // peer: NodeId, + our_role: Role, + complete_areas: NamespaceInterests, + submitted_interests: InterestMap, + intents: Vec, + handle: SessionHandle, +} + +impl SessionInfo { + async fn push_interests(&mut self, interests: InterestMap) -> Result<()> { + let new_interests = self.merge_interests(interests); + self.handle + .send_update(AddInterests(Interests::Exact(new_interests))) + .await?; + Ok(()) + } + // TODO: Less clones? 
+ fn merge_interests(&mut self, interests: InterestMap) -> InterestMap { + let mut new: InterestMap = HashMap::new(); + for (auth, aois) in interests.into_iter() { + match self.submitted_interests.entry(auth.clone()) { + hash_map::Entry::Vacant(entry) => { + entry.insert(aois.clone()); + new.insert(auth, aois.clone()); + } + hash_map::Entry::Occupied(mut entry) => { + let existing = entry.get_mut(); + for aoi in aois { + if !existing.contains(&aoi) { + existing.insert(aoi.clone()); + new.entry(auth.clone()).or_default().insert(aoi); + } + } + } + } + } + new + // for (namespace, details) in interests.into_iter() { + // let namespace = *namespace; + // match self.submitted_interests.entry(namespace) { + // hash_map::Entry::Vacant(entry) => { + // entry.insert(details.clone()); + // new.insert(namespace, details.clone()); + // } + // hash_map::Entry::Occupied(mut entry) => { + // let existing = entry.get_mut(); + // for aoi in details.aois { + // if !existing.aois.contains(&aoi) { + // existing.aois.insert(aoi.clone()); + // new.entry(namespace).or_default().aois.insert(aoi); + // } + // } + // for auth in details.auths { + // if !existing.auths.contains(&auth) { + // existing.auths.insert(auth.clone()); + // new.entry(namespace).or_default().auths.insert(auth); + // } + // } + // } + // } + // } + } +} + +#[derive(Debug)] +struct IntentInfo { + // peer: NodeId, + intent_id: u64, + interests: NamespaceInterests, + mode: SessionMode, + sender: flume::Sender, +} + +#[derive(Debug)] +pub struct IntentHandle { + peer: NodeId, + intent_id: u64, + receiver: flume::Receiver, + sender: flume::Sender, +} + +impl IntentHandle { + // TODO: impl stream + pub async fn next(&self) -> Option { + self.receiver.recv_async().await.ok() + } + + pub async fn complete(&self) -> Result<(), Arc> { + loop { + let event = self + .receiver + .recv_async() + .await + .map_err(|_| Arc::new(Error::ActorFailed))?; + if let EventKind::Closed { result } = event { + return result; + } + } + } + + 
pub async fn add_interests(&self, interests: Interests) -> Result<()> { + let (reply, reply_rx) = oneshot::channel(); + self.sender + .send_async(Command::UpdateIntent { + peer: self.peer, + intent_id: self.intent_id, + add_interests: interests, + reply, + }) + .await?; + reply_rx.await? + } +} + +// #[derive(Debug, Clone)] +// pub enum SyncEvent { +// Progress(EventKind), +// ReconciledAll, +// } + +// struct InterestInfo { +// aoi: AreaOfInterest, +// reconciled: bool, +// } + +#[derive(Debug)] +struct PeerInfo { + state: PeerState, +} + +#[derive(Debug)] +enum PeerState { + Connecting { + intents: Vec, + interests: InterestMap, + }, + Establishing { + our_role: Role, + intents: Vec, + submitted_interests: InterestMap, + pending_interests: InterestMap, + task_handle: AbortHandle, + }, + Active { + session_id: SessionId, + net_tasks: JoinSet>, + }, + // Closing { + // session_id: SessionId, + // }, + Placeholder, +} + +impl IntentInfo { + fn merge_interests(&mut self, interests: &InterestMap) { + for (auth, aois) in interests.iter() { + self.interests + .entry(auth.namespace()) + .or_default() + .extend(aois.clone()); + } + } + // fn handle_event(&mut self, event: &EventKind) -> (bool, Continuation) { + // match event { + // EventKind::CapabilityIntersection { capability } => { + // if self + // .interests + // .contains_key(&capability.granted_namespace().id()) + // { + // (true, Continuation::Continue) + // } else { + // (false, Continuation::Continue) + // } + // } + // EventKind::AoiIntersection { area, namespace } => match self.interests.get(namespace) { + // None => (false, Continuation::Continue), + // Some(interests) => { + // let matches = interests + // .iter() + // .any(|x| x.area.has_intersection(&area.area)); + // (matches, Continuation::Continue) + // } + // }, + // EventKind::Reconciled { area, namespace } => { + // let Some(interests) = self.interests.get_mut(namespace) else { + // return (false, Continuation::Continue); + // }; + // let matches = 
interests + // .iter() + // .any(|x| x.area.has_intersection(&area.area)); + // let cont = if matches { + // interests.retain(|x| area.area.includes_area(&x.area)); + // if interests.is_empty() { + // Continuation::Complete + // } else { + // Continuation::Continue + // } + // } else { + // Continuation::Continue + // }; + // (matches, cont) + // } + // EventKind::Closed { .. } => (true, Continuation::Complete), + // EventKind::ReconciledAll => (false, Continuation::Complete), + // } + // } + + async fn handle_event(&mut self, event: &EventKind) -> Result { + let send = |event: EventKind| async { + self.sender + .send_async(event) + .await + .map_err(|_| ReceiverDropped) + }; + match &event { + EventKind::CapabilityIntersection { capability } => { + if self + .interests + .contains_key(&capability.granted_namespace().id()) + { + send(event.clone()).await?; + Ok(true) + } else { + Ok(true) + } + } + EventKind::AoiIntersection { area, namespace } => match self.interests.get(namespace) { + None => Ok(true), + Some(interests) => { + let matches = interests + .iter() + .any(|x| x.area.has_intersection(&area.area)); + if matches { + send(event.clone()).await?; + } + Ok(true) + } + }, + EventKind::Reconciled { area, namespace } => { + let Some(interests) = self.interests.get_mut(namespace) else { + return Ok(true); + }; + let matches = interests + .iter() + .any(|x| x.area.has_intersection(&area.area)); + tracing::info!(?interests, ?matches, ?area, "reconciled pre"); + if matches { + interests.retain(|x| !area.area.includes_area(&x.area)); + tracing::info!(?interests, ?matches, "reconciled post"); + send(event.clone()).await?; + if interests.is_empty() { + send(EventKind::ReconciledAll).await?; + Ok(true) + } else { + Ok(true) + } + } else { + Ok(true) + } + } + EventKind::Closed { .. 
} => { + send(event.clone()).await?; + Ok(false) + } + EventKind::ReconciledAll => Ok(true), + } + + // let (should_sent, cont) = self.handle_event(event); + // if should_sent { + // self.sender + // .send_async(event.clone()) + // .await + // .map_err(|_| ReceiverDropped)?; + // } + // Ok(cont) + // let event = Ok(SyncEvent::Progress(event.clone())); + } +} + +#[derive(Debug, thiserror::Error)] +#[error("receiver dropped")] +pub struct ReceiverDropped; + +enum Continuation { + Continue, + Complete, +} + +impl PeerManager { + pub async fn run(mut self) -> Result<(), Error> { + loop { + tokio::select! { + Ok(event) = self.event_rx.recv_async() => { + self.received_event(event).await; + } + Ok(command) = self.command_rx.recv_async() => { + self.received_command(command).await; + } + Some(res) = self.establish_tasks.join_next(), if !self.establish_tasks.is_empty() => { + let res = match res { + Ok(res) => res, + Err(err) if err.is_cancelled() => { + continue; + }, + Err(err) => Err(err).context("establish task paniced")?, + }; + self.on_established(res).await?; + + } + Some((peer, conn)) = self.dialer.next() => { + match conn { + Ok(conn) => self.handle_connection(conn, Role::Alfie).await?, + Err(err) => self.on_dial_fail(peer, err).await, + } + + } + else => break, + } + } + Ok(()) + } + + async fn on_dial_fail(&mut self, peer: NodeId, err: anyhow::Error) { + let Some(peer_info) = self.peers.remove(&peer) else { + tracing::warn!(?peer, "dialer returned connection error for unknown peer"); + return; + }; + let PeerState::Connecting { intents, .. 
} = peer_info.state else { + tracing::warn!( + ?peer, + "dialer returned connection error for peer in wrong state" + ); + return; + }; + let result = Err(Arc::new(Error::Net(err))); + for intent in intents { + let result = result.clone(); + intent + .sender + .send_async(EventKind::Closed { result }) + .await + .ok(); + } + } + + async fn on_established(&mut self, res: EstablishRes) -> anyhow::Result<()> { + let (peer, res) = res; + let peer_info = self + .peers + .get_mut(&peer) + .ok_or_else(|| anyhow!("unreachable: on_established called for unknown peer"))?; + let peer_state = std::mem::replace(&mut peer_info.state, PeerState::Placeholder); + let PeerState::Establishing { + our_role, + intents, + submitted_interests, + pending_interests, + task_handle: _, + } = peer_state + else { + anyhow::bail!("unreachable: on_established called for peer in wrong state") + }; + match res { + Ok((net_tasks, session_handle)) => { + if our_role.is_alfie() && intents.is_empty() { + session_handle.close(); + } + let session_id = session_handle.session_id(); + let mut session_info = SessionInfo { + our_role, + // peer, + complete_areas: Default::default(), + submitted_interests, + intents, + handle: session_handle, + }; + if !pending_interests.is_empty() { + session_info.push_interests(pending_interests).await?; + } + self.sessions.insert(session_id, session_info); + peer_info.state = PeerState::Active { + session_id, + net_tasks, + }; + } + Err(err) => { + tracing::warn!(?peer, ?err, "establishing session failed"); + let result = Err(Arc::new(Error::Net(err))); + for intent in intents { + let result = result.clone(); + intent + .sender + .send_async(EventKind::Closed { result }) + .await + .ok(); + } + self.peers.remove(&peer); + } + } + Ok(()) + } + + pub async fn sync_with_peer( + &mut self, + peer: NodeId, + init: SessionInit, + ) -> Result { + let intent_interests = self.actor.resolve_interests(init.interests).await?; + // TODO: Allow to configure cap? 
+ let (sender, receiver) = flume::bounded(64); + let intent_id = { + let intent_id = self.next_intent_id; + self.next_intent_id += 1; + intent_id + }; + let intent_info = IntentInfo { + // peer, + intent_id, + interests: flatten_interests(&intent_interests), + mode: init.mode, + sender, + }; + // self.intents.insert(intent_id, intent_info); + match self.peers.get_mut(&peer) { + None => { + self.dialer.queue_dial(peer, ALPN); + let intents = vec![intent_info]; + let state = PeerState::Connecting { + intents, + interests: intent_interests, + }; + let peer_info = PeerInfo { state }; + self.peers.insert(peer, peer_info); + } + Some(info) => match &mut info.state { + PeerState::Connecting { intents, interests } => { + intents.push(intent_info); + merge_interests(interests, intent_interests); + } + PeerState::Establishing { + intents, + pending_interests, + .. + } => { + intents.push(intent_info); + merge_interests(pending_interests, intent_interests); + } + PeerState::Active { session_id, .. 
} => { + let session = self.sessions.get_mut(session_id).expect("session to exist"); + session.intents.push(intent_info); + session.push_interests(intent_interests).await?; + } + PeerState::Placeholder => unreachable!(), + }, + }; + let handle = IntentHandle { + peer, + receiver, + intent_id, + sender: self.command_tx.clone(), + }; + Ok(handle) + } + + pub async fn update_intent( + &mut self, + peer: NodeId, + intent_id: u64, + add_interests: Interests, + ) -> Result<()> { + let add_interests = self.actor.resolve_interests(add_interests).await?; + match self.peers.get_mut(&peer) { + None => anyhow::bail!("invalid node id"), + Some(peer_info) => match &mut peer_info.state { + PeerState::Connecting { intents, interests } => { + let Some(intent_info) = intents.iter_mut().find(|i| i.intent_id == intent_id) + else { + anyhow::bail!("invalid intent id"); + }; + intent_info.merge_interests(&add_interests); + merge_interests(interests, add_interests); + } + PeerState::Establishing { + intents, + pending_interests, + .. + } => { + let Some(intent_info) = intents.iter_mut().find(|i| i.intent_id == intent_id) + else { + anyhow::bail!("invalid intent id"); + }; + intent_info.merge_interests(&add_interests); + merge_interests(pending_interests, add_interests); + } + PeerState::Active { session_id, .. } => { + let session = self.sessions.get_mut(session_id).expect("session to exist"); + let Some(intent_info) = session + .intents + .iter_mut() + .find(|i| i.intent_id == intent_id) + else { + anyhow::bail!("invalid intent id"); + }; + intent_info.merge_interests(&add_interests); + session.push_interests(add_interests).await?; + } + PeerState::Placeholder => unreachable!(), + }, + }; + Ok(()) + } + + pub fn cancel_intent(&mut self, peer: NodeId, intent_id: u64) { + let Some(peer_info) = self.peers.get_mut(&peer) else { + return; + }; + + match &mut peer_info.state { + PeerState::Connecting { intents, .. 
} => { + intents.retain(|intent_info| intent_info.intent_id != intent_id); + if intents.is_empty() { + self.dialer.abort_dial(&peer); + self.peers.remove(&peer); + } + } + PeerState::Establishing { intents, .. } => { + intents.retain(|intent_info| intent_info.intent_id != intent_id); + } + PeerState::Active { session_id, .. } => { + let session = self.sessions.get_mut(session_id).expect("session to exist"); + session + .intents + .retain(|intent| intent.intent_id != intent_id); + if session.intents.is_empty() { + session.handle.close(); + // TODO: Abort session + } + } + PeerState::Placeholder => unreachable!(), + } + } + + pub async fn received_command(&mut self, command: Command) { + tracing::info!(?command, "command"); + match command { + Command::SyncWithPeer { peer, init, reply } => { + let res = self.sync_with_peer(peer, init).await; + // TODO: Cancel intent if reply send fails? + reply.send(res).ok(); + } + Command::UpdateIntent { + peer, + intent_id, + add_interests, + reply, + } => { + let res = self.update_intent(peer, intent_id, add_interests).await; + // TODO: Cancel intent if reply send fails? + reply.send(res).ok(); + } + Command::CancelIntent { peer, intent_id } => { + self.cancel_intent(peer, intent_id); + } + Command::HandleConnection { conn } => { + if let Err(err) = self.handle_connection(conn, Role::Betty).await { + tracing::warn!("failed to handle connection: {err:?}"); + } + } + } + } + + pub async fn received_event(&mut self, event: SessionEvent) { + tracing::info!(?event, "command"); + let Some(session) = self.sessions.get_mut(&event.session_id) else { + tracing::warn!(?event, "Got event for unknown session"); + return; + }; + + let mut is_closed = false; + match &event.event { + EventKind::Reconciled { namespace, area } => { + session + .complete_areas + .entry(*namespace) + .or_default() + .insert(area.clone()); + } + EventKind::Closed { .. 
} => { + is_closed = true; + } + _ => {} + } + + let send_futs = session + .intents + .iter_mut() + .map(|intent_info| intent_info.handle_event(&event.event)); + let send_res = futures_buffered::join_all(send_futs).await; + let mut removed = 0; + for (i, res) in send_res.into_iter().enumerate() { + match res { + Err(ReceiverDropped) | Ok(false) => { + session.intents.remove(i - removed); + removed += 1; + } + Ok(true) => {} + } + } + + if session.our_role.is_alfie() && session.intents.is_empty() && !is_closed { + session.handle.close(); + } + + if is_closed { + debug_assert!(session.intents.is_empty()); + // TODO: Wait for net tasks to terminate? + self.sessions.remove(&event.session_id); + } + } + + async fn handle_connection(&mut self, conn: Connection, our_role: Role) -> Result<()> { + let peer = iroh_net::endpoint::get_remote_node_id(&conn)?; + let peer_info = self.peers.get_mut(&peer); + let (interests, mode, intents) = match our_role { + Role::Alfie => { + let peer = peer_info + .ok_or_else(|| anyhow!("got connection for peer without any intents"))?; + let peer_state = std::mem::replace(&mut peer.state, PeerState::Placeholder); + match peer_state { + PeerState::Placeholder => unreachable!(), + PeerState::Active { .. } => { + anyhow::bail!("got connection for already active peer"); + } + PeerState::Establishing { .. } => { + anyhow::bail!("got connection for already establishing peer"); + } + PeerState::Connecting { intents, interests } => { + let mode = if intents.iter().any(|i| matches!(i.mode, SessionMode::Live)) { + SessionMode::Live + } else { + SessionMode::ReconcileOnce + }; + (interests, mode, intents) + } + } + } + Role::Betty => { + let intents = if let Some(peer) = peer_info { + let peer_state = std::mem::replace(&mut peer.state, PeerState::Placeholder); + match peer_state { + PeerState::Placeholder => unreachable!(), + PeerState::Active { .. } => { + anyhow::bail!("got connection for already active peer"); + } + PeerState::Establishing { .. 
} => { + anyhow::bail!("got connection for already establishing peer"); + } + PeerState::Connecting { intents, .. } => { + // TODO: Decide which conn to use. + intents + } + } + } else { + Default::default() + }; + let interests = self.actor.resolve_interests(Interests::All).await?; + (interests, SessionMode::Live, intents) + } + }; + + let me = self.endpoint.node_id(); + let actor = self.actor.clone(); + let submitted_interests = interests.clone(); + let init = SessionInit { + mode, + interests: Interests::Exact(interests), + }; + let establish_fut = async move { + let (initial_transmission, channels, tasks) = setup(conn, me, our_role).await?; + let session_handle = actor + .init_session(peer, our_role, initial_transmission, channels, init) + .await?; + Ok::<_, anyhow::Error>((tasks, session_handle)) + }; + let establish_fut = establish_fut.map(move |res| (peer, res)); + let task_handle = self.establish_tasks.spawn(establish_fut); + let peer_state = PeerState::Establishing { + our_role, + intents, + submitted_interests, + pending_interests: Default::default(), + task_handle, + }; + let peer_info = PeerInfo { state: peer_state }; + self.peers.insert(peer, peer_info); + // peer. 
+ // crate::net::run(me, self.actor.clone(), conn, our_role, init) + Ok(()) + } +} + +fn merge_interests(a: &mut InterestMap, b: InterestMap) { + for (cap, aois) in b.into_iter() { + a.entry(cap).or_default().extend(aois); + } +} + +fn flatten_interests(interests: &InterestMap) -> NamespaceInterests { + let mut out = NamespaceInterests::new(); + for (cap, aois) in interests { + out.entry(cap.namespace()).or_default().extend(aois.clone()); + } + out +} + +#[cfg(test)] +mod tests { + use iroh_net::{Endpoint, NodeAddr, NodeId}; + use rand::SeedableRng; + use std::collections::{BTreeMap, BTreeSet, HashMap}; + + use super::{EventKind, ManagedHandle, ALPN}; + use crate::{ + actor::ActorHandle, + auth::{CapSelector, DelegateTo}, + form::{AuthForm, EntryForm, PayloadForm, SubspaceForm, TimestampForm}, + net::run, + proto::{ + grouping::{Area, AreaOfInterest, ThreeDRange}, + keys::{NamespaceId, NamespaceKind, UserId}, + meadowcap::AccessMode, + willow::{Entry, InvalidPath, Path}, + }, + session::{Interests, Role, SessionInit, SessionMode}, + }; + + #[tokio::test(flavor = "multi_thread")] + async fn peer_manager_simple() -> anyhow::Result<()> { + iroh_test::logging::setup_multithreaded(); + let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(1); + let (alfie, alfie_ep, alfie_addr, alfie_task) = create(&mut rng).await?; + let (betty, betty_ep, betty_addr, betty_task) = create(&mut rng).await?; + + let betty_node_id = betty_addr.node_id; + alfie_ep.add_node_addr(betty_addr)?; + + let user_alfie = alfie.create_user().await?; + let user_betty = betty.create_user().await?; + + let namespace_id = alfie + .create_namespace(NamespaceKind::Owned, user_alfie) + .await?; + + let cap_for_betty = alfie + .delegate_caps( + CapSelector::widest(namespace_id), + AccessMode::Write, + DelegateTo::new(user_betty, None), + ) + .await?; + + betty.import_caps(cap_for_betty).await?; + + let path = Path::new(&[b"foo", b"1"])?; + let entry = EntryForm::new_bytes(namespace_id, path, "foo 1"); + let 
(_, inserted) = betty.insert(entry, user_betty).await?; + assert!(inserted); + + let path = Path::new(&[b"bar", b"2"])?; + let entry = EntryForm::new_bytes(namespace_id, path, "bar 1"); + let (_, inserted) = betty.insert(entry, user_betty).await?; + assert!(inserted); + + let path = Path::new(&[b"bar", b"3"])?; + let entry = EntryForm::new_bytes(namespace_id, path, "bar 2"); + let (_, inserted) = betty.insert(entry, user_betty).await?; + assert!(inserted); + + let t1 = tokio::task::spawn({ + let alfie = alfie.clone(); + async move { + let path = Path::new(&[b"foo"]).unwrap(); + let target_area = Area::path(path); + let interests = Interests::select().area(namespace_id, [target_area.clone()]); + let init = SessionInit::new(interests, SessionMode::ReconcileOnce); + let intent_handle = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); + + let event = intent_handle.next().await.unwrap(); + assert!(matches!(event, EventKind::CapabilityIntersection { .. })); + + let event = intent_handle.next().await.unwrap(); + assert!( + matches!(event, EventKind::AoiIntersection { namespace, area } if namespace == namespace_id && area.area == target_area) + ); + + let event = intent_handle.next().await.unwrap(); + let EventKind::Reconciled { namespace, area } = event else { + panic!("expected Reconciled"); + }; + assert_eq!(namespace, namespace_id); + assert_eq!(area.area, target_area); + + let event = intent_handle.next().await.unwrap(); + assert!(matches!(event, EventKind::ReconciledAll)); + + let event = intent_handle.next().await.unwrap(); + assert!(matches!(event, EventKind::Closed { result } if result.is_ok())); + + let event = intent_handle.next().await; + assert!(event.is_none()); + } + }); + + let t2 = tokio::task::spawn({ + let alfie = alfie.clone(); + async move { + let path = Path::new(&[b"bar"]).unwrap(); + let target_area = Area::path(path); + let interests = Interests::select().area(namespace_id, [target_area.clone()]); + let init = SessionInit::new(interests, 
SessionMode::ReconcileOnce); + let intent_handle = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); + + let event = intent_handle.next().await.unwrap(); + assert!(matches!(event, EventKind::CapabilityIntersection { .. })); + + let event = intent_handle.next().await.unwrap(); + assert!( + matches!(event, EventKind::AoiIntersection { namespace, area } if namespace == namespace_id && area.area == target_area) + ); + + let event = intent_handle.next().await.unwrap(); + let EventKind::Reconciled { namespace, area } = event else { + panic!("expected Reconciled"); + }; + assert_eq!(namespace, namespace_id); + assert_eq!(area.area, target_area); + + let event = intent_handle.next().await.unwrap(); + assert!(matches!(event, EventKind::ReconciledAll)); + + let event = intent_handle.next().await.unwrap(); + assert!(matches!(event, EventKind::Closed { result } if result.is_ok())); + + let event = intent_handle.next().await; + assert!(event.is_none()); + } + }); + + t1.await.unwrap(); + t2.await.unwrap(); + betty_task.abort(); + alfie_task.abort(); + Ok(()) + } + + pub async fn create( + rng: &mut rand_chacha::ChaCha12Rng, + ) -> anyhow::Result<( + ManagedHandle, + Endpoint, + iroh_net::NodeAddr, + tokio::task::JoinHandle>, + )> { + let endpoint = Endpoint::builder() + .secret_key(iroh_net::key::SecretKey::generate_with_rng(rng)) + .alpns(vec![ALPN.to_vec()]) + .bind(0) + .await?; + let node_addr = endpoint.node_addr().await?; + let payloads = iroh_blobs::store::mem::Store::default(); + let create_store = move || crate::store::memory::Store::new(payloads); + let handle = ManagedHandle::spawn(endpoint.clone(), create_store); + let accept_task = tokio::task::spawn({ + let handle = handle.clone(); + let endpoint = endpoint.clone(); + async move { + while let Some(mut conn) = endpoint.accept().await { + let alpn = conn.alpn().await?; + if alpn != ALPN { + continue; + } + let conn = conn.await?; + handle.handle_connection(conn).await?; + } + Ok::<_, anyhow::Error>(()) + } + 
}); + Ok((handle, endpoint, node_addr, accept_task)) + } +} diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index 0a71a3531e..11ab6d5bab 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -11,7 +11,7 @@ use tracing::{debug, trace}; use crate::{ proto::{ - grouping::{Area, ThreeDRange}, + grouping::ThreeDRange, keys::NamespaceId, sync::{ AreaOfInterestHandle, Fingerprint, LengthyEntry, ReconciliationAnnounceEntries, @@ -23,14 +23,15 @@ use crate::{ session::{ aoi_finder::{AoiIntersection, AoiIntersectionQueue}, channels::{ChannelSenders, MessageReceiver}, - events::{Event, EventEmitter}, + events::{EventKind, EventSender}, payload::{send_payload_chunked, CurrentPayload}, static_tokens::StaticTokens, Error, Role, SessionId, }, store::{ + entry::{EntryChannel, EntryOrigin}, traits::{EntryReader, EntryStorage, SplitAction, SplitOpts, Storage}, - Origin, Store, + Store, }, util::stream::Cancelable, }; @@ -39,7 +40,7 @@ use crate::{ pub struct Reconciler { shared: Shared, recv: Cancelable>, - events: EventEmitter, + events: EventSender, targets: TargetMap, current_entry: CurrentEntry, } @@ -47,6 +48,7 @@ pub struct Reconciler { type TargetId = (AreaOfInterestHandle, AreaOfInterestHandle); impl Reconciler { + #[allow(clippy::too_many_arguments)] pub fn new( store: Store, recv: Cancelable>, @@ -55,7 +57,7 @@ impl Reconciler { session_id: SessionId, send: ChannelSenders, our_role: Role, - events: EventEmitter, + events: EventSender, ) -> Result { let shared = Shared { store, @@ -78,6 +80,7 @@ impl Reconciler { loop { tokio::select! { message = self.recv.try_next() => { + tracing::trace!(?message, "tick: recv"); match message? { None => break, Some(message) => match self.received_message(message).await? 
{ @@ -90,10 +93,12 @@ impl Reconciler { } } Ok(intersection) = self.targets.aoi_intersection_queue.recv_async() => { + tracing::trace!(?intersection, "tick: interesection"); let intersection = intersection; let area = intersection.intersection.clone(); + let namespace = intersection.namespace; self.targets.init_target(&self.shared, intersection).await?; - self.events.send(Event::AreaIntersection(area)).await?; + self.events.send(EventKind::AoiIntersection { namespace, area }).await?; } } } @@ -141,10 +146,13 @@ impl Reconciler { authorised_entry.entry().payload_digest, message.entry.available, )?; - self.shared - .store - .entries() - .ingest(&authorised_entry, Origin::Remote(self.shared.session_id))?; + self.shared.store.entries().ingest( + &authorised_entry, + EntryOrigin::Remote { + session: self.shared.session_id, + channel: EntryChannel::Reconciliation, + }, + )?; } ReconciliationMessage::SendPayload(message) => { self.current_entry @@ -175,7 +183,10 @@ impl Reconciler { .map .remove(&id) .ok_or(Error::InvalidMessageInCurrentState)?; - let event = Event::Reconciled(target.area); + let event = EventKind::Reconciled { + area: target.intersection.intersection.clone(), + namespace: target.namespace(), + }; self.events.send(event).await?; if self.targets.map.is_empty() { Ok(ControlFlow::Break(())) @@ -206,6 +217,7 @@ impl TargetMap { tracing::info!("aoi wait: {requested_id:?}"); if !self.map.contains_key(requested_id) { self.wait_for_target(shared, requested_id).await?; + tracing::info!("aoi resolved: {requested_id:?}"); } return Ok(self.map.get_mut(requested_id).unwrap()); } @@ -334,10 +346,7 @@ struct Shared { struct Target { snapshot: ::Snapshot, - our_handle: AreaOfInterestHandle, - their_handle: AreaOfInterestHandle, - namespace: NamespaceId, - area: Area, + intersection: AoiIntersection, our_uncovered_ranges: HashSet, started: bool, @@ -348,7 +357,7 @@ struct Target { impl Target { fn id(&self) -> TargetId { - (self.our_handle, self.their_handle) + 
self.intersection.id() } async fn init( snapshot: ::Snapshot, @@ -357,10 +366,7 @@ impl Target { ) -> Result { let mut this = Target { snapshot, - our_handle: intersection.our_handle, - their_handle: intersection.their_handle, - namespace: intersection.namespace, - area: intersection.intersection, + intersection, our_uncovered_ranges: Default::default(), started: false, our_range_counter: 0, @@ -372,9 +378,13 @@ impl Target { Ok(this) } + fn namespace(&self) -> NamespaceId { + self.intersection.namespace + } + async fn initiate(&mut self, shared: &Shared) -> Result<(), Error> { - let range = self.area.into_range(); - let fingerprint = self.snapshot.fingerprint(self.namespace, &range)?; + let range = self.intersection.area().into_range(); + let fingerprint = self.snapshot.fingerprint(self.namespace(), &range)?; self.send_fingerprint(shared, range, fingerprint, None) .await?; Ok(()) @@ -394,7 +404,9 @@ impl Target { } let range_count = self.next_range_count_theirs(); - let our_fingerprint = self.snapshot.fingerprint(self.namespace, &message.range)?; + let our_fingerprint = self + .snapshot + .fingerprint(self.namespace(), &message.range)?; // case 1: fingerprint match. if our_fingerprint == message.fingerprint { @@ -421,7 +433,7 @@ impl Target { let split_opts = SplitOpts::default(); let snapshot = self.snapshot.clone(); let mut iter = snapshot - .split_range(self.namespace, &message.range, &split_opts)? + .split_range(self.namespace(), &message.range, &split_opts)? 
.peekable(); while let Some(res) = iter.next() { let (subrange, action) = res?; @@ -478,8 +490,8 @@ impl Target { let msg = ReconciliationSendFingerprint { range, fingerprint, - sender_handle: self.our_handle, - receiver_handle: self.their_handle, + sender_handle: self.intersection.our_handle, + receiver_handle: self.intersection.their_handle, covers, }; shared.send.send(msg).await?; @@ -496,15 +508,15 @@ impl Target { ) -> Result<(), Error> { let our_entry_count = match our_entry_count { Some(count) => count, - None => self.snapshot.count(self.namespace, range)?, + None => self.snapshot.count(self.namespace(), range)?, }; let msg = ReconciliationAnnounceEntries { range: range.clone(), count: our_entry_count, want_response, will_sort: false, // todo: sorted? - sender_handle: self.our_handle, - receiver_handle: self.their_handle, + sender_handle: self.intersection.our_handle, + receiver_handle: self.intersection.their_handle, covers, }; if want_response { @@ -514,7 +526,7 @@ impl Target { for authorised_entry in self .snapshot - .get_entries_with_authorisation(self.namespace, range) + .get_entries_with_authorisation(self.namespace(), range) { let authorised_entry = authorised_entry?; let (entry, token) = authorised_entry.into_parts(); diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 406a5a92e5..3de17d1fec 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -1,4 +1,4 @@ -use std::rc::Rc; +use std::{cell::RefCell, rc::Rc}; use futures_concurrency::stream::StreamExt as _; use futures_lite::StreamExt as _; @@ -14,10 +14,10 @@ use crate::{ aoi_finder::AoiFinder, capabilities::Capabilities, channels::{ChannelSenders, LogicalChannelReceivers}, - events::{Event, EventEmitter}, + events::{EventKind, EventSender, SessionEvent}, pai_finder::{self as pai, PaiFinder, PaiIntersection}, static_tokens::StaticTokens, - Channels, Error, Role, SessionId, SessionInit, + Channels, Error, Role, SessionId, SessionInit, 
SessionUpdate, }, store::{ traits::{SecretStorage, Storage}, @@ -43,6 +43,8 @@ pub async fn run_session( our_role: Role, init: SessionInit, initial_transmission: InitialTransmission, + event_sender: EventSender, + update_receiver: flume::Receiver, ) -> Result<(), Error> { let Channels { send, recv } = channels; let ChannelReceivers { @@ -66,8 +68,7 @@ pub async fn run_session( let mut capability_recv = Cancelable::new(capability_recv, cancel_token.clone()); let mut aoi_recv = Cancelable::new(aoi_recv, cancel_token.clone()); let mut data_recv = Cancelable::new(data_recv, cancel_token.clone()); - - let events = EventEmitter::default(); + let mut update_receiver = Cancelable::new(update_receiver.into_stream(), cancel_token.clone()); let caps = Capabilities::new( initial_transmission.our_nonce, @@ -78,11 +79,63 @@ pub async fn run_session( let tasks = Tasks::default(); - let interests = store.auth().find_read_caps_for_interests(init.interests)?; - let interests = Rc::new(interests); + let initial_interests = store.auth().resolve_interests(init.interests)?; + tracing::warn!("INIT INTEREST {initial_interests:?}"); + let all_interests = Rc::new(RefCell::new(initial_interests.clone())); + let initial_interests = Rc::new(initial_interests); - // Setup the private area intersection finder. + // Setup a channel for the private area intersection finder. let (pai_inbox_tx, pai_inbox_rx) = flume::bounded(128); + + // Spawn a task to handle session updates. 
+ tasks.spawn(error_span!("upd"), { + // let tokens = tokens.clone(); + let store = store.clone(); + let caps = caps.clone(); + let to_pai = pai_inbox_tx.clone(); + let all_interests = all_interests.clone(); + async move { + while let Some(update) = update_receiver.next().await { + match update { + SessionUpdate::AddInterests(interests) => { + caps.revealed().await; + let interests = store.auth().resolve_interests(interests)?; + tracing::warn!("UPDATE INTEREST {interests:?}"); + for (authorisation, aois) in interests.into_iter() { + all_interests + .borrow_mut() + .entry(authorisation.clone()) + .or_default() + .extend(aois); + to_pai + .send_async(pai::Input::SubmitAuthorisation(authorisation)) + .await + .map_err(|_| Error::InvalidState("PAI actor dead"))?; + } + } + } + // tokens.bind_theirs(message.static_token); + } + Ok(()) + } + }); + + // Spawn a task to setup the initial interests + tasks.spawn(error_span!("setup-pai"), { + let caps = caps.clone(); + let to_pai = pai_inbox_tx.clone(); + async move { + caps.revealed().await; + for authorisation in initial_interests.keys() { + to_pai + .send_async(pai::Input::SubmitAuthorisation(authorisation.clone())) + .await + .map_err(|_| Error::InvalidState("PAI actor dead"))?; + } + Ok(()) + } + }); + tasks.spawn(error_span!("pai"), { let store = store.clone(); let send = send.clone(); @@ -90,9 +143,9 @@ pub async fn run_session( let inbox = pai_inbox_rx .into_stream() .merge(intersection_recv.map(pai::Input::ReceivedMessage)); - let interests = Rc::clone(&interests); + let interests = Rc::clone(&all_interests); let aoi_finder = aoi_finder.clone(); - let events = events.clone(); + let event_sender = event_sender.clone(); async move { let mut gen = PaiFinder::run_gen(inbox); loop { @@ -100,11 +153,10 @@ pub async fn run_session( GeneratorState::Yielded(output) => match output { pai::Output::SendMessage(message) => send.send(message).await?, pai::Output::NewIntersection(intersection) => { - events - 
.send(Event::CapabilityIntersection( - intersection.authorisation.clone(), - )) - .await?; + let event = EventKind::CapabilityIntersection { + capability: intersection.authorisation.read_cap().clone(), + }; + event_sender.send(event).await?; on_pai_intersection( &interests, store.secrets(), @@ -210,7 +262,7 @@ pub async fn run_session( session_id, send.clone(), our_role, - events, + event_sender.clone(), )?; async move { let res = reconciler.run().await; @@ -225,15 +277,7 @@ pub async fn run_session( // Spawn a task to handle control messages tasks.spawn(error_span!("ctl-recv"), { let cancel_token = cancel_token.clone(); - let fut = control_loop( - our_role, - interests, - caps, - send.clone(), - tasks.clone(), - control_recv, - pai_inbox_tx, - ); + let fut = control_loop(our_role, caps, send.clone(), control_recv, pai_inbox_tx); async move { let res = fut.await; if res.is_ok() { @@ -308,10 +352,8 @@ pub type Tasks = SharedJoinMap>; async fn control_loop( our_role: Role, - our_interests: Rc, caps: Capabilities, sender: ChannelSenders, - tasks: Tasks, mut control_recv: Cancelable>, to_pai: flume::Sender, ) -> Result<(), Error> { @@ -334,21 +376,6 @@ async fn control_loop( match message { Message::CommitmentReveal(msg) => { caps.received_commitment_reveal(our_role, msg.nonce)?; - - let submit_interests_fut = { - let to_pai = to_pai.clone(); - let our_interests = Rc::clone(&our_interests); - async move { - for authorisation in our_interests.keys() { - to_pai - .send_async(pai::Input::SubmitAuthorisation(authorisation.clone())) - .await - .map_err(|_| Error::InvalidState("PAI actor dead"))?; - } - Ok(()) - } - }; - tasks.spawn(error_span!("setup-pai"), submit_interests_fut); } Message::ControlIssueGuarantee(msg) => { let ControlIssueGuarantee { amount, channel } = msg; @@ -379,7 +406,7 @@ async fn control_loop( } async fn on_pai_intersection( - interests: &InterestMap, + interests: &Rc>, secrets: &S, aoi_finder: &AoiFinder, capabilities: &Capabilities, @@ -390,15 
+417,19 @@ async fn on_pai_intersection( authorisation, handle, } = intersection; - let aois = interests - .get(&authorisation) - .ok_or(Error::NoKnownInterestsForCapability)?; + let aois = { + let interests = interests.borrow(); + interests + .get(&authorisation) + .ok_or(Error::NoKnownInterestsForCapability)? + .clone() + }; let namespace = authorisation.namespace(); let capability_handle = capabilities .bind_and_send_ours(secrets, sender, handle, authorisation.read_cap().clone()) .await?; - for aoi in aois.iter().cloned() { + for aoi in aois.into_iter() { aoi_finder .bind_and_send_ours(sender, namespace, aoi, capability_handle) .await?; diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index f8439feb33..94d50f7e51 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -14,7 +14,7 @@ use crate::{ use self::traits::Storage; -pub use self::entry::{Origin, WatchableEntryStore}; +pub use self::entry::{EntryOrigin, WatchableEntryStore}; pub mod auth; pub mod entry; @@ -75,7 +75,9 @@ impl Store { .get_user(&user_id) .ok_or(Error::MissingUserKey(user_id))?; let authorised_entry = entry.attach_authorisation(capability, &secret_key)?; - let inserted = self.entries().ingest(&authorised_entry, Origin::Local)?; + let inserted = self + .entries() + .ingest(&authorised_entry, EntryOrigin::Local)?; Ok((authorised_entry.into_entry(), inserted)) } diff --git a/iroh-willow/src/store/entry.rs b/iroh-willow/src/store/entry.rs index 7403745bb6..88fa1b97ab 100644 --- a/iroh-willow/src/store/entry.rs +++ b/iroh-willow/src/store/entry.rs @@ -1,3 +1,4 @@ +use iroh_net::NodeId; use std::{ collections::HashMap, sync::{Arc, Mutex}, @@ -17,11 +18,33 @@ use super::traits::EntryStorage; const BROADCAST_CAP: usize = 1024; #[derive(Debug, Clone, Copy, Eq, PartialEq)] -pub enum Origin { +pub enum EntryOrigin { /// The entry is inserted locally. Local, - /// The entry is synced in a sync session. - Remote(SessionId), + /// The entry was received from a peer. 
+ Remote { + session: SessionId, + channel: EntryChannel, + }, // TODO: Add details. + // Remote { + // peer: NodeId, + // channel: EntryChannel, + // }, +} + +impl EntryOrigin { + // pub fn peer(&self) -> Option { + // match self { + // EntryOrigin::Local => None, + // EntryOrigin::Remote { peer, .. } => Some(peer) + // } + // } +} + +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub enum EntryChannel { + Reconciliation, + Data, } #[derive(Debug, Clone)] @@ -52,7 +75,7 @@ impl WatchableEntryStore { /// /// Returns `true` if the entry was stored, and `false` if the entry already exists or is /// obsoleted by an existing entry. - pub fn ingest(&self, entry: &AuthorisedEntry, origin: Origin) -> anyhow::Result { + pub fn ingest(&self, entry: &AuthorisedEntry, origin: EntryOrigin) -> anyhow::Result { if self.storage.ingest_entry(entry)? { self.broadcast.lock().unwrap().broadcast(entry, origin); Ok(true) @@ -126,14 +149,14 @@ impl Broadcaster { .push(area) } - fn broadcast(&mut self, entry: &AuthorisedEntry, origin: Origin) { + fn broadcast(&mut self, entry: &AuthorisedEntry, origin: EntryOrigin) { let Some(sessions) = self.watched_areas.get_mut(&entry.namespace_id()) else { return; }; let mut dropped_receivers = vec![]; for (session_id, areas) in sessions { // Do not broadcast back into sessions where the entry came from. - if origin == Origin::Remote(*session_id) { + if matches!(origin, EntryOrigin::Remote { session, ..} if session == *session_id) { continue; } // Check if the session is watching an area where the entry falls into. 
From 540cd8b6ae4dd4a0150bd53cbade3a9575e747eb Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Fri, 12 Jul 2024 15:28:42 +0200 Subject: [PATCH 084/198] cleanup and refactor and test --- iroh-willow/src/auth.rs | 8 +- iroh-willow/src/net.rs | 2 +- iroh-willow/src/proto/grouping.rs | 6 + iroh-willow/src/session.rs | 4 +- iroh-willow/src/session/aoi_finder.rs | 1 - iroh-willow/src/session/capabilities.rs | 4 + iroh-willow/src/session/error.rs | 26 + iroh-willow/src/session/events.rs | 648 ++++++++++++++---------- iroh-willow/src/session/pai_finder.rs | 5 + iroh-willow/src/session/reconciler.rs | 15 +- iroh-willow/src/session/resource.rs | 6 + iroh-willow/src/session/run.rs | 58 ++- iroh-willow/src/util/task.rs | 2 +- 13 files changed, 473 insertions(+), 312 deletions(-) diff --git a/iroh-willow/src/auth.rs b/iroh-willow/src/auth.rs index 69a84fd409..4c414256cf 100644 --- a/iroh-willow/src/auth.rs +++ b/iroh-willow/src/auth.rs @@ -1,5 +1,5 @@ use std::{ - collections::{BTreeSet, HashMap}, + collections::{BTreeSet, HashMap, HashSet}, sync::{Arc, RwLock}, }; @@ -19,7 +19,7 @@ use crate::{ store::traits::{SecretStorage, SecretStoreError, Storage}, }; -pub type InterestMap = HashMap>; +pub type InterestMap = HashMap>; #[derive(Debug, Clone)] pub struct DelegateTo { @@ -235,13 +235,13 @@ impl Auth { .map(|auth| { let area = auth.read_cap().granted_area(); let aoi = AreaOfInterest::new(area); - (auth, BTreeSet::from_iter([aoi])) + (auth, HashSet::from_iter([aoi])) }) .collect::>(); Ok(out) } Interests::Select(interests) => { - let mut out: HashMap> = HashMap::new(); + let mut out: InterestMap = HashMap::new(); for (cap_selector, aoi_selector) in interests { let cap = self.get_read_cap(&cap_selector)?; if let Some(cap) = cap { diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index b42068094b..1ebe72a7d1 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -290,7 +290,7 @@ async fn exchange_commitments( }) } -async fn 
join_all(join_set: &mut JoinSet>) -> anyhow::Result<()> { +pub async fn join_all(join_set: &mut JoinSet>) -> anyhow::Result<()> { let mut final_result = Ok(()); let mut joined = 0; while let Some(res) = join_set.join_next().await { diff --git a/iroh-willow/src/proto/grouping.rs b/iroh-willow/src/proto/grouping.rs index fe48703139..447bcb4a49 100644 --- a/iroh-willow/src/proto/grouping.rs +++ b/iroh-willow/src/proto/grouping.rs @@ -672,6 +672,8 @@ impl<'a> Encoder for AreaInArea<'a> { #[cfg(test)] mod tests { + use std::collections::{BTreeSet, HashSet}; + use crate::proto::{grouping::Area, willow::Path}; #[test] @@ -681,5 +683,9 @@ mod tests { let p2 = Path::new(&[b"foo", b"bar"]).unwrap(); let a2 = Area::path(p2); assert_eq!(a1, a2); + let mut set = HashSet::new(); + set.insert(a1.clone()); + set.insert(a2.clone()); + assert_eq!(set.len(), 1); } } diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 63cd3256cf..1181a0baee 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -1,4 +1,4 @@ -use std::collections::{hash_map, BTreeMap, BTreeSet, HashMap}; +use std::collections::{hash_map, BTreeMap, BTreeSet, HashMap, HashSet}; use crate::{ auth::CapSelector, @@ -64,7 +64,7 @@ pub enum Interests { #[default] All, Select(HashMap), - Exact(HashMap>), + Exact(HashMap>), } impl Interests { diff --git a/iroh-willow/src/session/aoi_finder.rs b/iroh-willow/src/session/aoi_finder.rs index 73676b16f8..114b4305d3 100644 --- a/iroh-willow/src/session/aoi_finder.rs +++ b/iroh-willow/src/session/aoi_finder.rs @@ -114,7 +114,6 @@ impl Inner { let candidate_handle = *candidate_handle; // Check if we have an intersection. if let Some(intersection) = candidate.aoi.intersection(&aoi) { - tracing::warn!(a=?aoi, b=?candidate.aoi, ?intersection, "AOI INTERSECTION"); // We found an intersection! 
let (our_handle, their_handle) = match scope { Scope::Ours => (handle, candidate_handle), diff --git a/iroh-willow/src/session/capabilities.rs b/iroh-willow/src/session/capabilities.rs index b4ecd7d892..a49bcd8a2e 100644 --- a/iroh-willow/src/session/capabilities.rs +++ b/iroh-willow/src/session/capabilities.rs @@ -73,6 +73,10 @@ impl Capabilities { Ok(handle) } + pub fn find_ours(&self, cap: &ReadCapability) -> Option { + self.0.borrow().ours.find(cap) + } + pub fn bind_and_sign_ours( &self, secret_store: &S, diff --git a/iroh-willow/src/session/error.rs b/iroh-willow/src/session/error.rs index 2f08c8e542..a5a35c07da 100644 --- a/iroh-willow/src/session/error.rs +++ b/iroh-willow/src/session/error.rs @@ -73,6 +73,32 @@ pub enum Error { Net(anyhow::Error), } +// TODO: Remove likely? +// Added this to be able to implement PartialEq on EventKind for tests +// but many errors are not PartialEq, so we just return false for them, always +impl PartialEq for Error { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (Self::Store(_), Self::Store(_)) => false, + (Self::Auth(_), Self::Auth(_)) => false, + (Self::PayloadStore(_), Self::PayloadStore(_)) => false, + (Self::KeyStore(_), Self::KeyStore(_)) => false, + (Self::Receive(_), Self::Receive(_)) => false, + (Self::Write(_), Self::Write(_)) => false, + (Self::TaskFailed(_), Self::TaskFailed(_)) => false, + (Self::Pai(_), Self::Pai(_)) => false, + (Self::Net(_), Self::Net(_)) => false, + (Self::MissingResource(l0), Self::MissingResource(r0)) => l0 == r0, + (Self::InvalidParameters(l0), Self::InvalidParameters(r0)) => l0 == r0, + (Self::InvalidState(l0), Self::InvalidState(r0)) => l0 == r0, + (Self::MissingUserKey(l0), Self::MissingUserKey(r0)) => l0 == r0, + _ => core::mem::discriminant(self) == core::mem::discriminant(other), + } + } +} + +impl Eq for Error {} + impl From for Error { fn from(_value: Unauthorised) -> Self { Self::UnauthorisedEntryReceived diff --git a/iroh-willow/src/session/events.rs 
b/iroh-willow/src/session/events.rs index 8885077593..ec3e161422 100644 --- a/iroh-willow/src/session/events.rs +++ b/iroh-willow/src/session/events.rs @@ -35,22 +35,6 @@ type NamespaceInterests = HashMap>; const COMMAND_CHANNEL_CAP: usize = 128; -// type Names -// type NamespaceAuthInterests = HashMap; -// -// #[derive(Debug, Clone, Default)] -// struct InterestsByNamespace { -// auths: BTreeSet, -// aois: BTreeSet, -// } - -// impl InterestsByNamespace { -// fn add(&mut self, other: &InterestsByNamespace) { -// self.auths.extend(other.auths); -// self.aois.extend(other.aois); -// } -// } - #[derive(Debug, Clone)] pub struct EventSender { session_id: SessionId, @@ -82,17 +66,16 @@ impl SessionEvent { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Eq, PartialEq)] pub enum EventKind { CapabilityIntersection { - capability: ReadCapability, + namespace: NamespaceId, + area: Area, }, - // TODO: AoI - AoiIntersection { + InterestIntersection { namespace: NamespaceId, area: AreaOfInterest, }, - // TODO: AoI Reconciled { namespace: NamespaceId, area: AreaOfInterest, @@ -100,16 +83,14 @@ pub enum EventKind { ReconciledAll, Closed { result: Result<(), Arc>, - }, // ReconciledAll, + }, } impl EventKind { pub fn namespace(&self) -> Option { match self { - EventKind::CapabilityIntersection { capability } => { - Some(capability.granted_namespace().id()) - } - EventKind::AoiIntersection { namespace, .. } => Some(*namespace), + EventKind::CapabilityIntersection { namespace, .. } => Some(*namespace), + EventKind::InterestIntersection { namespace, .. } => Some(*namespace), EventKind::Reconciled { namespace, .. 
} => Some(*namespace), _ => None, } @@ -164,6 +145,7 @@ impl ManagedHandle { command_rx, command_tx: command_tx.clone(), establish_tasks: Default::default(), + net_tasks: Default::default(), actor: actor.clone(), peers: Default::default(), sessions: Default::default(), @@ -214,9 +196,10 @@ pub struct PeerManager { command_rx: flume::Receiver, command_tx: flume::Sender, establish_tasks: JoinSet, + net_tasks: JoinSet<(NodeId, Result<()>)>, actor: ActorHandle, - peers: HashMap, + peers: HashMap, // auth: Auth, sessions: HashMap, // intents: HashMap, @@ -233,6 +216,7 @@ struct SessionInfo { submitted_interests: InterestMap, intents: Vec, handle: SessionHandle, + net_error: Option, } impl SessionInfo { @@ -327,13 +311,13 @@ impl IntentHandle { } } - pub async fn add_interests(&self, interests: Interests) -> Result<()> { + pub async fn add_interests(&self, interests: impl Into) -> Result<()> { let (reply, reply_rx) = oneshot::channel(); self.sender .send_async(Command::UpdateIntent { peer: self.peer, intent_id: self.intent_id, - add_interests: interests, + add_interests: interests.into(), reply, }) .await?; @@ -341,22 +325,6 @@ impl IntentHandle { } } -// #[derive(Debug, Clone)] -// pub enum SyncEvent { -// Progress(EventKind), -// ReconciledAll, -// } - -// struct InterestInfo { -// aoi: AreaOfInterest, -// reconciled: bool, -// } - -#[derive(Debug)] -struct PeerInfo { - state: PeerState, -} - #[derive(Debug)] enum PeerState { Connecting { @@ -368,11 +336,9 @@ enum PeerState { intents: Vec, submitted_interests: InterestMap, pending_interests: InterestMap, - task_handle: AbortHandle, }, Active { session_id: SessionId, - net_tasks: JoinSet>, }, // Closing { // session_id: SessionId, @@ -380,6 +346,16 @@ enum PeerState { Placeholder, } +impl PeerState { + pub fn into_intents(self) -> Option> { + match self { + PeerState::Connecting { intents, .. } => Some(intents), + PeerState::Establishing { intents, .. 
} => Some(intents), + _ => None, + } + } +} + impl IntentInfo { fn merge_interests(&mut self, interests: &InterestMap) { for (auth, aois) in interests.iter() { @@ -389,50 +365,6 @@ impl IntentInfo { .extend(aois.clone()); } } - // fn handle_event(&mut self, event: &EventKind) -> (bool, Continuation) { - // match event { - // EventKind::CapabilityIntersection { capability } => { - // if self - // .interests - // .contains_key(&capability.granted_namespace().id()) - // { - // (true, Continuation::Continue) - // } else { - // (false, Continuation::Continue) - // } - // } - // EventKind::AoiIntersection { area, namespace } => match self.interests.get(namespace) { - // None => (false, Continuation::Continue), - // Some(interests) => { - // let matches = interests - // .iter() - // .any(|x| x.area.has_intersection(&area.area)); - // (matches, Continuation::Continue) - // } - // }, - // EventKind::Reconciled { area, namespace } => { - // let Some(interests) = self.interests.get_mut(namespace) else { - // return (false, Continuation::Continue); - // }; - // let matches = interests - // .iter() - // .any(|x| x.area.has_intersection(&area.area)); - // let cont = if matches { - // interests.retain(|x| area.area.includes_area(&x.area)); - // if interests.is_empty() { - // Continuation::Complete - // } else { - // Continuation::Continue - // } - // } else { - // Continuation::Continue - // }; - // (matches, cont) - // } - // EventKind::Closed { .. } => (true, Continuation::Complete), - // EventKind::ReconciledAll => (false, Continuation::Complete), - // } - // } async fn handle_event(&mut self, event: &EventKind) -> Result { let send = |event: EventKind| async { @@ -441,68 +373,47 @@ impl IntentInfo { .await .map_err(|_| ReceiverDropped) }; - match &event { - EventKind::CapabilityIntersection { capability } => { - if self - .interests - .contains_key(&capability.granted_namespace().id()) - { + + let stay_alive = match &event { + EventKind::CapabilityIntersection { namespace, .. 
} => { + if self.interests.contains_key(namespace) { send(event.clone()).await?; - Ok(true) - } else { - Ok(true) } + true } - EventKind::AoiIntersection { area, namespace } => match self.interests.get(namespace) { - None => Ok(true), - Some(interests) => { + EventKind::InterestIntersection { area, namespace } => { + if let Some(interests) = self.interests.get(namespace) { let matches = interests .iter() .any(|x| x.area.has_intersection(&area.area)); if matches { send(event.clone()).await?; } - Ok(true) } - }, + true + } EventKind::Reconciled { area, namespace } => { - let Some(interests) = self.interests.get_mut(namespace) else { - return Ok(true); - }; - let matches = interests - .iter() - .any(|x| x.area.has_intersection(&area.area)); - tracing::info!(?interests, ?matches, ?area, "reconciled pre"); - if matches { - interests.retain(|x| !area.area.includes_area(&x.area)); - tracing::info!(?interests, ?matches, "reconciled post"); - send(event.clone()).await?; - if interests.is_empty() { - send(EventKind::ReconciledAll).await?; - Ok(true) - } else { - Ok(true) + if let Some(interests) = self.interests.get_mut(namespace) { + let matches = interests + .iter() + .any(|x| x.area.has_intersection(&area.area)); + if matches { + send(event.clone()).await?; + interests.retain(|x| !area.area.includes_area(&x.area)); + if interests.is_empty() { + send(EventKind::ReconciledAll).await?; + } } - } else { - Ok(true) } + true } EventKind::Closed { .. 
} => { send(event.clone()).await?; - Ok(false) + false } - EventKind::ReconciledAll => Ok(true), - } - - // let (should_sent, cont) = self.handle_event(event); - // if should_sent { - // self.sender - // .send_async(event.clone()) - // .await - // .map_err(|_| ReceiverDropped)?; - // } - // Ok(cont) - // let event = Ok(SyncEvent::Progress(event.clone())); + EventKind::ReconciledAll => true, + }; + Ok(stay_alive) } } @@ -510,11 +421,6 @@ impl IntentInfo { #[error("receiver dropped")] pub struct ReceiverDropped; -enum Continuation { - Continue, - Complete, -} - impl PeerManager { pub async fn run(mut self) -> Result<(), Error> { loop { @@ -536,9 +442,22 @@ impl PeerManager { self.on_established(res).await?; } + Some(res) = self.net_tasks.join_next(), if !self.net_tasks.is_empty() => { + match res { + Err(err) if err.is_cancelled() => { + continue; + }, + Err(err) => Err(err).context("net task paniced")?, + Ok((peer, res)) => { + if let Err(err) = res { + self.on_conn_fail(peer, err); + } + } + } + }, Some((peer, conn)) = self.dialer.next() => { match conn { - Ok(conn) => self.handle_connection(conn, Role::Alfie).await?, + Ok(conn) => self.handle_connection(conn, Role::Alfie).await, Err(err) => self.on_dial_fail(peer, err).await, } @@ -550,11 +469,11 @@ impl PeerManager { } async fn on_dial_fail(&mut self, peer: NodeId, err: anyhow::Error) { - let Some(peer_info) = self.peers.remove(&peer) else { + let Some(peer_state) = self.peers.remove(&peer) else { tracing::warn!(?peer, "dialer returned connection error for unknown peer"); return; }; - let PeerState::Connecting { intents, .. } = peer_info.state else { + let PeerState::Connecting { intents, .. 
} = peer_state else { tracing::warn!( ?peer, "dialer returned connection error for peer in wrong state" @@ -572,57 +491,67 @@ impl PeerManager { } } + fn session_mut(&mut self, peer: &NodeId) -> Option<&mut SessionInfo> { + let peer_state = self.peers.get(peer)?; + match peer_state { + PeerState::Active { session_id } => self.sessions.get_mut(session_id), + _ => None, + } + } + + fn on_conn_fail(&mut self, peer: NodeId, err: anyhow::Error) { + if let Some(session) = self.session_mut(&peer) { + if session.net_error.is_none() { + session.net_error = Some(err); + } + } + } + async fn on_established(&mut self, res: EstablishRes) -> anyhow::Result<()> { let (peer, res) = res; - let peer_info = self + let peer_state = self .peers .get_mut(&peer) .ok_or_else(|| anyhow!("unreachable: on_established called for unknown peer"))?; - let peer_state = std::mem::replace(&mut peer_info.state, PeerState::Placeholder); + let current_state = std::mem::replace(peer_state, PeerState::Placeholder); let PeerState::Establishing { our_role, intents, submitted_interests, pending_interests, - task_handle: _, - } = peer_state + } = current_state else { anyhow::bail!("unreachable: on_established called for peer in wrong state") }; match res { - Ok((net_tasks, session_handle)) => { + Ok((mut net_tasks, session_handle)) => { if our_role.is_alfie() && intents.is_empty() { session_handle.close(); } let session_id = session_handle.session_id(); + self.net_tasks.spawn( + async move { crate::net::join_all(&mut net_tasks).await } + .map(move |r| (peer, r)), + ); let mut session_info = SessionInfo { our_role, - // peer, complete_areas: Default::default(), submitted_interests, intents, handle: session_handle, + net_error: None, }; if !pending_interests.is_empty() { session_info.push_interests(pending_interests).await?; } self.sessions.insert(session_id, session_info); - peer_info.state = PeerState::Active { - session_id, - net_tasks, - }; + *peer_state = PeerState::Active { session_id }; } Err(err) => { 
tracing::warn!(?peer, ?err, "establishing session failed"); let result = Err(Arc::new(Error::Net(err))); - for intent in intents { - let result = result.clone(); - intent - .sender - .send_async(EventKind::Closed { result }) - .await - .ok(); - } + let senders = intents.into_iter().map(|intent| intent.sender); + send_all(senders, EventKind::Closed { result }).await; self.peers.remove(&peer); } } @@ -643,25 +572,22 @@ impl PeerManager { intent_id }; let intent_info = IntentInfo { - // peer, intent_id, interests: flatten_interests(&intent_interests), mode: init.mode, sender, }; - // self.intents.insert(intent_id, intent_info); match self.peers.get_mut(&peer) { None => { self.dialer.queue_dial(peer, ALPN); let intents = vec![intent_info]; - let state = PeerState::Connecting { + let peer_state = PeerState::Connecting { intents, interests: intent_interests, }; - let peer_info = PeerInfo { state }; - self.peers.insert(peer, peer_info); + self.peers.insert(peer, peer_state); } - Some(info) => match &mut info.state { + Some(state) => match state { PeerState::Connecting { intents, interests } => { intents.push(intent_info); merge_interests(interests, intent_interests); @@ -700,7 +626,7 @@ impl PeerManager { let add_interests = self.actor.resolve_interests(add_interests).await?; match self.peers.get_mut(&peer) { None => anyhow::bail!("invalid node id"), - Some(peer_info) => match &mut peer_info.state { + Some(peer_state) => match peer_state { PeerState::Connecting { intents, interests } => { let Some(intent_info) = intents.iter_mut().find(|i| i.intent_id == intent_id) else { @@ -714,10 +640,10 @@ impl PeerManager { pending_interests, .. 
} => { - let Some(intent_info) = intents.iter_mut().find(|i| i.intent_id == intent_id) - else { - anyhow::bail!("invalid intent id"); - }; + let intent_info = intents + .iter_mut() + .find(|i| i.intent_id == intent_id) + .ok_or_else(|| anyhow!("invalid intent id"))?; intent_info.merge_interests(&add_interests); merge_interests(pending_interests, add_interests); } @@ -740,11 +666,11 @@ impl PeerManager { } pub fn cancel_intent(&mut self, peer: NodeId, intent_id: u64) { - let Some(peer_info) = self.peers.get_mut(&peer) else { + let Some(peer_state) = self.peers.get_mut(&peer) else { return; }; - match &mut peer_info.state { + match peer_state { PeerState::Connecting { intents, .. } => { intents.retain(|intent_info| intent_info.intent_id != intent_id); if intents.is_empty() { @@ -762,7 +688,6 @@ impl PeerManager { .retain(|intent| intent.intent_id != intent_id); if session.intents.is_empty() { session.handle.close(); - // TODO: Abort session } } PeerState::Placeholder => unreachable!(), @@ -791,22 +716,20 @@ impl PeerManager { self.cancel_intent(peer, intent_id); } Command::HandleConnection { conn } => { - if let Err(err) = self.handle_connection(conn, Role::Betty).await { - tracing::warn!("failed to handle connection: {err:?}"); - } + self.handle_connection(conn, Role::Betty).await; } } } - pub async fn received_event(&mut self, event: SessionEvent) { - tracing::info!(?event, "command"); + pub async fn received_event(&mut self, mut event: SessionEvent) { + tracing::info!(?event, "event"); let Some(session) = self.sessions.get_mut(&event.session_id) else { tracing::warn!(?event, "Got event for unknown session"); return; }; let mut is_closed = false; - match &event.event { + match &mut event.event { EventKind::Reconciled { namespace, area } => { session .complete_areas @@ -814,8 +737,14 @@ impl PeerManager { .or_default() .insert(area.clone()); } - EventKind::Closed { .. 
} => { + EventKind::Closed { result } => { is_closed = true; + if result.is_ok() { + // Inject error from networking tasks. + if let Some(net_error) = session.net_error.take() { + *result = Err(Arc::new(Error::Net(net_error))); + } + } } _ => {} } @@ -847,14 +776,37 @@ impl PeerManager { } } - async fn handle_connection(&mut self, conn: Connection, our_role: Role) -> Result<()> { - let peer = iroh_net::endpoint::get_remote_node_id(&conn)?; - let peer_info = self.peers.get_mut(&peer); + async fn handle_connection(&mut self, conn: Connection, our_role: Role) { + let peer = match iroh_net::endpoint::get_remote_node_id(&conn) { + Ok(node_id) => node_id, + Err(err) => { + tracing::warn!(?err, "skip connection: failed to get node id"); + return; + } + }; + if let Err(err) = self.handle_connection_inner(peer, conn, our_role).await { + tracing::warn!(?peer, ?err, "failed to establish connection"); + if let Some(peer_state) = self.peers.remove(&peer) { + if let Some(intents) = peer_state.into_intents() { + let result = Err(Arc::new(Error::Net(err))); + let senders = intents.into_iter().map(|intent| intent.sender); + send_all(senders, EventKind::Closed { result }).await; + } + } + } + } + async fn handle_connection_inner( + &mut self, + peer: NodeId, + conn: Connection, + our_role: Role, + ) -> Result<()> { + let peer_state = self.peers.get_mut(&peer); let (interests, mode, intents) = match our_role { Role::Alfie => { - let peer = peer_info + let peer_state = peer_state .ok_or_else(|| anyhow!("got connection for peer without any intents"))?; - let peer_state = std::mem::replace(&mut peer.state, PeerState::Placeholder); + let peer_state = std::mem::replace(peer_state, PeerState::Placeholder); match peer_state { PeerState::Placeholder => unreachable!(), PeerState::Active { .. 
} => { @@ -874,8 +826,8 @@ impl PeerManager { } } Role::Betty => { - let intents = if let Some(peer) = peer_info { - let peer_state = std::mem::replace(&mut peer.state, PeerState::Placeholder); + let intents = if let Some(peer_state) = peer_state { + let peer_state = std::mem::replace(peer_state, PeerState::Placeholder); match peer_state { PeerState::Placeholder => unreachable!(), PeerState::Active { .. } => { @@ -912,18 +864,14 @@ impl PeerManager { Ok::<_, anyhow::Error>((tasks, session_handle)) }; let establish_fut = establish_fut.map(move |res| (peer, res)); - let task_handle = self.establish_tasks.spawn(establish_fut); + let _task_handle = self.establish_tasks.spawn(establish_fut); let peer_state = PeerState::Establishing { our_role, intents, submitted_interests, pending_interests: Default::default(), - task_handle, }; - let peer_info = PeerInfo { state: peer_state }; - self.peers.insert(peer, peer_info); - // peer. - // crate::net::run(me, self.actor.clone(), conn, our_role, init) + self.peers.insert(peer, peer_state); Ok(()) } } @@ -942,8 +890,20 @@ fn flatten_interests(interests: &InterestMap) -> NamespaceInterests { out } +async fn send_all( + senders: impl IntoIterator>, + message: T, +) -> Vec>> { + let futs = senders.into_iter().map(|sender| { + let message = message.clone(); + async move { sender.send_async(message).await } + }); + futures_buffered::join_all(futs).await +} + #[cfg(test)] mod tests { + use bytes::Bytes; use iroh_net::{Endpoint, NodeAddr, NodeId}; use rand::SeedableRng; use std::collections::{BTreeMap, BTreeSet, HashMap}; @@ -964,124 +924,214 @@ mod tests { }; #[tokio::test(flavor = "multi_thread")] - async fn peer_manager_simple() -> anyhow::Result<()> { + async fn peer_manager_two_intents() -> anyhow::Result<()> { iroh_test::logging::setup_multithreaded(); let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(1); - let (alfie, alfie_ep, alfie_addr, alfie_task) = create(&mut rng).await?; - let (betty, betty_ep, betty_addr, betty_task) = 
create(&mut rng).await?; - - let betty_node_id = betty_addr.node_id; - alfie_ep.add_node_addr(betty_addr)?; - - let user_alfie = alfie.create_user().await?; - let user_betty = betty.create_user().await?; - - let namespace_id = alfie - .create_namespace(NamespaceKind::Owned, user_alfie) - .await?; - - let cap_for_betty = alfie - .delegate_caps( - CapSelector::widest(namespace_id), - AccessMode::Write, - DelegateTo::new(user_betty, None), - ) - .await?; - - betty.import_caps(cap_for_betty).await?; - - let path = Path::new(&[b"foo", b"1"])?; - let entry = EntryForm::new_bytes(namespace_id, path, "foo 1"); - let (_, inserted) = betty.insert(entry, user_betty).await?; - assert!(inserted); - - let path = Path::new(&[b"bar", b"2"])?; - let entry = EntryForm::new_bytes(namespace_id, path, "bar 1"); - let (_, inserted) = betty.insert(entry, user_betty).await?; - assert!(inserted); - - let path = Path::new(&[b"bar", b"3"])?; - let entry = EntryForm::new_bytes(namespace_id, path, "bar 2"); - let (_, inserted) = betty.insert(entry, user_betty).await?; - assert!(inserted); - - let t1 = tokio::task::spawn({ + let ( + shutdown, + namespace, + (alfie, _alfie_node_id, _alfie_user), + (betty, betty_node_id, betty_user), + ) = create_and_setup_two(&mut rng).await?; + + insert(&betty, namespace, betty_user, &[b"foo", b"1"], "foo 1").await?; + insert(&betty, namespace, betty_user, &[b"bar", b"2"], "bar 2").await?; + insert(&betty, namespace, betty_user, &[b"bar", b"3"], "bar 3").await?; + + let task_foo = tokio::task::spawn({ let alfie = alfie.clone(); async move { let path = Path::new(&[b"foo"]).unwrap(); - let target_area = Area::path(path); - let interests = Interests::select().area(namespace_id, [target_area.clone()]); + + let interests = Interests::select().area(namespace, [Area::path(path.clone())]); let init = SessionInit::new(interests, SessionMode::ReconcileOnce); - let intent_handle = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); + let handle = 
alfie.sync_with_peer(betty_node_id, init).await.unwrap(); - let event = intent_handle.next().await.unwrap(); - assert!(matches!(event, EventKind::CapabilityIntersection { .. })); + assert_eq!( + handle.next().await.unwrap(), + EventKind::CapabilityIntersection { + namespace, + area: Area::full(), + } + ); - let event = intent_handle.next().await.unwrap(); - assert!( - matches!(event, EventKind::AoiIntersection { namespace, area } if namespace == namespace_id && area.area == target_area) + assert_eq!( + handle.next().await.unwrap(), + EventKind::InterestIntersection { + namespace, + area: Area::path(path.clone()).into() + } ); - let event = intent_handle.next().await.unwrap(); - let EventKind::Reconciled { namespace, area } = event else { - panic!("expected Reconciled"); - }; - assert_eq!(namespace, namespace_id); - assert_eq!(area.area, target_area); + assert_eq!( + handle.next().await.unwrap(), + EventKind::Reconciled { + namespace, + area: Area::path(path.clone()).into() + } + ); - let event = intent_handle.next().await.unwrap(); - assert!(matches!(event, EventKind::ReconciledAll)); + assert_eq!(handle.next().await.unwrap(), EventKind::ReconciledAll); - let event = intent_handle.next().await.unwrap(); - assert!(matches!(event, EventKind::Closed { result } if result.is_ok())); + assert_eq!( + handle.next().await.unwrap(), + EventKind::Closed { result: Ok(()) } + ); - let event = intent_handle.next().await; - assert!(event.is_none()); + assert!(handle.next().await.is_none()); } }); - let t2 = tokio::task::spawn({ + let task_bar = tokio::task::spawn({ let alfie = alfie.clone(); async move { let path = Path::new(&[b"bar"]).unwrap(); - let target_area = Area::path(path); - let interests = Interests::select().area(namespace_id, [target_area.clone()]); + + let interests = Interests::select().area(namespace, [Area::path(path.clone())]); let init = SessionInit::new(interests, SessionMode::ReconcileOnce); - let intent_handle = alfie.sync_with_peer(betty_node_id, 
init).await.unwrap(); - let event = intent_handle.next().await.unwrap(); - assert!(matches!(event, EventKind::CapabilityIntersection { .. })); + let handle = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); - let event = intent_handle.next().await.unwrap(); - assert!( - matches!(event, EventKind::AoiIntersection { namespace, area } if namespace == namespace_id && area.area == target_area) + assert_eq!( + handle.next().await.unwrap(), + EventKind::CapabilityIntersection { + namespace, + area: Area::full(), + } ); - let event = intent_handle.next().await.unwrap(); - let EventKind::Reconciled { namespace, area } = event else { - panic!("expected Reconciled"); - }; - assert_eq!(namespace, namespace_id); - assert_eq!(area.area, target_area); + assert_eq!( + handle.next().await.unwrap(), + EventKind::InterestIntersection { + namespace, + area: Area::path(path.clone()).into() + } + ); + + assert_eq!( + handle.next().await.unwrap(), + EventKind::Reconciled { + namespace, + area: Area::path(path.clone()).into() + } + ); - let event = intent_handle.next().await.unwrap(); - assert!(matches!(event, EventKind::ReconciledAll)); + assert_eq!(handle.next().await.unwrap(), EventKind::ReconciledAll); - let event = intent_handle.next().await.unwrap(); - assert!(matches!(event, EventKind::Closed { result } if result.is_ok())); + assert_eq!( + handle.next().await.unwrap(), + EventKind::Closed { result: Ok(()) } + ); - let event = intent_handle.next().await; - assert!(event.is_none()); + assert!(handle.next().await.is_none()); } }); - t1.await.unwrap(); - t2.await.unwrap(); - betty_task.abort(); - alfie_task.abort(); + task_foo.await.unwrap(); + task_bar.await.unwrap(); + shutdown(); Ok(()) } + #[tokio::test(flavor = "multi_thread")] + async fn peer_manager_update_intent() -> anyhow::Result<()> { + iroh_test::logging::setup_multithreaded(); + let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(1); + let ( + shutdown, + namespace, + (alfie, _alfie_node_id, _alfie_user), + 
(betty, betty_node_id, betty_user), + ) = create_and_setup_two(&mut rng).await?; + + insert(&betty, namespace, betty_user, &[b"foo"], "foo 1").await?; + insert(&betty, namespace, betty_user, &[b"bar"], "bar 1").await?; + + let path = Path::new(&[b"foo"]).unwrap(); + + let interests = Interests::select().area(namespace, [Area::path(path.clone())]); + let init = SessionInit::new(interests, SessionMode::Live); + let handle = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); + + assert_eq!( + handle.next().await.unwrap(), + EventKind::CapabilityIntersection { + namespace, + area: Area::full(), + } + ); + + assert_eq!( + handle.next().await.unwrap(), + EventKind::InterestIntersection { + namespace, + area: Area::path(path.clone()).into() + } + ); + assert_eq!( + handle.next().await.unwrap(), + EventKind::Reconciled { + namespace, + area: Area::path(path.clone()).into() + } + ); + assert_eq!(handle.next().await.unwrap(), EventKind::ReconciledAll); + + let path = Path::new(&[b"bar"]).unwrap(); + let interests = Interests::select().area(namespace, [Area::path(path.clone())]); + handle.add_interests(interests).await?; + + assert_eq!( + handle.next().await.unwrap(), + EventKind::InterestIntersection { + namespace, + area: Area::path(path.clone()).into() + } + ); + assert_eq!( + handle.next().await.unwrap(), + EventKind::Reconciled { + namespace, + area: Area::path(path.clone()).into() + } + ); + + assert_eq!(handle.next().await.unwrap(), EventKind::ReconciledAll); + + shutdown(); + Ok(()) + } + + pub async fn create_and_setup_two( + rng: &mut rand_chacha::ChaCha12Rng, + ) -> anyhow::Result<( + impl Fn(), + NamespaceId, + (ManagedHandle, NodeId, UserId), + (ManagedHandle, NodeId, UserId), + )> { + let (alfie, alfie_ep, alfie_addr, alfie_task) = create(rng).await?; + let (betty, betty_ep, betty_addr, betty_task) = create(rng).await?; + + let betty_node_id = betty_addr.node_id; + let alfie_node_id = alfie_addr.node_id; + alfie_ep.add_node_addr(betty_addr)?; + 
betty_ep.add_node_addr(alfie_addr)?; + + let (namespace_id, alfie_user, betty_user) = setup_and_delegate(&alfie, &betty).await?; + + let shutdown = move || { + betty_task.abort(); + alfie_task.abort(); + }; + Ok(( + shutdown, + namespace_id, + (alfie, alfie_node_id, alfie_user), + (betty, betty_node_id, betty_user), + )) + } + pub async fn create( rng: &mut rand_chacha::ChaCha12Rng, ) -> anyhow::Result<( @@ -1116,4 +1166,40 @@ mod tests { }); Ok((handle, endpoint, node_addr, accept_task)) } + + async fn setup_and_delegate( + alfie: &ManagedHandle, + betty: &ManagedHandle, + ) -> anyhow::Result<(NamespaceId, UserId, UserId)> { + let user_alfie = alfie.create_user().await?; + let user_betty = betty.create_user().await?; + + let namespace_id = alfie + .create_namespace(NamespaceKind::Owned, user_alfie) + .await?; + + let cap_for_betty = alfie + .delegate_caps( + CapSelector::widest(namespace_id), + AccessMode::Write, + DelegateTo::new(user_betty, None), + ) + .await?; + + betty.import_caps(cap_for_betty).await?; + Ok((namespace_id, user_alfie, user_betty)) + } + + async fn insert( + handle: &ManagedHandle, + namespace_id: NamespaceId, + user: UserId, + path: &[&[u8]], + bytes: impl Into, + ) -> anyhow::Result<()> { + let path = Path::new(path)?; + let entry = EntryForm::new_bytes(namespace_id, path, bytes); + handle.insert(entry, user).await?; + Ok(()) + } } diff --git a/iroh-willow/src/session/pai_finder.rs b/iroh-willow/src/session/pai_finder.rs index 5dca869114..48f6eb73d4 100644 --- a/iroh-willow/src/session/pai_finder.rs +++ b/iroh-willow/src/session/pai_finder.rs @@ -72,6 +72,7 @@ pub struct PaiFinder { our_intersection_handles: ResourceMap, their_intersection_handles: ResourceMap, requested_subspace_cap_handles: HashSet, + submitted: HashSet, } impl PaiFinder { @@ -107,6 +108,7 @@ impl PaiFinder { their_intersection_handles: Default::default(), fragments_info: Default::default(), requested_subspace_cap_handles: Default::default(), + submitted: 
Default::default(), } } @@ -140,6 +142,9 @@ impl PaiFinder { } async fn submit_authorisation(&mut self, authorisation: ReadAuthorisation) { + if !self.submitted.insert(authorisation.clone()) { + return; + } trace!(?authorisation, "pai submit auth"); let read_cap = authorisation.read_cap(); let fragment_kit = PaiScheme::get_fragment_kit(read_cap); diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index 11ab6d5bab..941ca88752 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -26,7 +26,7 @@ use crate::{ events::{EventKind, EventSender}, payload::{send_payload_chunked, CurrentPayload}, static_tokens::StaticTokens, - Error, Role, SessionId, + Error, Role, SessionId, SessionMode, }, store::{ entry::{EntryChannel, EntryOrigin}, @@ -56,8 +56,9 @@ impl Reconciler { static_tokens: StaticTokens, session_id: SessionId, send: ChannelSenders, - our_role: Role, events: EventSender, + our_role: Role, + mode: SessionMode, ) -> Result { let shared = Shared { store, @@ -65,6 +66,7 @@ impl Reconciler { send, static_tokens, session_id, + mode, }; Ok(Self { shared, @@ -87,7 +89,9 @@ impl Reconciler { ControlFlow::Continue(_) => {} ControlFlow::Break(_) => { debug!("reconciliation complete"); - break; + if self.shared.mode == SessionMode::ReconcileOnce { + break; + } } } } @@ -98,7 +102,7 @@ impl Reconciler { let area = intersection.intersection.clone(); let namespace = intersection.namespace; self.targets.init_target(&self.shared, intersection).await?; - self.events.send(EventKind::AoiIntersection { namespace, area }).await?; + self.events.send(EventKind::InterestIntersection { namespace, area }).await?; } } } @@ -214,10 +218,8 @@ impl TargetMap { shared: &Shared, requested_id: &TargetId, ) -> Result<&mut Target, Error> { - tracing::info!("aoi wait: {requested_id:?}"); if !self.map.contains_key(requested_id) { self.wait_for_target(shared, requested_id).await?; - tracing::info!("aoi resolved: 
{requested_id:?}"); } return Ok(self.map.get_mut(requested_id).unwrap()); } @@ -340,6 +342,7 @@ struct Shared { send: ChannelSenders, static_tokens: StaticTokens, session_id: SessionId, + mode: SessionMode, } #[derive(Debug)] diff --git a/iroh-willow/src/session/resource.rs b/iroh-willow/src/session/resource.rs index b699ef9fb4..0523472236 100644 --- a/iroh-willow/src/session/resource.rs +++ b/iroh-willow/src/session/resource.rs @@ -96,6 +96,12 @@ where (handle, true) } } + + pub fn find(&self, resource: &R) -> Option { + self.map + .iter() + .find_map(|(handle, r)| (r.value == *resource).then_some(*handle)) + } } #[derive(Debug, thiserror::Error)] diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 3de17d1fec..0dbe038912 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -1,4 +1,4 @@ -use std::{cell::RefCell, rc::Rc}; +use std::{cell::RefCell, collections::hash_map, rc::Rc}; use futures_concurrency::stream::StreamExt as _; use futures_lite::StreamExt as _; @@ -46,6 +46,7 @@ pub async fn run_session( event_sender: EventSender, update_receiver: flume::Receiver, ) -> Result<(), Error> { + debug!(role = ?our_role, mode = ?init.mode, "start session"); let Channels { send, recv } = channels; let ChannelReceivers { control_recv, @@ -80,7 +81,6 @@ pub async fn run_session( let tasks = Tasks::default(); let initial_interests = store.auth().resolve_interests(init.interests)?; - tracing::warn!("INIT INTEREST {initial_interests:?}"); let all_interests = Rc::new(RefCell::new(initial_interests.clone())); let initial_interests = Rc::new(initial_interests); @@ -89,28 +89,53 @@ pub async fn run_session( // Spawn a task to handle session updates. 
tasks.spawn(error_span!("upd"), { - // let tokens = tokens.clone(); let store = store.clone(); let caps = caps.clone(); let to_pai = pai_inbox_tx.clone(); let all_interests = all_interests.clone(); + let sender = send.clone(); + let aoi_finder = aoi_finder.clone(); async move { while let Some(update) = update_receiver.next().await { match update { SessionUpdate::AddInterests(interests) => { caps.revealed().await; let interests = store.auth().resolve_interests(interests)?; - tracing::warn!("UPDATE INTEREST {interests:?}"); for (authorisation, aois) in interests.into_iter() { - all_interests - .borrow_mut() - .entry(authorisation.clone()) - .or_default() - .extend(aois); - to_pai - .send_async(pai::Input::SubmitAuthorisation(authorisation)) - .await - .map_err(|_| Error::InvalidState("PAI actor dead"))?; + let mut all_interests = all_interests.borrow_mut(); + let is_new_cap; + match all_interests.entry(authorisation.clone()) { + hash_map::Entry::Occupied(mut entry) => { + is_new_cap = false; + entry.get_mut().extend(aois.clone()); + } + hash_map::Entry::Vacant(entry) => { + is_new_cap = true; + entry.insert(aois.clone()); + } + } + drop(all_interests); + if let Some(capability_handle) = + caps.find_ours(authorisation.read_cap()) + { + let namespace = authorisation.namespace(); + for aoi in aois.into_iter() { + aoi_finder + .bind_and_send_ours( + &sender, + namespace, + aoi, + capability_handle, + ) + .await?; + } + } + if is_new_cap { + to_pai + .send_async(pai::Input::SubmitAuthorisation(authorisation)) + .await + .map_err(|_| Error::InvalidState("PAI actor dead"))?; + } } } } @@ -154,7 +179,8 @@ pub async fn run_session( pai::Output::SendMessage(message) => send.send(message).await?, pai::Output::NewIntersection(intersection) => { let event = EventKind::CapabilityIntersection { - capability: intersection.authorisation.read_cap().clone(), + namespace: intersection.authorisation.namespace(), + area: intersection.authorisation.read_cap().granted_area().clone(), }; 
event_sender.send(event).await?; on_pai_intersection( @@ -261,8 +287,9 @@ pub async fn run_session( tokens.clone(), session_id, send.clone(), - our_role, event_sender.clone(), + our_role, + init.mode, )?; async move { let res = reconciler.run().await; @@ -357,7 +384,6 @@ async fn control_loop( mut control_recv: Cancelable>, to_pai: flume::Sender, ) -> Result<(), Error> { - debug!(role = ?our_role, "start session"); // Reveal our nonce. let reveal_message = caps.reveal_commitment()?; sender.send(reveal_message).await?; diff --git a/iroh-willow/src/util/task.rs b/iroh-willow/src/util/task.rs index 826038a492..aef803574e 100644 --- a/iroh-willow/src/util/task.rs +++ b/iroh-willow/src/util/task.rs @@ -159,7 +159,7 @@ where pub async fn shutdown(&self) { self.abort_all(); - while let Some(_) = self.join_next().await {} + while self.join_next().await.is_some() {} } } From c3ca8fef3cd787a9f9e9318188125187b85fef6a Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 16 Jul 2024 09:11:45 +0200 Subject: [PATCH 085/198] cleanup and simplify --- Cargo.lock | 1 + iroh-willow/Cargo.toml | 1 + iroh-willow/src/actor.rs | 125 ++-- iroh-willow/src/net.rs | 14 +- iroh-willow/src/session/aoi_finder.rs | 4 +- iroh-willow/src/session/data.rs | 6 +- iroh-willow/src/session/events.rs | 901 +++++++++++++------------- iroh-willow/src/session/reconciler.rs | 32 +- iroh-willow/src/session/run.rs | 36 +- 9 files changed, 543 insertions(+), 577 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c52d751947..d5908a1117 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2957,6 +2957,7 @@ dependencies = [ "test-strategy", "thiserror", "tokio", + "tokio-stream", "tokio-util", "tracing", "tracing-subscriber", diff --git a/iroh-willow/Cargo.toml b/iroh-willow/Cargo.toml index 8bc4d5997f..6e7bca4731 100644 --- a/iroh-willow/Cargo.toml +++ b/iroh-willow/Cargo.toml @@ -43,6 +43,7 @@ hex = "0.4.3" curve25519-dalek = { version = "4.1.3", features = ["digest", "rand_core", "serde"] } sha2 = 
"0.10.8" futures-buffered = "0.2.6" +tokio-stream = { version = "0.1.15", features = ["sync"] } [dev-dependencies] iroh-test = { path = "../iroh-test" } diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/actor.rs index 656aac6d83..09e5573bfa 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -4,7 +4,10 @@ use anyhow::Result; use futures_lite::{future::Boxed as BoxFuture, stream::Stream, StreamExt}; use futures_util::future::{self, FutureExt}; use iroh_base::key::NodeId; -use tokio::{sync::oneshot, task::JoinSet}; +use tokio::{ + sync::{mpsc, oneshot}, + task::JoinSet, +}; use tokio_util::sync::CancellationToken; use tracing::{debug, error, error_span, trace, warn, Instrument}; @@ -31,7 +34,8 @@ use crate::{ }; pub const INBOX_CAP: usize = 1024; -pub const SESSION_EVENT_CAP: usize = 1024; +pub const SESSION_EVENT_CHANNEL_CAP: usize = 64; +pub const SESSION_UPDATE_CHANNEL_CAP: usize = 64; #[derive(Debug, Clone)] pub struct ActorHandle { @@ -48,22 +52,11 @@ impl ActorHandle { create_store: impl 'static + Send + FnOnce() -> S, me: NodeId, ) -> ActorHandle { - let (handle, events_rx) = Self::spawn_with_events(create_store, me); - // drop all events - tokio::task::spawn(async move { while events_rx.recv_async().await.is_ok() {} }); - handle - } - - pub fn spawn_with_events( - create_store: impl 'static + Send + FnOnce() -> S, - me: NodeId, - ) -> (ActorHandle, flume::Receiver) { let (tx, rx) = flume::bounded(INBOX_CAP); - let (session_event_tx, session_event_rx) = flume::bounded(SESSION_EVENT_CAP); let join_handle = std::thread::Builder::new() .name("willow-actor".to_string()) .spawn(move || { - let span = error_span!("willow_thread", me=%me.fmt_short()); + let span = error_span!("willow-actor", me=%me.fmt_short()); let _guard = span.enter(); let store = (create_store)(); @@ -75,24 +68,19 @@ impl ActorHandle { next_session_id: 0, session_tasks: Default::default(), tasks: Default::default(), - session_event_tx, }; if let Err(error) = actor.run() 
{ - error!(?error, "storage thread failed"); + error!(?error, "willow actor failed"); }; }) - .expect("failed to spawn thread"); + .expect("failed to spawn willow-actor thread"); let join_handle = Arc::new(Some(join_handle)); - (ActorHandle { tx, join_handle }, session_event_rx) + ActorHandle { tx, join_handle } } pub async fn send(&self, action: ToActor) -> Result<()> { self.tx.send_async(action).await?; Ok(()) } - pub fn send_blocking(&self, action: ToActor) -> Result<()> { - self.tx.send(action)?; - Ok(()) - } pub async fn ingest_entry(&self, authorised_entry: AuthorisedEntry) -> Result<()> { let (reply, reply_rx) = oneshot::channel(); self.send(ToActor::IngestEntry { @@ -104,6 +92,7 @@ impl ActorHandle { reply_rx.await??; Ok(()) } + pub async fn insert_entry(&self, entry: Entry, auth: impl Into) -> Result<()> { let (reply, reply_rx) = oneshot::channel(); self.send(ToActor::InsertEntry { @@ -225,14 +214,21 @@ impl ActorHandle { .await?; reply_rx.await? } + + pub async fn shutdown(&self) -> Result<()> { + let (reply, reply_rx) = oneshot::channel(); + self.send(ToActor::Shutdown { reply: Some(reply) }).await?; + reply_rx.await?; + Ok(()) + } } impl Drop for ActorHandle { fn drop(&mut self) { // this means we're dropping the last reference if let Some(handle) = Arc::get_mut(&mut self.join_handle) { + let handle = handle.take().expect("can only drop once"); self.tx.send(ToActor::Shutdown { reply: None }).ok(); - let handle = handle.take().expect("may only run once"); if let Err(err) = handle.join() { warn!(?err, "Failed to join sync actor"); } @@ -242,46 +238,31 @@ impl Drop for ActorHandle { #[derive(Debug)] pub struct SessionHandle { - session_id: SessionId, - on_finish: future::Shared>>>, - cancel_token: CancellationToken, - update_tx: flume::Sender, + pub session_id: SessionId, + pub cancel_token: CancellationToken, + pub update_tx: mpsc::Sender, + pub event_rx: mpsc::Receiver, } impl SessionHandle { - fn new( - session_id: SessionId, - cancel_token: 
CancellationToken, - on_finish: oneshot::Receiver>>, - update_tx: flume::Sender, - ) -> Self { - let on_finish = on_finish - .map(|r| match r { - Ok(res) => res, - Err(_) => Err(Arc::new(Error::ActorFailed)), - }) - .boxed() - .shared(); - SessionHandle { - session_id, - on_finish, - cancel_token, - update_tx, - } - } - pub fn session_id(&self) -> SessionId { self.session_id } + /// Wait for the session to finish. /// /// Returns an error if the session failed to complete. - pub async fn on_finish(&self) -> Result<(), Arc> { - self.on_finish.clone().await + pub async fn on_finish(&mut self) -> Result<(), Arc> { + while let Some(event) = self.event_rx.recv().await { + if let EventKind::Closed { result } = event { + return result; + } + } + Err(Arc::new(Error::ActorFailed)) } pub async fn send_update(&self, update: SessionUpdate) -> Result<()> { - self.update_tx.send_async(update).await?; + self.update_tx.send(update).await?; Ok(()) } @@ -306,11 +287,6 @@ pub enum ToActor { init: SessionInit, reply: oneshot::Sender>, }, - // UpdateSession { - // session_id: SessionId, - // interests: Interests, - // reply: oneshot::Sender>, - // }, GetEntries { namespace: NamespaceId, range: ThreeDRange, @@ -364,8 +340,8 @@ pub enum ToActor { struct ActiveSession { #[allow(unused)] peer: NodeId, - on_finish: oneshot::Sender>>, task_key: TaskKey, // state: SharedSessionState + event_tx: mpsc::Sender, } #[derive(Debug)] @@ -376,7 +352,6 @@ pub struct Actor { sessions: HashMap, session_tasks: JoinMap>, tasks: JoinSet<()>, - session_event_tx: flume::Sender, } impl Actor { @@ -439,35 +414,41 @@ impl Actor { init, reply, } => { - let id = self.next_session_id(); + let session_id = self.next_session_id(); let store = self.store.clone(); let cancel_token = CancellationToken::new(); - let event_sender = EventSender::new(id, self.session_event_tx.clone()); - let (update_tx, update_rx) = flume::bounded(16); + + let (update_tx, update_rx) = mpsc::channel(SESSION_UPDATE_CHANNEL_CAP); + let 
(event_tx, event_rx) = mpsc::channel(SESSION_EVENT_CHANNEL_CAP); + let update_rx = tokio_stream::wrappers::ReceiverStream::new(update_rx); let future = run_session( store, channels, cancel_token.clone(), - id, + session_id, our_role, init, initial_transmission, - event_sender, + EventSender(event_tx.clone()), update_rx, ) .instrument(error_span!("session", peer = %peer.fmt_short())); - let task_key = self.session_tasks.spawn_local(id, future); + let task_key = self.session_tasks.spawn_local(session_id, future); - let (on_finish_tx, on_finish_rx) = oneshot::channel(); let active_session = ActiveSession { - on_finish: on_finish_tx, + event_tx, task_key, peer, }; - self.sessions.insert(id, active_session); - let handle = SessionHandle::new(id, cancel_token, on_finish_rx, update_tx); + self.sessions.insert(session_id, active_session); + let handle = SessionHandle { + session_id, + cancel_token, + update_tx, + event_rx, + }; send_reply(reply, Ok(handle)) } ToActor::GetEntries { @@ -551,13 +532,11 @@ impl Actor { Ok(()) => Ok(()), Err(err) => Err(Arc::new(err)), }; - // TODO: remove - session.on_finish.send(result.clone()).ok(); - self.session_event_tx - .send_async(SessionEvent::new(*session_id, EventKind::Closed { result })) + session + .event_tx + .send(EventKind::Closed { result }) .await .ok(); - self.session_tasks.remove(&session.task_key); } else { warn!("remove_session called for unknown session"); diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 1ebe72a7d1..b58513d6d8 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -321,6 +321,7 @@ mod tests { use iroh_base::key::SecretKey; use iroh_net::{Endpoint, NodeAddr, NodeId}; use rand::SeedableRng; + use rand_chacha::ChaCha12Rng; use tracing::info; use crate::{ @@ -339,10 +340,15 @@ mod tests { const ALPN: &[u8] = b"iroh-willow/0"; + fn create_rng(seed: &str) -> ChaCha12Rng { + let seed = iroh_base::hash::Hash::new(seed); + rand_chacha::ChaCha12Rng::from_seed(*(seed.as_bytes())) + } 
+ #[tokio::test(flavor = "multi_thread")] - async fn smoke() -> anyhow::Result<()> { + async fn net_smoke() -> anyhow::Result<()> { iroh_test::logging::setup_multithreaded(); - let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(1); + let mut rng = create_rng("net_smoke"); let n_betty = parse_env_var("N_BETTY", 100); let n_alfie = parse_env_var("N_ALFIE", 100); @@ -444,9 +450,9 @@ mod tests { } #[tokio::test(flavor = "multi_thread")] - async fn live_data() -> anyhow::Result<()> { + async fn net_live_data() -> anyhow::Result<()> { iroh_test::logging::setup_multithreaded(); - let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(1); + let mut rng = create_rng("net_live_data"); let (ep_alfie, node_id_alfie, _) = create_endpoint(&mut rng).await?; let (ep_betty, node_id_betty, addr_betty) = create_endpoint(&mut rng).await?; diff --git a/iroh-willow/src/session/aoi_finder.rs b/iroh-willow/src/session/aoi_finder.rs index 114b4305d3..bcbab75f2e 100644 --- a/iroh-willow/src/session/aoi_finder.rs +++ b/iroh-willow/src/session/aoi_finder.rs @@ -31,7 +31,7 @@ impl AoiIntersection { #[derive(Debug, Default, Clone)] pub struct AoiFinder(Rc>); -pub type AoiIntersectionQueue = flume::Receiver; +pub type AoiIntersectionReceiver = flume::Receiver; #[derive(Debug, Default)] struct Inner { @@ -68,7 +68,7 @@ impl AoiFinder { } pub fn subscribe(&self) -> flume::Receiver { - let (tx, rx) = flume::bounded(128); + let (tx, rx) = flume::bounded(2); self.0.borrow_mut().subscribers.push(tx); rx } diff --git a/iroh-willow/src/session/data.rs b/iroh-willow/src/session/data.rs index bda0b1327d..e78dcb1c0f 100644 --- a/iroh-willow/src/session/data.rs +++ b/iroh-willow/src/session/data.rs @@ -6,7 +6,7 @@ use crate::{ willow::AuthorisedEntry, }, session::{ - aoi_finder::AoiIntersectionQueue, channels::ChannelSenders, payload::DEFAULT_CHUNK_SIZE, + aoi_finder::AoiIntersectionReceiver, channels::ChannelSenders, payload::DEFAULT_CHUNK_SIZE, static_tokens::StaticTokens, Error, SessionId, }, store::{ 
@@ -22,7 +22,7 @@ use super::payload::{send_payload_chunked, CurrentPayload}; pub struct DataSender { store: Store, send: ChannelSenders, - aoi_queue: AoiIntersectionQueue, + aoi_queue: AoiIntersectionReceiver, static_tokens: StaticTokens, session_id: SessionId, } @@ -31,7 +31,7 @@ impl DataSender { pub fn new( store: Store, send: ChannelSenders, - aoi_queue: AoiIntersectionQueue, + aoi_queue: AoiIntersectionReceiver, static_tokens: StaticTokens, session_id: SessionId, ) -> Self { diff --git a/iroh-willow/src/session/events.rs b/iroh-willow/src/session/events.rs index ec3e161422..f5570a9a22 100644 --- a/iroh-willow/src/session/events.rs +++ b/iroh-willow/src/session/events.rs @@ -1,5 +1,5 @@ use std::{ - collections::{hash_map, BTreeSet, HashMap}, + collections::{hash_map, HashMap, HashSet}, sync::Arc, }; @@ -11,9 +11,11 @@ use iroh_net::{ }; use tokio::{ io::Interest, - sync::oneshot, + sync::{mpsc, oneshot}, task::{AbortHandle, JoinHandle, JoinSet}, }; +use tokio_stream::{wrappers::ReceiverStream, StreamMap, StreamNotifyClose}; +use tokio_util::sync::CancellationToken; use tracing::{error_span, Instrument}; use crate::{ @@ -25,39 +27,34 @@ use crate::{ keys::NamespaceId, sync::{ReadAuthorisation, ReadCapability}, }, - session::{Error, Interests, Role, SessionId, SessionInit, SessionMode}, + session::{Error, Interests, Role, SessionId, SessionInit, SessionMode, SessionUpdate}, store::traits::Storage, }; use super::SessionUpdate::AddInterests; -type NamespaceInterests = HashMap>; +type NamespaceInterests = HashMap>; const COMMAND_CHANNEL_CAP: usize = 128; +const INTENT_UPDATE_CAP: usize = 16; +const INTENT_EVENT_CAP: usize = 64; #[derive(Debug, Clone)] -pub struct EventSender { - session_id: SessionId, - sender: flume::Sender, -} +pub struct EventSender(pub mpsc::Sender); impl EventSender { - pub fn new(session_id: SessionId, sender: flume::Sender) -> Self { - Self { session_id, sender } - } pub async fn send(&self, event: EventKind) -> Result<(), Error> { - 
self.sender - .send_async(SessionEvent::new(self.session_id, event)) + self.0 + .send(event) .await - .map_err(|_| Error::InvalidState("session event receiver dropped"))?; - Ok(()) + .map_err(|_| Error::InvalidState("session event receiver dropped")) } } #[derive(Debug, Clone)] pub struct SessionEvent { - session_id: SessionId, - event: EventKind, + pub session_id: SessionId, + pub event: EventKind, } impl SessionEvent { @@ -97,11 +94,11 @@ impl EventKind { } } -// #[derive(Debug, Clone)] -// pub struct SyncEvent { -// peer: NodeId, -// event: EventKind, -// } +#[derive(Debug)] +pub enum IntentUpdate { + AddInterests(Interests), + Close, +} #[derive(Debug)] pub enum Command { @@ -110,16 +107,6 @@ pub enum Command { init: SessionInit, reply: oneshot::Sender>, }, - UpdateIntent { - peer: NodeId, - intent_id: u64, - add_interests: Interests, - reply: oneshot::Sender>, - }, - CancelIntent { - peer: NodeId, - intent_id: u64, - }, HandleConnection { conn: Connection, }, @@ -128,7 +115,7 @@ pub enum Command { #[derive(Debug, Clone)] pub struct ManagedHandle { actor: ActorHandle, - command_tx: flume::Sender, + command_tx: mpsc::Sender, _task_handle: SharedAbortingJoinHandle>, } @@ -138,12 +125,12 @@ impl ManagedHandle { create_store: impl 'static + Send + FnOnce() -> S, ) -> Self { let me = endpoint.node_id(); - let (actor, event_rx) = ActorHandle::spawn_with_events(create_store, me); - let (command_tx, command_rx) = flume::bounded(COMMAND_CHANNEL_CAP); + let actor = ActorHandle::spawn(create_store, me); + let (command_tx, command_rx) = mpsc::channel(COMMAND_CHANNEL_CAP); let peer_manager = PeerManager { - event_rx, + session_event_rx: Default::default(), + intent_update_rx: Default::default(), command_rx, - command_tx: command_tx.clone(), establish_tasks: Default::default(), net_tasks: Default::default(), actor: actor.clone(), @@ -166,7 +153,7 @@ impl ManagedHandle { pub async fn handle_connection(&self, conn: Connection) -> Result<()> { self.command_tx - 
.send_async(Command::HandleConnection { conn }) + .send(Command::HandleConnection { conn }) .await?; Ok(()) } @@ -174,7 +161,7 @@ impl ManagedHandle { pub async fn sync_with_peer(&self, peer: NodeId, init: SessionInit) -> Result { let (reply, reply_rx) = oneshot::channel(); self.command_tx - .send_async(Command::SyncWithPeer { peer, init, reply }) + .send(Command::SyncWithPeer { peer, init, reply }) .await?; reply_rx.await? } @@ -188,271 +175,64 @@ impl std::ops::Deref for ManagedHandle { } } -type EstablishRes = (NodeId, Result<(JoinSet>, SessionHandle)>); +type NetTasks = JoinSet>; -#[derive(Debug)] +type EstablishRes = (NodeId, Result<(NetTasks, SessionHandle)>); + +pub type IntentId = (NodeId, u64); + +#[derive(derive_more::Debug)] pub struct PeerManager { - event_rx: flume::Receiver, - command_rx: flume::Receiver, - command_tx: flume::Sender, + session_event_rx: StreamMap>, + #[debug("StreamMap")] + intent_update_rx: StreamMap>>, + command_rx: mpsc::Receiver, establish_tasks: JoinSet, net_tasks: JoinSet<(NodeId, Result<()>)>, actor: ActorHandle, peers: HashMap, - // auth: Auth, sessions: HashMap, - // intents: HashMap, endpoint: Endpoint, dialer: Dialer, next_intent_id: u64, } -#[derive(Debug)] -struct SessionInfo { - // peer: NodeId, - our_role: Role, - complete_areas: NamespaceInterests, - submitted_interests: InterestMap, - intents: Vec, - handle: SessionHandle, - net_error: Option, -} - -impl SessionInfo { - async fn push_interests(&mut self, interests: InterestMap) -> Result<()> { - let new_interests = self.merge_interests(interests); - self.handle - .send_update(AddInterests(Interests::Exact(new_interests))) - .await?; - Ok(()) - } - // TODO: Less clones? 
- fn merge_interests(&mut self, interests: InterestMap) -> InterestMap { - let mut new: InterestMap = HashMap::new(); - for (auth, aois) in interests.into_iter() { - match self.submitted_interests.entry(auth.clone()) { - hash_map::Entry::Vacant(entry) => { - entry.insert(aois.clone()); - new.insert(auth, aois.clone()); - } - hash_map::Entry::Occupied(mut entry) => { - let existing = entry.get_mut(); - for aoi in aois { - if !existing.contains(&aoi) { - existing.insert(aoi.clone()); - new.entry(auth.clone()).or_default().insert(aoi); - } - } - } - } - } - new - // for (namespace, details) in interests.into_iter() { - // let namespace = *namespace; - // match self.submitted_interests.entry(namespace) { - // hash_map::Entry::Vacant(entry) => { - // entry.insert(details.clone()); - // new.insert(namespace, details.clone()); - // } - // hash_map::Entry::Occupied(mut entry) => { - // let existing = entry.get_mut(); - // for aoi in details.aois { - // if !existing.aois.contains(&aoi) { - // existing.aois.insert(aoi.clone()); - // new.entry(namespace).or_default().aois.insert(aoi); - // } - // } - // for auth in details.auths { - // if !existing.auths.contains(&auth) { - // existing.auths.insert(auth.clone()); - // new.entry(namespace).or_default().auths.insert(auth); - // } - // } - // } - // } - // } - } -} - -#[derive(Debug)] -struct IntentInfo { - // peer: NodeId, - intent_id: u64, - interests: NamespaceInterests, - mode: SessionMode, - sender: flume::Sender, -} - -#[derive(Debug)] -pub struct IntentHandle { - peer: NodeId, - intent_id: u64, - receiver: flume::Receiver, - sender: flume::Sender, -} - -impl IntentHandle { - // TODO: impl stream - pub async fn next(&self) -> Option { - self.receiver.recv_async().await.ok() - } - - pub async fn complete(&self) -> Result<(), Arc> { - loop { - let event = self - .receiver - .recv_async() - .await - .map_err(|_| Arc::new(Error::ActorFailed))?; - if let EventKind::Closed { result } = event { - return result; - } - } - } - - 
pub async fn add_interests(&self, interests: impl Into) -> Result<()> { - let (reply, reply_rx) = oneshot::channel(); - self.sender - .send_async(Command::UpdateIntent { - peer: self.peer, - intent_id: self.intent_id, - add_interests: interests.into(), - reply, - }) - .await?; - reply_rx.await? - } -} - -#[derive(Debug)] -enum PeerState { - Connecting { - intents: Vec, - interests: InterestMap, - }, - Establishing { - our_role: Role, - intents: Vec, - submitted_interests: InterestMap, - pending_interests: InterestMap, - }, - Active { - session_id: SessionId, - }, - // Closing { - // session_id: SessionId, - // }, - Placeholder, -} - -impl PeerState { - pub fn into_intents(self) -> Option> { - match self { - PeerState::Connecting { intents, .. } => Some(intents), - PeerState::Establishing { intents, .. } => Some(intents), - _ => None, - } - } -} - -impl IntentInfo { - fn merge_interests(&mut self, interests: &InterestMap) { - for (auth, aois) in interests.iter() { - self.interests - .entry(auth.namespace()) - .or_default() - .extend(aois.clone()); - } - } - - async fn handle_event(&mut self, event: &EventKind) -> Result { - let send = |event: EventKind| async { - self.sender - .send_async(event) - .await - .map_err(|_| ReceiverDropped) - }; - - let stay_alive = match &event { - EventKind::CapabilityIntersection { namespace, .. 
} => { - if self.interests.contains_key(namespace) { - send(event.clone()).await?; - } - true - } - EventKind::InterestIntersection { area, namespace } => { - if let Some(interests) = self.interests.get(namespace) { - let matches = interests - .iter() - .any(|x| x.area.has_intersection(&area.area)); - if matches { - send(event.clone()).await?; - } - } - true - } - EventKind::Reconciled { area, namespace } => { - if let Some(interests) = self.interests.get_mut(namespace) { - let matches = interests - .iter() - .any(|x| x.area.has_intersection(&area.area)); - if matches { - send(event.clone()).await?; - interests.retain(|x| !area.area.includes_area(&x.area)); - if interests.is_empty() { - send(EventKind::ReconciledAll).await?; - } - } - } - true - } - EventKind::Closed { .. } => { - send(event.clone()).await?; - false - } - EventKind::ReconciledAll => true, - }; - Ok(stay_alive) - } -} - -#[derive(Debug, thiserror::Error)] -#[error("receiver dropped")] -pub struct ReceiverDropped; - impl PeerManager { pub async fn run(mut self) -> Result<(), Error> { loop { tokio::select! { - Ok(event) = self.event_rx.recv_async() => { - self.received_event(event).await; + Some((session_id, event)) = self.session_event_rx.next(), if !self.session_event_rx.is_empty() => { + self.received_event(session_id, event).await; } - Ok(command) = self.command_rx.recv_async() => { + Some(((peer, intent_id), event)) = self.intent_update_rx.next(), if !self.intent_update_rx.is_empty() => { + if let Some(event) = event { + // Received an intent update. + if let Err(err) = self.update_intent(peer, intent_id, event).await { + tracing::warn!(peer=%peer.fmt_short(), %intent_id, ?err, "failed to update intent"); + } + } else { + // The intent update sender was dropped: Cancel the intent. 
+ self.cancel_intent(peer, intent_id); + } + } + Some(command) = self.command_rx.recv() => { self.received_command(command).await; } Some(res) = self.establish_tasks.join_next(), if !self.establish_tasks.is_empty() => { - let res = match res { - Ok(res) => res, - Err(err) if err.is_cancelled() => { - continue; - }, + match res { + Err(err) if err.is_cancelled() => continue, Err(err) => Err(err).context("establish task paniced")?, - }; - self.on_established(res).await?; - + Ok((peer, Ok((tasks, handle)))) => self.on_established(peer, handle, tasks).await?, + Ok((peer, Err(err))) => self.remove_peer(peer, Err(Arc::new(Error::Net(err)))).await, + } } Some(res) = self.net_tasks.join_next(), if !self.net_tasks.is_empty() => { match res { - Err(err) if err.is_cancelled() => { - continue; - }, + Err(err) if err.is_cancelled() => continue, Err(err) => Err(err).context("net task paniced")?, - Ok((peer, res)) => { - if let Err(err) = res { - self.on_conn_fail(peer, err); - } - } + Ok((_peer, Ok(())))=> continue, + Ok((peer, Err(err))) => self.on_net_task_failed(peer, err), } }, Some((peer, conn)) = self.dialer.next() => { @@ -468,29 +248,41 @@ impl PeerManager { Ok(()) } - async fn on_dial_fail(&mut self, peer: NodeId, err: anyhow::Error) { + async fn remove_peer(&mut self, peer: NodeId, result: Result<(), Arc>) { let Some(peer_state) = self.peers.remove(&peer) else { - tracing::warn!(?peer, "dialer returned connection error for unknown peer"); + tracing::warn!(?peer, "attempted to remove unknown peer"); return; }; - let PeerState::Connecting { intents, .. } = peer_state else { - tracing::warn!( - ?peer, - "dialer returned connection error for peer in wrong state" - ); - return; + let (intents, session_id) = match peer_state { + PeerState::Connecting { intents, .. } => { + self.dialer.abort_dial(&peer); + (Some(intents), None) + } + PeerState::Establishing { intents, .. 
} => (Some(intents), None), + PeerState::Active { session_id } => { + let session = self.sessions.remove(&session_id); + let intents = session.map(|session| session.intents); + (intents, Some(session_id)) + } + PeerState::Placeholder => unreachable!(), }; - let result = Err(Arc::new(Error::Net(err))); - for intent in intents { - let result = result.clone(); - intent - .sender - .send_async(EventKind::Closed { result }) - .await - .ok(); + if let Some(intents) = intents { + for intent in &intents { + self.intent_update_rx.remove(&(peer, intent.intent_id)); + } + let senders = intents.into_iter().map(|intent| intent.event_tx); + send_all(senders, EventKind::Closed { result }).await; + } + if let Some(session_id) = session_id { + self.session_event_rx.remove(&session_id); } } + async fn on_dial_fail(&mut self, peer: NodeId, err: anyhow::Error) { + let result = Err(Arc::new(Error::Net(err))); + self.remove_peer(peer, result).await; + } + fn session_mut(&mut self, peer: &NodeId) -> Option<&mut SessionInfo> { let peer_state = self.peers.get(peer)?; match peer_state { @@ -499,7 +291,7 @@ impl PeerManager { } } - fn on_conn_fail(&mut self, peer: NodeId, err: anyhow::Error) { + fn on_net_task_failed(&mut self, peer: NodeId, err: anyhow::Error) { if let Some(session) = self.session_mut(&peer) { if session.net_error.is_none() { session.net_error = Some(err); @@ -507,8 +299,12 @@ impl PeerManager { } } - async fn on_established(&mut self, res: EstablishRes) -> anyhow::Result<()> { - let (peer, res) = res; + async fn on_established( + &mut self, + peer: NodeId, + session_handle: SessionHandle, + mut net_tasks: NetTasks, + ) -> anyhow::Result<()> { let peer_state = self .peers .get_mut(&peer) @@ -523,38 +319,35 @@ impl PeerManager { else { anyhow::bail!("unreachable: on_established called for peer in wrong state") }; - match res { - Ok((mut net_tasks, session_handle)) => { - if our_role.is_alfie() && intents.is_empty() { - session_handle.close(); - } - let session_id = 
session_handle.session_id(); - self.net_tasks.spawn( - async move { crate::net::join_all(&mut net_tasks).await } - .map(move |r| (peer, r)), - ); - let mut session_info = SessionInfo { - our_role, - complete_areas: Default::default(), - submitted_interests, - intents, - handle: session_handle, - net_error: None, - }; - if !pending_interests.is_empty() { - session_info.push_interests(pending_interests).await?; - } - self.sessions.insert(session_id, session_info); - *peer_state = PeerState::Active { session_id }; - } - Err(err) => { - tracing::warn!(?peer, ?err, "establishing session failed"); - let result = Err(Arc::new(Error::Net(err))); - let senders = intents.into_iter().map(|intent| intent.sender); - send_all(senders, EventKind::Closed { result }).await; - self.peers.remove(&peer); - } + if our_role.is_alfie() && intents.is_empty() { + session_handle.close(); + } + let SessionHandle { + session_id, + cancel_token, + update_tx, + event_rx, + } = session_handle; + self.net_tasks.spawn( + async move { crate::net::join_all(&mut net_tasks).await }.map(move |r| (peer, r)), + ); + let mut session_info = SessionInfo { + peer, + our_role, + complete_areas: Default::default(), + submitted_interests, + intents, + net_error: None, + update_tx, + cancel_token, + }; + if !pending_interests.is_empty() { + session_info.push_interests(pending_interests).await?; } + self.sessions.insert(session_id, session_info); + self.session_event_rx + .insert(session_id, ReceiverStream::new(event_rx)); + *peer_state = PeerState::Active { session_id }; Ok(()) } @@ -565,22 +358,31 @@ impl PeerManager { ) -> Result { let intent_interests = self.actor.resolve_interests(init.interests).await?; // TODO: Allow to configure cap? 
- let (sender, receiver) = flume::bounded(64); + let (event_tx, event_rx) = mpsc::channel(INTENT_EVENT_CAP); + let (update_tx, update_rx) = mpsc::channel(INTENT_UPDATE_CAP); let intent_id = { let intent_id = self.next_intent_id; self.next_intent_id += 1; intent_id }; - let intent_info = IntentInfo { + let info = IntentInfo { intent_id, interests: flatten_interests(&intent_interests), mode: init.mode, - sender, + event_tx, }; + let handle = IntentHandle { + event_rx, + update_tx, + }; + self.intent_update_rx.insert( + (peer, intent_id), + StreamNotifyClose::new(ReceiverStream::new(update_rx)), + ); match self.peers.get_mut(&peer) { None => { self.dialer.queue_dial(peer, ALPN); - let intents = vec![intent_info]; + let intents = vec![info]; let peer_state = PeerState::Connecting { intents, interests: intent_interests, @@ -589,7 +391,7 @@ impl PeerManager { } Some(state) => match state { PeerState::Connecting { intents, interests } => { - intents.push(intent_info); + intents.push(info); merge_interests(interests, intent_interests); } PeerState::Establishing { @@ -597,23 +399,17 @@ impl PeerManager { pending_interests, .. } => { - intents.push(intent_info); + intents.push(info); merge_interests(pending_interests, intent_interests); } PeerState::Active { session_id, .. 
} => { let session = self.sessions.get_mut(session_id).expect("session to exist"); - session.intents.push(intent_info); + session.intents.push(info); session.push_interests(intent_interests).await?; } PeerState::Placeholder => unreachable!(), }, }; - let handle = IntentHandle { - peer, - receiver, - intent_id, - sender: self.command_tx.clone(), - }; Ok(handle) } @@ -621,47 +417,55 @@ impl PeerManager { &mut self, peer: NodeId, intent_id: u64, - add_interests: Interests, + update: IntentUpdate, ) -> Result<()> { - let add_interests = self.actor.resolve_interests(add_interests).await?; - match self.peers.get_mut(&peer) { - None => anyhow::bail!("invalid node id"), - Some(peer_state) => match peer_state { - PeerState::Connecting { intents, interests } => { - let Some(intent_info) = intents.iter_mut().find(|i| i.intent_id == intent_id) - else { - anyhow::bail!("invalid intent id"); - }; - intent_info.merge_interests(&add_interests); - merge_interests(interests, add_interests); - } - PeerState::Establishing { - intents, - pending_interests, - .. - } => { - let intent_info = intents - .iter_mut() - .find(|i| i.intent_id == intent_id) - .ok_or_else(|| anyhow!("invalid intent id"))?; - intent_info.merge_interests(&add_interests); - merge_interests(pending_interests, add_interests); - } - PeerState::Active { session_id, .. 
} => { - let session = self.sessions.get_mut(session_id).expect("session to exist"); - let Some(intent_info) = session - .intents - .iter_mut() - .find(|i| i.intent_id == intent_id) - else { - anyhow::bail!("invalid intent id"); - }; - intent_info.merge_interests(&add_interests); - session.push_interests(add_interests).await?; - } - PeerState::Placeholder => unreachable!(), - }, - }; + match update { + IntentUpdate::AddInterests(interests) => { + let add_interests = self.actor.resolve_interests(interests).await?; + match self.peers.get_mut(&peer) { + None => anyhow::bail!("invalid node id"), + Some(peer_state) => match peer_state { + PeerState::Connecting { intents, interests } => { + let intent_info = intents + .iter_mut() + .find(|i| i.intent_id == intent_id) + .ok_or_else(|| anyhow!("invalid intent id"))?; + intent_info.merge_interests(&add_interests); + merge_interests(interests, add_interests); + } + PeerState::Establishing { + intents, + pending_interests, + .. + } => { + let intent_info = intents + .iter_mut() + .find(|i| i.intent_id == intent_id) + .ok_or_else(|| anyhow!("invalid intent id"))?; + intent_info.merge_interests(&add_interests); + merge_interests(pending_interests, add_interests); + } + PeerState::Active { session_id, .. } => { + let session = + self.sessions.get_mut(session_id).expect("session to exist"); + let Some(intent_info) = session + .intents + .iter_mut() + .find(|i| i.intent_id == intent_id) + else { + anyhow::bail!("invalid intent id"); + }; + intent_info.merge_interests(&add_interests); + session.push_interests(add_interests).await?; + } + PeerState::Placeholder => unreachable!(), + }, + }; + } + IntentUpdate::Close => { + self.cancel_intent(peer, intent_id); + } + } Ok(()) } @@ -670,6 +474,8 @@ impl PeerManager { return; }; + self.intent_update_rx.remove(&(peer, intent_id)); + match peer_state { PeerState::Connecting { intents, .. 
} => { intents.retain(|intent_info| intent_info.intent_id != intent_id); @@ -687,7 +493,7 @@ impl PeerManager { .intents .retain(|intent| intent.intent_id != intent_id); if session.intents.is_empty() { - session.handle.close(); + session.cancel_token.cancel(); } } PeerState::Placeholder => unreachable!(), @@ -699,60 +505,46 @@ impl PeerManager { match command { Command::SyncWithPeer { peer, init, reply } => { let res = self.sync_with_peer(peer, init).await; - // TODO: Cancel intent if reply send fails? reply.send(res).ok(); } - Command::UpdateIntent { - peer, - intent_id, - add_interests, - reply, - } => { - let res = self.update_intent(peer, intent_id, add_interests).await; - // TODO: Cancel intent if reply send fails? - reply.send(res).ok(); - } - Command::CancelIntent { peer, intent_id } => { - self.cancel_intent(peer, intent_id); - } Command::HandleConnection { conn } => { self.handle_connection(conn, Role::Betty).await; } } } - pub async fn received_event(&mut self, mut event: SessionEvent) { + pub async fn received_event(&mut self, session_id: SessionId, event: EventKind) { tracing::info!(?event, "event"); - let Some(session) = self.sessions.get_mut(&event.session_id) else { - tracing::warn!(?event, "Got event for unknown session"); + let Some(session) = self.sessions.get_mut(&session_id) else { + tracing::warn!(?session_id, ?event, "Got event for unknown session"); return; }; - let mut is_closed = false; - match &mut event.event { - EventKind::Reconciled { namespace, area } => { - session - .complete_areas - .entry(*namespace) - .or_default() - .insert(area.clone()); - } - EventKind::Closed { result } => { - is_closed = true; - if result.is_ok() { - // Inject error from networking tasks. - if let Some(net_error) = session.net_error.take() { - *result = Err(Arc::new(Error::Net(net_error))); - } + let peer = session.peer; + + if let EventKind::Closed { mut result } = event { + if result.is_ok() { + // Inject error from networking tasks. 
+ if let Some(net_error) = session.net_error.take() { + result = Err(Arc::new(Error::Net(net_error))); } } - _ => {} + self.remove_peer(peer, result).await; + return; + } + + if let EventKind::Reconciled { namespace, area } = &event { + session + .complete_areas + .entry(*namespace) + .or_default() + .insert(area.clone()); } let send_futs = session .intents .iter_mut() - .map(|intent_info| intent_info.handle_event(&event.event)); + .map(|intent_info| intent_info.handle_event(&event)); let send_res = futures_buffered::join_all(send_futs).await; let mut removed = 0; for (i, res) in send_res.into_iter().enumerate() { @@ -765,14 +557,9 @@ impl PeerManager { } } - if session.our_role.is_alfie() && session.intents.is_empty() && !is_closed { - session.handle.close(); - } - - if is_closed { - debug_assert!(session.intents.is_empty()); - // TODO: Wait for net tasks to terminate? - self.sessions.remove(&event.session_id); + // Cancel the session if all intents are gone. + if session.our_role.is_alfie() && session.intents.is_empty() { + session.cancel_token.cancel(); } } @@ -786,15 +573,11 @@ impl PeerManager { }; if let Err(err) = self.handle_connection_inner(peer, conn, our_role).await { tracing::warn!(?peer, ?err, "failed to establish connection"); - if let Some(peer_state) = self.peers.remove(&peer) { - if let Some(intents) = peer_state.into_intents() { - let result = Err(Arc::new(Error::Net(err))); - let senders = intents.into_iter().map(|intent| intent.sender); - send_all(senders, EventKind::Closed { result }).await; - } - } + let result = Err(Arc::new(Error::Net(err))); + self.remove_peer(peer, result).await; } } + async fn handle_connection_inner( &mut self, peer: NodeId, @@ -810,10 +593,12 @@ impl PeerManager { match peer_state { PeerState::Placeholder => unreachable!(), PeerState::Active { .. } => { - anyhow::bail!("got connection for already active peer"); + tracing::warn!("got connection for already active peer"); + return Ok(()); } PeerState::Establishing { .. 
} => { - anyhow::bail!("got connection for already establishing peer"); + tracing::warn!("got connection for already establishing peer"); + return Ok(()); } PeerState::Connecting { intents, interests } => { let mode = if intents.iter().any(|i| matches!(i.mode, SessionMode::Live)) { @@ -831,10 +616,12 @@ impl PeerManager { match peer_state { PeerState::Placeholder => unreachable!(), PeerState::Active { .. } => { - anyhow::bail!("got connection for already active peer"); + tracing::warn!("got connection for already active peer"); + return Ok(()); } PeerState::Establishing { .. } => { - anyhow::bail!("got connection for already establishing peer"); + tracing::warn!("got connection for already establishing peer"); + return Ok(()); } PeerState::Connecting { intents, .. } => { // TODO: Decide which conn to use. @@ -876,6 +663,175 @@ impl PeerManager { } } +#[derive(Debug)] +struct SessionInfo { + peer: NodeId, + our_role: Role, + complete_areas: NamespaceInterests, + submitted_interests: InterestMap, + intents: Vec, + net_error: Option, + cancel_token: CancellationToken, + update_tx: mpsc::Sender, +} + +impl SessionInfo { + async fn push_interests(&mut self, interests: InterestMap) -> Result<()> { + let new_interests = self.merge_interests(interests); + self.update_tx + .send(AddInterests(Interests::Exact(new_interests))) + .await?; + Ok(()) + } + + fn merge_interests(&mut self, interests: InterestMap) -> InterestMap { + let mut new: InterestMap = HashMap::new(); + for (auth, aois) in interests.into_iter() { + match self.submitted_interests.entry(auth.clone()) { + hash_map::Entry::Vacant(entry) => { + entry.insert(aois.clone()); + new.insert(auth, aois); + } + hash_map::Entry::Occupied(mut entry) => { + let existing = entry.get_mut(); + for aoi in aois { + if !existing.contains(&aoi) { + existing.insert(aoi.clone()); + new.entry(auth.clone()).or_default().insert(aoi); + } + } + } + } + } + new + } +} + +#[derive(Debug)] +enum PeerState { + Connecting { + intents: Vec, + 
interests: InterestMap, + }, + Establishing { + our_role: Role, + intents: Vec, + submitted_interests: InterestMap, + pending_interests: InterestMap, + }, + Active { + session_id: SessionId, + }, + Placeholder, +} + +#[derive(Debug)] +pub struct IntentHandle { + event_rx: mpsc::Receiver, + update_tx: mpsc::Sender, +} + +impl IntentHandle { + // TODO: impl stream + pub async fn next(&mut self) -> Option { + self.event_rx.recv().await + } + + pub async fn complete(&mut self) -> Result<(), Arc> { + loop { + let event = self + .event_rx + .recv() + .await + .ok_or_else(|| Arc::new(Error::ActorFailed))?; + if let EventKind::Closed { result } = event { + return result; + } + } + } + + pub async fn add_interests(&self, interests: impl Into) -> Result<()> { + self.update_tx + .send(IntentUpdate::AddInterests(interests.into())) + .await?; + Ok(()) + } + + pub async fn close(&self) { + self.update_tx.send(IntentUpdate::Close).await.ok(); + } +} + +#[derive(Debug)] +struct IntentInfo { + intent_id: u64, + interests: NamespaceInterests, + mode: SessionMode, + event_tx: mpsc::Sender, +} + +impl IntentInfo { + fn merge_interests(&mut self, interests: &InterestMap) { + for (auth, aois) in interests.iter() { + self.interests + .entry(auth.namespace()) + .or_default() + .extend(aois.clone()); + } + } + + async fn handle_event(&mut self, event: &EventKind) -> Result { + let send = |event: EventKind| async { + self.event_tx.send(event).await.map_err(|_| ReceiverDropped) + }; + + let stay_alive = match &event { + EventKind::CapabilityIntersection { namespace, .. 
} => { + if self.interests.contains_key(namespace) { + send(event.clone()).await?; + } + true + } + EventKind::InterestIntersection { area, namespace } => { + if let Some(interests) = self.interests.get(namespace) { + let matches = interests + .iter() + .any(|x| x.area.has_intersection(&area.area)); + if matches { + send(event.clone()).await?; + } + } + true + } + EventKind::Reconciled { area, namespace } => { + if let Some(interests) = self.interests.get_mut(namespace) { + let matches = interests + .iter() + .any(|x| x.area.has_intersection(&area.area)); + if matches { + send(event.clone()).await?; + interests.retain(|x| !area.area.includes_area(&x.area)); + if interests.is_empty() { + send(EventKind::ReconciledAll).await?; + } + } + } + true + } + EventKind::Closed { .. } => { + send(event.clone()).await?; + false + } + EventKind::ReconciledAll => true, + }; + Ok(stay_alive) + } +} + +#[derive(Debug, thiserror::Error)] +#[error("receiver dropped")] +pub struct ReceiverDropped; + fn merge_interests(a: &mut InterestMap, b: InterestMap) { for (cap, aois) in b.into_iter() { a.entry(cap).or_default().extend(aois); @@ -891,12 +847,12 @@ fn flatten_interests(interests: &InterestMap) -> NamespaceInterests { } async fn send_all( - senders: impl IntoIterator>, + senders: impl IntoIterator>>, message: T, -) -> Vec>> { +) -> Vec>> { let futs = senders.into_iter().map(|sender| { let message = message.clone(); - async move { sender.send_async(message).await } + async move { sender.borrow().send(message).await } }); futures_buffered::join_all(futs).await } @@ -906,7 +862,8 @@ mod tests { use bytes::Bytes; use iroh_net::{Endpoint, NodeAddr, NodeId}; use rand::SeedableRng; - use std::collections::{BTreeMap, BTreeSet, HashMap}; + use rand_chacha::ChaCha12Rng; + use std::collections::HashMap; use super::{EventKind, ManagedHandle, ALPN}; use crate::{ @@ -923,10 +880,15 @@ mod tests { session::{Interests, Role, SessionInit, SessionMode}, }; + fn create_rng(seed: &str) -> ChaCha12Rng 
{ + let seed = iroh_base::hash::Hash::new(seed); + rand_chacha::ChaCha12Rng::from_seed(*(seed.as_bytes())) + } + #[tokio::test(flavor = "multi_thread")] async fn peer_manager_two_intents() -> anyhow::Result<()> { iroh_test::logging::setup_multithreaded(); - let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(1); + let mut rng = create_rng("peer_manager_two_intents"); let ( shutdown, namespace, @@ -945,10 +907,10 @@ mod tests { let interests = Interests::select().area(namespace, [Area::path(path.clone())]); let init = SessionInit::new(interests, SessionMode::ReconcileOnce); - let handle = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); + let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); assert_eq!( - handle.next().await.unwrap(), + intent.next().await.unwrap(), EventKind::CapabilityIntersection { namespace, area: Area::full(), @@ -956,7 +918,7 @@ mod tests { ); assert_eq!( - handle.next().await.unwrap(), + intent.next().await.unwrap(), EventKind::InterestIntersection { namespace, area: Area::path(path.clone()).into() @@ -964,21 +926,21 @@ mod tests { ); assert_eq!( - handle.next().await.unwrap(), + intent.next().await.unwrap(), EventKind::Reconciled { namespace, area: Area::path(path.clone()).into() } ); - assert_eq!(handle.next().await.unwrap(), EventKind::ReconciledAll); + assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); assert_eq!( - handle.next().await.unwrap(), + intent.next().await.unwrap(), EventKind::Closed { result: Ok(()) } ); - assert!(handle.next().await.is_none()); + assert!(intent.next().await.is_none()); } }); @@ -990,10 +952,10 @@ mod tests { let interests = Interests::select().area(namespace, [Area::path(path.clone())]); let init = SessionInit::new(interests, SessionMode::ReconcileOnce); - let handle = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); + let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); assert_eq!( - handle.next().await.unwrap(), + 
intent.next().await.unwrap(), EventKind::CapabilityIntersection { namespace, area: Area::full(), @@ -1001,7 +963,7 @@ mod tests { ); assert_eq!( - handle.next().await.unwrap(), + intent.next().await.unwrap(), EventKind::InterestIntersection { namespace, area: Area::path(path.clone()).into() @@ -1009,21 +971,21 @@ mod tests { ); assert_eq!( - handle.next().await.unwrap(), + intent.next().await.unwrap(), EventKind::Reconciled { namespace, area: Area::path(path.clone()).into() } ); - assert_eq!(handle.next().await.unwrap(), EventKind::ReconciledAll); + assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); assert_eq!( - handle.next().await.unwrap(), + intent.next().await.unwrap(), EventKind::Closed { result: Ok(()) } ); - assert!(handle.next().await.is_none()); + assert!(intent.next().await.is_none()); } }); @@ -1036,7 +998,7 @@ mod tests { #[tokio::test(flavor = "multi_thread")] async fn peer_manager_update_intent() -> anyhow::Result<()> { iroh_test::logging::setup_multithreaded(); - let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(1); + let mut rng = create_rng("peer_manager_update_intent"); let ( shutdown, namespace, @@ -1051,52 +1013,59 @@ mod tests { let interests = Interests::select().area(namespace, [Area::path(path.clone())]); let init = SessionInit::new(interests, SessionMode::Live); - let handle = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); + let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); assert_eq!( - handle.next().await.unwrap(), + intent.next().await.unwrap(), EventKind::CapabilityIntersection { namespace, area: Area::full(), } ); - assert_eq!( - handle.next().await.unwrap(), + intent.next().await.unwrap(), EventKind::InterestIntersection { namespace, area: Area::path(path.clone()).into() } ); assert_eq!( - handle.next().await.unwrap(), + intent.next().await.unwrap(), EventKind::Reconciled { namespace, area: Area::path(path.clone()).into() } ); - assert_eq!(handle.next().await.unwrap(), 
EventKind::ReconciledAll); + assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); let path = Path::new(&[b"bar"]).unwrap(); let interests = Interests::select().area(namespace, [Area::path(path.clone())]); - handle.add_interests(interests).await?; + intent.add_interests(interests).await?; assert_eq!( - handle.next().await.unwrap(), + intent.next().await.unwrap(), EventKind::InterestIntersection { namespace, area: Area::path(path.clone()).into() } ); assert_eq!( - handle.next().await.unwrap(), + intent.next().await.unwrap(), EventKind::Reconciled { namespace, area: Area::path(path.clone()).into() } ); - assert_eq!(handle.next().await.unwrap(), EventKind::ReconciledAll); + assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); + + intent.close().await; + + assert!(intent.next().await.is_none(),); + // assert_eq!( + // intent.next().await.unwrap(), + // EventKind::Closed { result: Ok(()) } + // ); shutdown(); Ok(()) diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index 941ca88752..b1f0578b8d 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -11,7 +11,7 @@ use tracing::{debug, trace}; use crate::{ proto::{ - grouping::ThreeDRange, + grouping::{AreaOfInterest, ThreeDRange}, keys::NamespaceId, sync::{ AreaOfInterestHandle, Fingerprint, LengthyEntry, ReconciliationAnnounceEntries, @@ -21,7 +21,7 @@ use crate::{ willow::PayloadDigest, }, session::{ - aoi_finder::{AoiIntersection, AoiIntersectionQueue}, + aoi_finder::{AoiIntersection, AoiIntersectionReceiver}, channels::{ChannelSenders, MessageReceiver}, events::{EventKind, EventSender}, payload::{send_payload_chunked, CurrentPayload}, @@ -36,6 +36,18 @@ use crate::{ util::stream::Cancelable, }; +// pub enum Input { +// Received(ReconciliationMessage), +// Intersection(AoiIntersection), +// } +// +// pub enum Output { +// Reconciled { +// namespace: NamespaceId, +// area: AreaOfInterest, +// }, +// } + 
#[derive(derive_more::Debug)] pub struct Reconciler { shared: Shared, @@ -52,7 +64,7 @@ impl Reconciler { pub fn new( store: Store, recv: Cancelable>, - aoi_intersection_queue: AoiIntersectionQueue, + aoi_intersection_receiver: AoiIntersectionReceiver, static_tokens: StaticTokens, session_id: SessionId, send: ChannelSenders, @@ -71,9 +83,8 @@ impl Reconciler { Ok(Self { shared, recv, - targets: TargetMap::new(aoi_intersection_queue), + targets: TargetMap::new(aoi_intersection_receiver), current_entry: Default::default(), - events, }) } @@ -96,9 +107,8 @@ impl Reconciler { } } } - Ok(intersection) = self.targets.aoi_intersection_queue.recv_async() => { + Ok(intersection) = self.targets.aoi_intersection_receiver.recv_async() => { tracing::trace!(?intersection, "tick: interesection"); - let intersection = intersection; let area = intersection.intersection.clone(); let namespace = intersection.namespace; self.targets.init_target(&self.shared, intersection).await?; @@ -203,14 +213,14 @@ impl Reconciler { #[derive(Debug)] struct TargetMap { map: HashMap>, - aoi_intersection_queue: AoiIntersectionQueue, + aoi_intersection_receiver: AoiIntersectionReceiver, } impl TargetMap { - pub fn new(aoi_intersection_queue: AoiIntersectionQueue) -> Self { + pub fn new(aoi_intersection_receiver: AoiIntersectionReceiver) -> Self { Self { map: Default::default(), - aoi_intersection_queue, + aoi_intersection_receiver, } } pub async fn get_eventually( @@ -231,7 +241,7 @@ impl TargetMap { ) -> Result<(), Error> { loop { let intersection = self - .aoi_intersection_queue + .aoi_intersection_receiver .recv_async() .await .map_err(|_| Error::InvalidState("aoi finder closed"))?; diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 0dbe038912..410dace4c9 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -1,9 +1,10 @@ use std::{cell::RefCell, collections::hash_map, rc::Rc}; use futures_concurrency::stream::StreamExt as _; -use 
futures_lite::StreamExt as _; +use futures_lite::{Stream, StreamExt as _}; use genawaiter::GeneratorState; use strum::IntoEnumIterator; +use tokio::sync::mpsc; use tokio_util::sync::CancellationToken; use tracing::{debug, error_span, trace, warn, Span}; @@ -44,7 +45,7 @@ pub async fn run_session( init: SessionInit, initial_transmission: InitialTransmission, event_sender: EventSender, - update_receiver: flume::Receiver, + update_receiver: impl Stream + Unpin + 'static, ) -> Result<(), Error> { debug!(role = ?our_role, mode = ?init.mode, "start session"); let Channels { send, recv } = channels; @@ -69,7 +70,7 @@ pub async fn run_session( let mut capability_recv = Cancelable::new(capability_recv, cancel_token.clone()); let mut aoi_recv = Cancelable::new(aoi_recv, cancel_token.clone()); let mut data_recv = Cancelable::new(data_recv, cancel_token.clone()); - let mut update_receiver = Cancelable::new(update_receiver.into_stream(), cancel_token.clone()); + let mut update_receiver = Cancelable::new(update_receiver, cancel_token.clone()); let caps = Capabilities::new( initial_transmission.our_nonce, @@ -82,7 +83,6 @@ pub async fn run_session( let initial_interests = store.auth().resolve_interests(init.interests)?; let all_interests = Rc::new(RefCell::new(initial_interests.clone())); - let initial_interests = Rc::new(initial_interests); // Setup a channel for the private area intersection finder. 
let (pai_inbox_tx, pai_inbox_rx) = flume::bounded(128); @@ -102,19 +102,19 @@ pub async fn run_session( caps.revealed().await; let interests = store.auth().resolve_interests(interests)?; for (authorisation, aois) in interests.into_iter() { - let mut all_interests = all_interests.borrow_mut(); - let is_new_cap; - match all_interests.entry(authorisation.clone()) { - hash_map::Entry::Occupied(mut entry) => { - is_new_cap = false; - entry.get_mut().extend(aois.clone()); + let is_new_cap = { + let mut all_interests = all_interests.borrow_mut(); + match all_interests.entry(authorisation.clone()) { + hash_map::Entry::Occupied(mut entry) => { + entry.get_mut().extend(aois.clone()); + false + } + hash_map::Entry::Vacant(entry) => { + entry.insert(aois.clone()); + true + } } - hash_map::Entry::Vacant(entry) => { - is_new_cap = true; - entry.insert(aois.clone()); - } - } - drop(all_interests); + }; if let Some(capability_handle) = caps.find_ours(authorisation.read_cap()) { @@ -139,7 +139,6 @@ pub async fn run_session( } } } - // tokens.bind_theirs(message.static_token); } Ok(()) } @@ -182,7 +181,8 @@ pub async fn run_session( namespace: intersection.authorisation.namespace(), area: intersection.authorisation.read_cap().granted_area().clone(), }; - event_sender.send(event).await?; + // TODO: break if error? 
+ event_sender.send(event).await.ok(); on_pai_intersection( &interests, store.secrets(), From 0a21ca815bcaf88a597a093eb39cce5aef68bf1b Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 16 Jul 2024 15:53:19 +0200 Subject: [PATCH 086/198] refactor: use genawaiter and stack futures within a single session --- iroh-willow/src/session/aoi_finder.rs | 232 +++++++--- iroh-willow/src/session/capabilities.rs | 45 +- iroh-willow/src/session/data.rs | 29 +- iroh-willow/src/session/error.rs | 11 + iroh-willow/src/session/events.rs | 12 +- iroh-willow/src/session/pai_finder.rs | 31 +- iroh-willow/src/session/reconciler.rs | 161 ++++--- iroh-willow/src/session/run.rs | 553 +++++++++++------------- iroh-willow/src/util.rs | 1 + iroh-willow/src/util/gen_stream.rs | 62 +++ 10 files changed, 630 insertions(+), 507 deletions(-) create mode 100644 iroh-willow/src/util/gen_stream.rs diff --git a/iroh-willow/src/session/aoi_finder.rs b/iroh-willow/src/session/aoi_finder.rs index bcbab75f2e..354c83eeaa 100644 --- a/iroh-willow/src/session/aoi_finder.rs +++ b/iroh-willow/src/session/aoi_finder.rs @@ -1,12 +1,27 @@ -use std::{cell::RefCell, rc::Rc}; +use std::{cell::RefCell, collections::hash_map, future::Future, rc::Rc}; + +use futures_lite::{Stream, StreamExt}; +use genawaiter::rc::Co; +use tokio::sync::mpsc; use crate::{ + auth::InterestMap, proto::{ grouping::{Area, AreaOfInterest}, keys::NamespaceId, - sync::{AreaOfInterestHandle, CapabilityHandle, ReadCapability, SetupBindAreaOfInterest}, + sync::{ + AreaOfInterestHandle, CapabilityHandle, IntersectionHandle, ReadAuthorisation, + ReadCapability, SetupBindAreaOfInterest, + }, + }, + session::{ + capabilities::Capabilities, + channels::ChannelSenders, + pai_finder::{self, PaiIntersection}, + resource::ResourceMap, + Error, Scope, }, - session::{channels::ChannelSenders, resource::ResourceMap, Error, Scope}, + util::gen_stream::GenStream, }; /// Intersection between two areas of interest. 
@@ -28,96 +43,190 @@ impl AoiIntersection { } } -#[derive(Debug, Default, Clone)] -pub struct AoiFinder(Rc>); +#[derive(Debug)] +pub enum Input { + AddInterests(InterestMap), + PaiIntersection(PaiIntersection), + ReceivedValidatedAoi { + namespace: NamespaceId, + aoi: AreaOfInterest, + }, +} -pub type AoiIntersectionReceiver = flume::Receiver; +#[derive(Debug)] +pub enum Output { + SendMessage(SetupBindAreaOfInterest), + SubmitAuthorisation(ReadAuthorisation), + AoiIntersection(AoiIntersection), + SignAndSendCapability { + handle: IntersectionHandle, + capability: ReadCapability, + }, +} -#[derive(Debug, Default)] -struct Inner { - our_handles: ResourceMap, - their_handles: ResourceMap, - subscribers: Vec>, +#[derive(derive_more::Debug)] +pub struct IntersectionFinder { + #[debug("Co")] + co: Co, + caps: Capabilities, + handles: AoiResources, + interests: InterestMap, } -impl AoiFinder { - pub async fn bind_and_send_ours( - &self, - sender: &ChannelSenders, - namespace: NamespaceId, - aoi: AreaOfInterest, - authorisation: CapabilityHandle, - ) -> Result<(), Error> { - self.bind(Scope::Ours, namespace, aoi.clone())?; - let msg = SetupBindAreaOfInterest { - area_of_interest: aoi, - authorisation, - }; - sender.send(msg).await?; - Ok(()) +impl IntersectionFinder { + /// Run the [`IntersectionFinder`]. + /// + /// The returned stream is a generator, so it must be polled repeatedly to progress. 
+ pub fn run_gen( + caps: Capabilities, + inbox: impl Stream, + ) -> impl Stream> { + GenStream::new(|co| Self::new(co, caps).run(inbox)) } - pub fn validate_and_bind_theirs( - &self, - their_cap: &ReadCapability, - aoi: AreaOfInterest, - ) -> Result<(), Error> { - their_cap.try_granted_area(&aoi.area)?; - self.bind(Scope::Theirs, their_cap.granted_namespace().id(), aoi)?; - Ok(()) + fn new(co: Co, caps: Capabilities) -> Self { + Self { + co, + caps, + interests: Default::default(), + handles: Default::default(), + } } - pub fn subscribe(&self) -> flume::Receiver { - let (tx, rx) = flume::bounded(2); - self.0.borrow_mut().subscribers.push(tx); - rx + async fn run(mut self, inbox: impl Stream) -> Result<(), Error> { + tokio::pin!(inbox); + while let Some(input) = inbox.next().await { + match input { + Input::AddInterests(interests) => self.add_interests(interests).await, + Input::PaiIntersection(intersection) => { + self.on_pai_intersection(intersection).await?; + } + Input::ReceivedValidatedAoi { namespace, aoi } => { + self.handles + .bind_validated(&self.co, Scope::Theirs, namespace, aoi) + .await; + } + } + } + Ok(()) } - pub fn close(&self) { - let mut inner = self.0.borrow_mut(); - inner.subscribers.drain(..); + async fn add_interests(&mut self, interests: InterestMap) { + for (authorisation, aois) in interests.into_iter() { + let capability_handle = self.caps.find_ours(authorisation.read_cap()); + let namespace = authorisation.namespace(); + match self.interests.entry(authorisation.clone()) { + hash_map::Entry::Occupied(mut entry) => { + // The authorisation is already submitted. + let existing = entry.get_mut(); + for aoi in aois { + // If the AoI is new, and the capability is already bound, bind and send + // the AoI right away. 
+ if existing.insert(aoi.clone()) { + if let Some(capability_handle) = capability_handle { + self.handles + .bind_and_send_ours(&self.co, namespace, capability_handle, aoi) + .await; + } + } + } + } + hash_map::Entry::Vacant(entry) => { + // The authorisation is new. Submit to the PaiFinder. + entry.insert(aois); + self.co + .yield_(Output::SubmitAuthorisation(authorisation)) + .await; + } + } + } } - fn bind(&self, scope: Scope, namespace: NamespaceId, aoi: AreaOfInterest) -> Result<(), Error> { - let mut inner = self.0.borrow_mut(); - inner.bind_validated_aoi(scope, namespace, aoi) + async fn on_pai_intersection(&mut self, intersection: PaiIntersection) -> Result<(), Error> { + let PaiIntersection { + authorisation, + handle, + } = intersection; + let aois = self + .interests + .get(&authorisation) + .ok_or(Error::NoKnownInterestsForCapability)? + .clone(); + let namespace = authorisation.namespace(); + let (capability_handle, is_new) = self.caps.bind_ours(authorisation.read_cap().clone()); + if is_new { + self.co + .yield_(Output::SignAndSendCapability { + handle, + capability: authorisation.read_cap().clone(), + }) + .await; + } + + for aoi in aois.into_iter() { + self.handles + .bind_and_send_ours(&self.co, namespace, capability_handle, aoi) + .await; + } + Ok(()) } } -impl Inner { - pub fn bind_validated_aoi( +#[derive(Debug, Default)] +struct AoiResources { + our_handles: ResourceMap, + their_handles: ResourceMap, +} + +impl AoiResources { + async fn bind_and_send_ours( &mut self, + co: &Co, + namespace: NamespaceId, + authorisation: CapabilityHandle, + aoi: AreaOfInterest, + ) { + self.bind_validated(co, Scope::Ours, namespace, aoi.clone()) + .await; + let msg = SetupBindAreaOfInterest { + area_of_interest: aoi, + authorisation, + }; + co.yield_(Output::SendMessage(msg)).await; + } + pub async fn bind_validated( + &mut self, + co: &Co, scope: Scope, namespace: NamespaceId, aoi: AreaOfInterest, - ) -> Result<(), Error> { - // let area = aoi.area.clone(); + ) 
{ let info = AoiInfo { aoi: aoi.clone(), namespace, }; - let handle = match scope { + let bound_handle = match scope { Scope::Ours => self.our_handles.bind(info), Scope::Theirs => self.their_handles.bind(info), }; - let other_resources = match scope { + let store_to_check_against = match scope { Scope::Ours => &self.their_handles, Scope::Theirs => &self.our_handles, }; // TODO: If we stored the AoIs by namespace we would need to iterate less. - for (candidate_handle, candidate) in other_resources.iter() { - if candidate.namespace != namespace { + for (other_handle, other_aoi) in store_to_check_against.iter() { + if other_aoi.namespace != namespace { continue; } - let candidate_handle = *candidate_handle; + let other_handle = *other_handle; // Check if we have an intersection. - if let Some(intersection) = candidate.aoi.intersection(&aoi) { + if let Some(intersection) = other_aoi.aoi.intersection(&aoi) { // We found an intersection! let (our_handle, their_handle) = match scope { - Scope::Ours => (handle, candidate_handle), - Scope::Theirs => (candidate_handle, handle), + Scope::Ours => (bound_handle, other_handle), + Scope::Theirs => (other_handle, bound_handle), }; let intersection = AoiIntersection { our_handle, @@ -125,12 +234,9 @@ impl Inner { intersection, namespace, }; - // TODO: This can block... 
- self.subscribers - .retain(|sender| sender.send(intersection.clone()).is_ok()); + co.yield_(Output::AoiIntersection(intersection)).await; } } - Ok(()) } } @@ -139,9 +245,3 @@ struct AoiInfo { aoi: AreaOfInterest, namespace: NamespaceId, } - -impl AoiInfo { - // fn area(&self) -> &Area { - // &self.aoi.area - // } -} diff --git a/iroh-willow/src/session/capabilities.rs b/iroh-willow/src/session/capabilities.rs index a49bcd8a2e..5ab04ec46b 100644 --- a/iroh-willow/src/session/capabilities.rs +++ b/iroh-willow/src/session/capabilities.rs @@ -58,45 +58,28 @@ impl Capabilities { }) } - pub async fn bind_and_send_ours( - &self, - secret_store: &S, - sender: &ChannelSenders, - intersection_handle: IntersectionHandle, - capability: ReadCapability, - ) -> Result { - let (handle, message) = - self.bind_and_sign_ours(secret_store, intersection_handle, capability)?; - if let Some(message) = message { - sender.send(message).await?; - } - Ok(handle) - } - pub fn find_ours(&self, cap: &ReadCapability) -> Option { self.0.borrow().ours.find(cap) } - pub fn bind_and_sign_ours( + pub fn sign_capability( &self, secret_store: &S, intersection_handle: IntersectionHandle, capability: ReadCapability, - ) -> Result<(CapabilityHandle, Option), Error> { - let mut inner = self.0.borrow_mut(); - let (handle, is_new) = inner.ours.bind_if_new(capability.clone()); - let message = if is_new { - let signable = inner.challenge.signable()?; - let signature = secret_store.sign_user(&capability.receiver().id(), &signable)?; - Some(SetupBindReadCapability { - capability, - handle: intersection_handle, - signature, - }) - } else { - None - }; - Ok((handle, message)) + ) -> Result { + let inner = self.0.borrow(); + let signable = inner.challenge.signable()?; + let signature = secret_store.sign_user(&capability.receiver().id(), &signable)?; + Ok(SetupBindReadCapability { + capability, + handle: intersection_handle, + signature, + }) + } + + pub fn bind_ours(&self, capability: ReadCapability) -> 
(CapabilityHandle, bool) { + self.0.borrow_mut().ours.bind_if_new(capability) } pub fn validate_and_bind_theirs( diff --git a/iroh-willow/src/session/data.rs b/iroh-willow/src/session/data.rs index e78dcb1c0f..fb9ba2fbea 100644 --- a/iroh-willow/src/session/data.rs +++ b/iroh-willow/src/session/data.rs @@ -1,4 +1,6 @@ +use futures_lite::StreamExt; use tokio::sync::broadcast; +use tokio_stream::wrappers::ReceiverStream; use crate::{ proto::{ @@ -6,39 +8,48 @@ use crate::{ willow::AuthorisedEntry, }, session::{ - aoi_finder::AoiIntersectionReceiver, channels::ChannelSenders, payload::DEFAULT_CHUNK_SIZE, - static_tokens::StaticTokens, Error, SessionId, + channels::ChannelSenders, payload::DEFAULT_CHUNK_SIZE, static_tokens::StaticTokens, Error, + SessionId, }, store::{ entry::{EntryChannel, EntryOrigin}, traits::Storage, Store, }, + util::stream::Cancelable, }; -use super::payload::{send_payload_chunked, CurrentPayload}; +use super::{ + aoi_finder::AoiIntersection, + payload::{send_payload_chunked, CurrentPayload}, +}; + +#[derive(Debug)] +pub enum Input { + AoiIntersection(AoiIntersection), +} #[derive(derive_more::Debug)] pub struct DataSender { + inbox: Cancelable>, store: Store, send: ChannelSenders, - aoi_queue: AoiIntersectionReceiver, static_tokens: StaticTokens, session_id: SessionId, } impl DataSender { pub fn new( + inbox: Cancelable>, store: Store, send: ChannelSenders, - aoi_queue: AoiIntersectionReceiver, static_tokens: StaticTokens, session_id: SessionId, ) -> Self { Self { + inbox, store, send, - aoi_queue, static_tokens, session_id, } @@ -47,10 +58,11 @@ impl DataSender { let mut entry_stream = self.store.entries().subscribe(self.session_id); loop { tokio::select! 
{ - intersection = self.aoi_queue.recv_async() => { - let Ok(intersection) = intersection else { + input = self.inbox.next() => { + let Some(input) = input else { break; }; + let Input::AoiIntersection(intersection) = input; self.store.entries().watch_area( self.session_id, intersection.namespace, @@ -69,6 +81,7 @@ impl DataSender { } } } + tracing::debug!("data sender done"); Ok(()) } diff --git a/iroh-willow/src/session/error.rs b/iroh-willow/src/session/error.rs index a5a35c07da..530c5cd366 100644 --- a/iroh-willow/src/session/error.rs +++ b/iroh-willow/src/session/error.rs @@ -71,6 +71,17 @@ pub enum Error { Pai(#[from] PaiError), #[error("net failed: {0}")] Net(anyhow::Error), + #[error("channel receiver dropped")] + ChannelDropped, +} + +#[derive(Debug, thiserror::Error)] +#[error("channel receiver dropped")] +pub struct ChannelReceiverDropped; +impl From for Error { + fn from(_: ChannelReceiverDropped) -> Self { + Self::ChannelDropped + } } // TODO: Remove likely? diff --git a/iroh-willow/src/session/events.rs b/iroh-willow/src/session/events.rs index f5570a9a22..830710e06c 100644 --- a/iroh-willow/src/session/events.rs +++ b/iroh-willow/src/session/events.rs @@ -27,7 +27,10 @@ use crate::{ keys::NamespaceId, sync::{ReadAuthorisation, ReadCapability}, }, - session::{Error, Interests, Role, SessionId, SessionInit, SessionMode, SessionUpdate}, + session::{ + error::ChannelReceiverDropped, Error, Interests, Role, SessionId, SessionInit, SessionMode, + SessionUpdate, + }, store::traits::Storage, }; @@ -43,11 +46,8 @@ const INTENT_EVENT_CAP: usize = 64; pub struct EventSender(pub mpsc::Sender); impl EventSender { - pub async fn send(&self, event: EventKind) -> Result<(), Error> { - self.0 - .send(event) - .await - .map_err(|_| Error::InvalidState("session event receiver dropped")) + pub async fn send(&self, event: EventKind) -> Result<(), ChannelReceiverDropped> { + self.0.send(event).await.map_err(|_| ChannelReceiverDropped) } } diff --git 
a/iroh-willow/src/session/pai_finder.rs b/iroh-willow/src/session/pai_finder.rs index 48f6eb73d4..ce6a74049a 100644 --- a/iroh-willow/src/session/pai_finder.rs +++ b/iroh-willow/src/session/pai_finder.rs @@ -8,11 +8,16 @@ //! Licensed under LGPL and ported into this MIT/Apache codebase with explicit permission //! from the original author (gwil). -use std::collections::{HashMap, HashSet}; +use std::{ + collections::{HashMap, HashSet}, + future::Future, + pin::Pin, + task::{Context, Poll}, +}; use anyhow::Result; use futures_lite::{Stream, StreamExt}; -use genawaiter::rc::Gen; +use genawaiter::{rc::Gen, GeneratorState}; use tracing::{debug, trace}; use crate::{ @@ -29,6 +34,7 @@ use crate::{ resource::{MissingResource, ResourceMap}, Error, Scope, }, + util::gen_stream::GenStream, }; #[derive(Debug, thiserror::Error)] @@ -76,10 +82,14 @@ pub struct PaiFinder { } impl PaiFinder { + /// Run the [`PaiFinder`]. + /// + /// The returned stream is a generator, so it must be polled repeatedly for the [`PaiFinder`] + /// to progress. pub fn run_gen( inbox: impl Stream + Unpin, - ) -> Gen>> { - Gen::new(|co| PaiFinder::new(co).run(inbox)) + ) -> impl Stream> { + GenStream::new(|co| PaiFinder::new(co).run(inbox)) } #[cfg(test)] @@ -88,16 +98,11 @@ impl PaiFinder { mut outbox: impl futures_util::Sink + Unpin, ) -> Result<(), Error> { use futures_util::SinkExt; - use genawaiter::GeneratorState; - - let mut gen = Gen::new(|co| PaiFinder::new(co).run(inbox)); - loop { - let y = gen.async_resume().await; - match y { - GeneratorState::Yielded(output) => outbox.send(output).await?, - GeneratorState::Complete(res) => break res, - } + let mut gen = Self::run_gen(inbox); + while let Some(output) = gen.try_next().await? 
{ + outbox.send(output).await?; } + Ok(()) } pub fn new(co: genawaiter::rc::Co) -> Self { diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index b1f0578b8d..9e0abdde9e 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -6,9 +6,25 @@ use std::{ use bytes::Bytes; use futures_lite::StreamExt; +use genawaiter::rc::Co; use iroh_blobs::store::Store as PayloadStore; +use tokio_stream::wrappers::ReceiverStream; use tracing::{debug, trace}; +#[derive(Debug)] +pub enum Input { + AoiIntersection(AoiIntersection), +} + +#[derive(Debug)] +pub enum Output { + ReconciledArea { + namespace: NamespaceId, + area: AreaOfInterest, + }, + ReconciledAll, +} + use crate::{ proto::{ grouping::{AreaOfInterest, ThreeDRange}, @@ -21,7 +37,7 @@ use crate::{ willow::PayloadDigest, }, session::{ - aoi_finder::{AoiIntersection, AoiIntersectionReceiver}, + aoi_finder::AoiIntersection, channels::{ChannelSenders, MessageReceiver}, events::{EventKind, EventSender}, payload::{send_payload_chunked, CurrentPayload}, @@ -33,26 +49,13 @@ use crate::{ traits::{EntryReader, EntryStorage, SplitAction, SplitOpts, Storage}, Store, }, - util::stream::Cancelable, + util::{gen_stream::GenStream, stream::Cancelable}, }; -// pub enum Input { -// Received(ReconciliationMessage), -// Intersection(AoiIntersection), -// } -// -// pub enum Output { -// Reconciled { -// namespace: NamespaceId, -// area: AreaOfInterest, -// }, -// } - #[derive(derive_more::Debug)] pub struct Reconciler { shared: Shared, recv: Cancelable>, - events: EventSender, targets: TargetMap, current_entry: CurrentEntry, } @@ -60,69 +63,59 @@ pub struct Reconciler { type TargetId = (AreaOfInterestHandle, AreaOfInterestHandle); impl Reconciler { - #[allow(clippy::too_many_arguments)] - pub fn new( + /// Run the [`Reconciler`]. + /// + /// The returned stream is a generator, so it must be polled repeatedly to progress. 
+ pub fn run_gen( + inbox: Cancelable>, store: Store, recv: Cancelable>, - aoi_intersection_receiver: AoiIntersectionReceiver, static_tokens: StaticTokens, session_id: SessionId, send: ChannelSenders, - events: EventSender, our_role: Role, - mode: SessionMode, - ) -> Result { - let shared = Shared { - store, - our_role, - send, - static_tokens, - session_id, - mode, - }; - Ok(Self { - shared, - recv, - targets: TargetMap::new(aoi_intersection_receiver), - current_entry: Default::default(), - events, + ) -> impl futures_lite::Stream> { + GenStream::new(|co| { + let shared = Shared { + co, + store, + our_role, + send, + static_tokens, + session_id, + }; + Self { + shared, + recv, + targets: TargetMap::new(inbox), + current_entry: Default::default(), + } + .run() }) } pub async fn run(mut self) -> Result<(), Error> { loop { tokio::select! { - message = self.recv.try_next() => { + Some(message) = self.recv.next() => { tracing::trace!(?message, "tick: recv"); - match message? { - None => break, - Some(message) => match self.received_message(message).await? 
{ - ControlFlow::Continue(_) => {} - ControlFlow::Break(_) => { - debug!("reconciliation complete"); - if self.shared.mode == SessionMode::ReconcileOnce { - break; - } - } + self.received_message(message?).await?; + } + Some(input) = self.targets.inbox.next() => { + tracing::trace!(?input, "tick: input"); + match input { + Input::AoiIntersection(intersection) => { + self.targets.init_target(&self.shared, intersection).await?; } } } - Ok(intersection) = self.targets.aoi_intersection_receiver.recv_async() => { - tracing::trace!(?intersection, "tick: interesection"); - let area = intersection.intersection.clone(); - let namespace = intersection.namespace; - self.targets.init_target(&self.shared, intersection).await?; - self.events.send(EventKind::InterestIntersection { namespace, area }).await?; - } + else => break, } } Ok(()) } - async fn received_message( - &mut self, - message: ReconciliationMessage, - ) -> Result, Error> { + async fn received_message(&mut self, message: ReconciliationMessage) -> Result<(), Error> { match message { ReconciliationMessage::SendFingerprint(message) => { self.targets @@ -143,7 +136,7 @@ impl Reconciler { .received_announce_entries(&self.shared, message) .await?; if target.is_complete() && self.current_entry.is_none() { - return self.complete_target(target_id).await; + self.complete_target(target_id).await?; } } ReconciliationMessage::SendEntry(message) => { @@ -183,44 +176,48 @@ impl Reconciler { .get(&completed_target) .expect("target to exist"); if target.is_complete() { - return self.complete_target(target.id()).await; + self.complete_target(target.id()).await?; } } } }; - Ok(ControlFlow::Continue(())) + Ok(()) } - pub async fn complete_target(&mut self, id: TargetId) -> Result, Error> { + pub async fn complete_target(&mut self, id: TargetId) -> Result<(), Error> { let target = self .targets .map .remove(&id) .ok_or(Error::InvalidMessageInCurrentState)?; - let event = EventKind::Reconciled { + self.out(Output::ReconciledArea { area: 
target.intersection.intersection.clone(), namespace: target.namespace(), - }; - self.events.send(event).await?; + }) + .await; if self.targets.map.is_empty() { - Ok(ControlFlow::Break(())) - } else { - Ok(ControlFlow::Continue(())) + debug!("reconciliation complete"); + self.out(Output::ReconciledAll).await; } + Ok(()) + } + + async fn out(&self, output: Output) { + self.shared.co.yield_(output).await; } } #[derive(Debug)] struct TargetMap { map: HashMap>, - aoi_intersection_receiver: AoiIntersectionReceiver, + inbox: Cancelable>, } impl TargetMap { - pub fn new(aoi_intersection_receiver: AoiIntersectionReceiver) -> Self { + pub fn new(inbox: Cancelable>) -> Self { Self { map: Default::default(), - aoi_intersection_receiver, + inbox, } } pub async fn get_eventually( @@ -239,17 +236,18 @@ impl TargetMap { shared: &Shared, requested_id: &TargetId, ) -> Result<(), Error> { - loop { - let intersection = self - .aoi_intersection_receiver - .recv_async() - .await - .map_err(|_| Error::InvalidState("aoi finder closed"))?; - let id = self.init_target(shared, intersection).await?; - if id == *requested_id { - break Ok(()); + while let Some(input) = self.inbox.next().await { + match input { + Input::AoiIntersection(intersection) => { + let id = self.init_target(shared, intersection).await?; + if id == *requested_id { + return Ok(()); + } + } } } + // TODO: Error? 
+ Ok(()) } async fn init_target( @@ -345,14 +343,15 @@ struct EntryState { payload: CurrentPayload, } -#[derive(Debug)] +#[derive(derive_more::Debug)] struct Shared { + #[debug("Co")] + co: Co, store: Store, our_role: Role, send: ChannelSenders, static_tokens: StaticTokens, session_id: SessionId, - mode: SessionMode, } #[derive(Debug)] diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 410dace4c9..50ffe3a1fa 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -1,22 +1,32 @@ -use std::{cell::RefCell, collections::hash_map, rc::Rc}; +use std::{cell::RefCell, collections::hash_map, future::Future, rc::Rc}; -use futures_concurrency::stream::StreamExt as _; +use futures_concurrency::{ + future::{Join, TryJoin}, + stream::StreamExt as _, +}; use futures_lite::{Stream, StreamExt as _}; +use futures_util::{Sink, SinkExt}; use genawaiter::GeneratorState; use strum::IntoEnumIterator; use tokio::sync::mpsc; -use tokio_util::sync::CancellationToken; -use tracing::{debug, error_span, trace, warn, Span}; +use tokio_stream::wrappers::ReceiverStream; +use tokio_util::sync::{CancellationToken, PollSender}; +use tracing::{debug, error_span, trace, warn, Instrument, Span}; use crate::{ auth::InterestMap, - proto::sync::{ControlIssueGuarantee, InitialTransmission, LogicalChannel, Message}, + proto::sync::{ + ControlIssueGuarantee, InitialTransmission, LogicalChannel, Message, + SetupBindAreaOfInterest, + }, session::{ - aoi_finder::AoiFinder, + aoi_finder::{self, IntersectionFinder}, capabilities::Capabilities, channels::{ChannelSenders, LogicalChannelReceivers}, + data, events::{EventKind, EventSender, SessionEvent}, pai_finder::{self as pai, PaiFinder, PaiIntersection}, + reconciler, static_tokens::StaticTokens, Channels, Error, Role, SessionId, SessionInit, SessionUpdate, }, @@ -30,6 +40,7 @@ use crate::{ use super::{ channels::ChannelReceivers, data::{DataReceiver, DataSender}, + error::ChannelReceiverDropped, 
reconciler::Reconciler, SessionMode, }; @@ -48,7 +59,10 @@ pub async fn run_session( update_receiver: impl Stream + Unpin + 'static, ) -> Result<(), Error> { debug!(role = ?our_role, mode = ?init.mode, "start session"); - let Channels { send, recv } = channels; + let Channels { + send: channel_sender, + recv, + } = channels; let ChannelReceivers { control_recv, logical_recv: @@ -77,312 +91,250 @@ pub async fn run_session( initial_transmission.received_commitment, ); let tokens = StaticTokens::default(); - let aoi_finder = AoiFinder::default(); - - let tasks = Tasks::default(); - - let initial_interests = store.auth().resolve_interests(init.interests)?; - let all_interests = Rc::new(RefCell::new(initial_interests.clone())); - - // Setup a channel for the private area intersection finder. - let (pai_inbox_tx, pai_inbox_rx) = flume::bounded(128); - - // Spawn a task to handle session updates. - tasks.spawn(error_span!("upd"), { - let store = store.clone(); - let caps = caps.clone(); - let to_pai = pai_inbox_tx.clone(); - let all_interests = all_interests.clone(); - let sender = send.clone(); - let aoi_finder = aoi_finder.clone(); - async move { - while let Some(update) = update_receiver.next().await { - match update { - SessionUpdate::AddInterests(interests) => { - caps.revealed().await; - let interests = store.auth().resolve_interests(interests)?; - for (authorisation, aois) in interests.into_iter() { - let is_new_cap = { - let mut all_interests = all_interests.borrow_mut(); - match all_interests.entry(authorisation.clone()) { - hash_map::Entry::Occupied(mut entry) => { - entry.get_mut().extend(aois.clone()); - false - } - hash_map::Entry::Vacant(entry) => { - entry.insert(aois.clone()); - true - } - } - }; - if let Some(capability_handle) = - caps.find_ours(authorisation.read_cap()) - { - let namespace = authorisation.namespace(); - for aoi in aois.into_iter() { - aoi_finder - .bind_and_send_ours( - &sender, - namespace, - aoi, - capability_handle, - ) - .await?; - 
} - } - if is_new_cap { - to_pai - .send_async(pai::Input::SubmitAuthorisation(authorisation)) - .await - .map_err(|_| Error::InvalidState("PAI actor dead"))?; - } - } - } + + // Setup a channels for communication between the loops. + let (pai_inbox, pai_inbox_rx) = channel::(2); + let pai_inbox_rx = Cancelable::new(pai_inbox_rx, cancel_token.clone()); + + let (intersection_inbox, intersection_inbox_rx) = channel::(2); + let intersection_inbox_rx = Cancelable::new(intersection_inbox_rx, cancel_token.clone()); + + let (rec_inbox, rec_inbox_rx) = channel::(2); + let rec_inbox_rx = Cancelable::new(rec_inbox_rx, cancel_token.clone()); + + // Setup data channels only if in live mode. + let (data_inbox, data_inbox_rx) = if init.mode == SessionMode::Live { + let (data_inbox, data_inbox_rx) = channel::(2); + let data_inbox_rx = Cancelable::new(data_inbox_rx, cancel_token.clone()); + (Some(data_inbox), Some(data_inbox_rx)) + } else { + (None, None) + }; + + let initial_interests_fut = with_span(error_span!("init"), async { + caps.revealed().await; + let interests = store.auth().resolve_interests(init.interests)?; + intersection_inbox + .send(aoi_finder::Input::AddInterests(interests)) + .await?; + Result::<_, Error>::Ok(()) + }); + + let data_loop = with_span(error_span!("data"), async { + // Start data loop only if in live mode. + if let Some(inbox) = data_inbox_rx { + let send_fut = DataSender::new( + inbox, + store.clone(), + channel_sender.clone(), + tokens.clone(), + session_id, + ) + .run(); + let recv_fut = async { + let mut data_receiver = + DataReceiver::new(store.clone(), tokens.clone(), session_id); + while let Some(message) = data_recv.try_next().await? 
{ + data_receiver.on_message(message).await?; } - } + tracing::debug!("data receiver done"); + Ok(()) + }; + (send_fut, recv_fut).try_join().await?; + Ok(()) + } else { Ok(()) } }); - // Spawn a task to setup the initial interests - tasks.spawn(error_span!("setup-pai"), { - let caps = caps.clone(); - let to_pai = pai_inbox_tx.clone(); - async move { - caps.revealed().await; - for authorisation in initial_interests.keys() { - to_pai - .send_async(pai::Input::SubmitAuthorisation(authorisation.clone())) - .await - .map_err(|_| Error::InvalidState("PAI actor dead"))?; + let update_loop = with_span(error_span!("update"), async { + while let Some(update) = update_receiver.next().await { + match update { + SessionUpdate::AddInterests(interests) => { + caps.revealed().await; + let interests = store.auth().resolve_interests(interests)?; + intersection_inbox + .send(aoi_finder::Input::AddInterests(interests)) + .await?; + } } - Ok(()) } + Ok(()) }); - tasks.spawn(error_span!("pai"), { - let store = store.clone(); - let send = send.clone(); - let caps = caps.clone(); - let inbox = pai_inbox_rx - .into_stream() - .merge(intersection_recv.map(pai::Input::ReceivedMessage)); - let interests = Rc::clone(&all_interests); - let aoi_finder = aoi_finder.clone(); - let event_sender = event_sender.clone(); - async move { - let mut gen = PaiFinder::run_gen(inbox); - loop { - match gen.async_resume().await { - GeneratorState::Yielded(output) => match output { - pai::Output::SendMessage(message) => send.send(message).await?, - pai::Output::NewIntersection(intersection) => { - let event = EventKind::CapabilityIntersection { - namespace: intersection.authorisation.namespace(), - area: intersection.authorisation.read_cap().granted_area().clone(), - }; - // TODO: break if error? 
- event_sender.send(event).await.ok(); - on_pai_intersection( - &interests, - store.secrets(), - &aoi_finder, - &caps, - &send, - intersection, - ) + let intersection_loop = with_span(error_span!("intersection"), async { + use aoi_finder::Output; + let mut gen = IntersectionFinder::run_gen(caps.clone(), intersection_inbox_rx); + while let Some(output) = gen.try_next().await? { + match output { + Output::SendMessage(message) => channel_sender.send(message).await?, + Output::SubmitAuthorisation(authorisation) => { + pai_inbox + .send(pai::Input::SubmitAuthorisation(authorisation)) + .await?; + } + Output::AoiIntersection(intersection) => { + let area = intersection.intersection.clone(); + let namespace = intersection.namespace; + rec_inbox + .send(reconciler::Input::AoiIntersection(intersection.clone())) + .await?; + event_sender + .send(EventKind::InterestIntersection { namespace, area }) + .await?; + if let Some(data_inbox) = &data_inbox { + data_inbox + .send(data::Input::AoiIntersection(intersection.clone())) .await?; - } - pai::Output::SignAndSendSubspaceCap(handle, cap) => { - let message = - caps.sign_subspace_capabiltiy(store.secrets(), cap, handle)?; - send.send(Box::new(message)).await?; - } - }, - GeneratorState::Complete(res) => { - return res; } } - } - } - }); - - // Spawn a task to handle incoming static tokens. - tasks.spawn(error_span!("stt"), { - let tokens = tokens.clone(); - async move { - while let Some(message) = static_tokens_recv.try_next().await? { - tokens.bind_theirs(message.static_token); - } - Ok(()) - } - }); - - // Only setup data receiver if session is configured in live mode. - if init.mode == SessionMode::Live { - tasks.spawn(error_span!("data-recv"), { - let store = store.clone(); - let tokens = tokens.clone(); - async move { - let mut data_receiver = DataReceiver::new(store, tokens, session_id); - while let Some(message) = data_recv.try_next().await? 
{ - data_receiver.on_message(message).await?; + Output::SignAndSendCapability { handle, capability } => { + let message = caps.sign_capability(store.secrets(), handle, capability)?; + channel_sender.send(message).await?; } - Ok(()) - } - }); - tasks.spawn(error_span!("data-send"), { - let store = store.clone(); - let tokens = tokens.clone(); - let send = send.clone(); - let aoi_intersections = aoi_finder.subscribe(); - async move { - DataSender::new(store, send, aoi_intersections, tokens, session_id) - .run() - .await?; - Ok(()) - } - }); - } - - // Spawn a task to handle incoming capabilities. - tasks.spawn(error_span!("cap-recv"), { - let to_pai = pai_inbox_tx.clone(); - let caps = caps.clone(); - async move { - while let Some(message) = capability_recv.try_next().await? { - let handle = message.handle; - caps.validate_and_bind_theirs(message.capability, message.signature)?; - to_pai - .send_async(pai::Input::ReceivedReadCapForIntersection(handle)) - .await - .map_err(|_| Error::InvalidState("PAI actor dead"))?; } - Ok(()) } + Ok(()) }); - // Spawn a task to handle incoming areas of interest. - tasks.spawn(error_span!("aoi-recv"), { - let aoi_finder = aoi_finder.clone(); - let caps = caps.clone(); - async move { - while let Some(message) = aoi_recv.try_next().await? { - let cap = caps.get_theirs_eventually(message.authorisation).await; - aoi_finder.validate_and_bind_theirs(&cap, message.area_of_interest)?; + let pai_loop = with_span(error_span!("pai"), async { + use pai::Output; + let inbox = pai_inbox_rx.merge(intersection_recv.map(pai::Input::ReceivedMessage)); + let mut gen = PaiFinder::run_gen(inbox); + while let Some(output) = gen.try_next().await? 
{ + match output { + Output::SendMessage(message) => channel_sender.send(message).await?, + Output::NewIntersection(intersection) => { + let event = EventKind::CapabilityIntersection { + namespace: intersection.authorisation.namespace(), + area: intersection.authorisation.read_cap().granted_area().clone(), + }; + ( + intersection_inbox.send(aoi_finder::Input::PaiIntersection(intersection)), + event_sender.send(event), + ) + .try_join() + .await?; + } + Output::SignAndSendSubspaceCap(handle, cap) => { + let message = caps.sign_subspace_capabiltiy(store.secrets(), cap, handle)?; + channel_sender.send(Box::new(message)).await?; + } } - aoi_finder.close(); - Ok(()) } + Ok(()) }); - // Spawn a task to handle reconciliation messages - tasks.spawn(error_span!("rec"), { - let cancel_token = cancel_token.clone(); - let aoi_intersections = aoi_finder.subscribe(); - let reconciler = Reconciler::new( + let reconciler_loop = with_span(error_span!("reconciler"), async { + use reconciler::Output; + let mut gen = Reconciler::run_gen( + rec_inbox_rx, store.clone(), reconciliation_recv, - aoi_intersections, tokens.clone(), session_id, - send.clone(), - event_sender.clone(), + channel_sender.clone(), our_role, - init.mode, - )?; - async move { - let res = reconciler.run().await; - if res.is_ok() && !init.mode.is_live() { - debug!("reconciliation complete and not in live mode: trigger cancel"); - cancel_token.cancel(); + ); + while let Some(output) = gen.try_next().await? 
{ + match output { + Output::ReconciledArea { namespace, area } => { + event_sender + .send(EventKind::Reconciled { namespace, area }) + .await?; + } + Output::ReconciledAll => { + // Stop session if not in live mode; + if !init.mode.is_live() { + cancel_token.cancel(); + break; + } + } } - res } + Ok(()) }); - // Spawn a task to handle control messages - tasks.spawn(error_span!("ctl-recv"), { - let cancel_token = cancel_token.clone(); - let fut = control_loop(our_role, caps, send.clone(), control_recv, pai_inbox_tx); - async move { - let res = fut.await; - if res.is_ok() { - debug!("control channel closed: trigger cancel"); - cancel_token.cancel(); - } - res + let token_recv_loop = with_span(error_span!("token_recv"), async { + while let Some(message) = static_tokens_recv.try_next().await? { + tokens.bind_theirs(message.static_token); } + Ok(()) }); - // Wait until the session is cancelled, or until a task fails. - let result = loop { - tokio::select! { - _ = cancel_token.cancelled() => { - debug!("cancel token triggered: close session"); - break Ok(()); - }, - Some((span, result)) = tasks.join_next() => { - let _guard = span.enter(); - trace!(?result, remaining = tasks.remaining_tasks(), "task complete"); - match result { - Err(err) => { - warn!(?err, "session task paniced: abort session"); - break Err(Error::TaskFailed(err)); - }, - Ok(Err(err)) => { - warn!(?err, "session task failed: abort session"); - break Err(err); - } - Ok(Ok(())) => {} - } - }, + let caps_recv_loop = with_span(error_span!("caps_recv"), async { + while let Some(message) = capability_recv.try_next().await? 
{ + let handle = message.handle; + caps.validate_and_bind_theirs(message.capability, message.signature)?; + pai_inbox + .send(pai::Input::ReceivedReadCapForIntersection(handle)) + .await?; } - }; + Ok(()) + }); - if result.is_err() { - debug!("aborting session"); - tasks.abort_all(); - } else { - debug!("closing session"); + let control_loop = with_span(error_span!("control"), async { + let res = control_loop(control_recv, our_role, &caps, &channel_sender, &pai_inbox).await; + cancel_token.cancel(); + res + }); + + let aoi_recv_loop = with_span(error_span!("aoi_recv"), async { + while let Some(message) = aoi_recv.try_next().await? { + let SetupBindAreaOfInterest { + area_of_interest, + authorisation, + } = message; + let cap = caps.get_theirs_eventually(authorisation).await; + cap.try_granted_area(&area_of_interest.area)?; + let namespace = cap.granted_namespace().id(); + intersection_inbox + .send(aoi_finder::Input::ReceivedValidatedAoi { + namespace, + aoi: area_of_interest, + }) + .await?; + } + Ok(()) + }); + + let result = ( + initial_interests_fut, + control_loop, + data_loop, + update_loop, + pai_loop, + intersection_loop, + reconciler_loop, + token_recv_loop, + caps_recv_loop, + aoi_recv_loop, + ) + .try_join() + .await; + + match &result { + Ok(_) => debug!("session complete"), + Err(err) => debug!(?err, "session failed"), } // Unsubscribe from the store. This stops the data send task. store.entries().unsubscribe(&session_id); - // Wait for remaining tasks to terminate to catch any panics. - // TODO: Add timeout? - while let Some((span, result)) = tasks.join_next().await { - let _guard = span.enter(); - trace!( - ?result, - remaining = tasks.remaining_tasks(), - "task complete" - ); - match result { - Err(err) if err.is_cancelled() => {} - Err(err) => warn!("task paniced: {err:?}"), - Ok(Err(err)) => warn!("task failed: {err:?}"), - Ok(Ok(())) => {} - } - } - // Close our channel senders. 
// This will stop the network send loop after all pending data has been sent. - send.close_all(); + channel_sender.close_all(); debug!(success = result.is_ok(), "session complete"); - result + result.map(|_| ()) } -pub type Tasks = SharedJoinMap>; - async fn control_loop( - our_role: Role, - caps: Capabilities, - sender: ChannelSenders, mut control_recv: Cancelable>, - to_pai: flume::Sender, + our_role: Role, + caps: &Capabilities, + sender: &ChannelSenders, + pai_inbox: &Sender, ) -> Result<(), Error> { // Reveal our nonce. let reveal_message = caps.reveal_commitment()?; @@ -409,20 +361,18 @@ async fn control_loop( sender.get_logical(channel).add_guarantees(amount); } Message::PaiRequestSubspaceCapability(msg) => { - to_pai - .send_async(pai::Input::ReceivedSubspaceCapRequest(msg.handle)) - .await - .map_err(|_| Error::InvalidState("PAI actor dead"))?; + pai_inbox + .send(pai::Input::ReceivedSubspaceCapRequest(msg.handle)) + .await?; } Message::PaiReplySubspaceCapability(msg) => { caps.verify_subspace_cap(&msg.capability, &msg.signature)?; - to_pai - .send_async(pai::Input::ReceivedVerifiedSubspaceCapReply( + pai_inbox + .send(pai::Input::ReceivedVerifiedSubspaceCapReply( msg.handle, msg.capability.granted_namespace().id(), )) - .await - .map_err(|_| Error::InvalidState("PAI actor dead"))?; + .await?; } _ => return Err(Error::UnsupportedMessage), } @@ -431,34 +381,33 @@ async fn control_loop( Ok(()) } -async fn on_pai_intersection( - interests: &Rc>, - secrets: &S, - aoi_finder: &AoiFinder, - capabilities: &Capabilities, - sender: &ChannelSenders, - intersection: PaiIntersection, -) -> Result<(), Error> { - let PaiIntersection { - authorisation, - handle, - } = intersection; - let aois = { - let interests = interests.borrow(); - interests - .get(&authorisation) - .ok_or(Error::NoKnownInterestsForCapability)? 
- .clone() - }; - let namespace = authorisation.namespace(); - let capability_handle = capabilities - .bind_and_send_ours(secrets, sender, handle, authorisation.read_cap().clone()) - .await?; - - for aoi in aois.into_iter() { - aoi_finder - .bind_and_send_ours(sender, namespace, aoi, capability_handle) - .await?; +fn channel(cap: usize) -> (Sender, ReceiverStream) { + let (tx, rx) = mpsc::channel(cap); + (Sender(tx), ReceiverStream::new(rx)) +} + +#[derive(Debug)] +pub struct Sender(mpsc::Sender); + +impl Clone for Sender { + fn clone(&self) -> Self { + Self(self.0.clone()) } - Ok(()) +} + +impl Sender { + async fn send(&self, item: T) -> Result<(), ChannelReceiverDropped> { + self.0.send(item).await.map_err(|_| ChannelReceiverDropped) + } +} + +async fn with_span(span: Span, fut: impl Future>) -> Result<(), Error> { + async move { + tracing::debug!("start"); + let res = fut.await; + tracing::debug!(?res, "done"); + res + } + .instrument(span) + .await } diff --git a/iroh-willow/src/util.rs b/iroh-willow/src/util.rs index f417fb773f..45fa3500da 100644 --- a/iroh-willow/src/util.rs +++ b/iroh-willow/src/util.rs @@ -2,6 +2,7 @@ pub mod channel; pub mod codec; +pub mod gen_stream; pub mod queue; pub mod stream; pub mod task; diff --git a/iroh-willow/src/util/gen_stream.rs b/iroh-willow/src/util/gen_stream.rs new file mode 100644 index 0000000000..97ed86fe86 --- /dev/null +++ b/iroh-willow/src/util/gen_stream.rs @@ -0,0 +1,62 @@ +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; + +use genawaiter::{ + rc::{Co, Gen}, + GeneratorState, +}; + +#[derive(derive_more::Debug)] +pub struct GenStream +where + Fut: Future>, +{ + #[debug("Gen")] + gen: Gen, + is_complete: bool, +} + +impl GenStream +where + Fut: Future>, +{ + pub fn new(producer: impl FnOnce(Co) -> Fut) -> Self { + Self::from_gen(Gen::new(producer)) + } + + pub fn from_gen(gen: Gen) -> Self { + Self { + gen, + is_complete: false, + } + } +} + +impl futures_lite::Stream for GenStream +where + 
Fut: Future>, +{ + type Item = Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + if self.is_complete { + return Poll::Ready(None); + } + let item = { + let mut fut = self.gen.async_resume(); + let out = std::task::ready!(Pin::new(&mut fut).poll(cx)); + match out { + GeneratorState::Yielded(output) => Some(Ok(output)), + GeneratorState::Complete(Ok(())) => None, + GeneratorState::Complete(Err(err)) => Some(Err(err)), + } + }; + if matches!(item, None | Some(Err(_))) { + self.is_complete = true; + } + Poll::Ready(item) + } +} From 08ed2ce897d2f248f39ac2e3178b22a05ece6e09 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Wed, 17 Jul 2024 14:29:30 +0200 Subject: [PATCH 087/198] refactor: cleanup and fix intents --- iroh-willow/docs/willow-over-quic.md | 0 iroh-willow/examples/bench.rs | 236 +++++++++ iroh-willow/src/actor.rs | 74 +-- iroh-willow/src/engine.rs | 92 ++-- iroh-willow/src/net.rs | 51 +- iroh-willow/src/session.rs | 16 +- iroh-willow/src/session/aoi_finder.rs | 2 +- iroh-willow/src/session/capabilities.rs | 8 +- iroh-willow/src/session/events.rs | 649 +++++------------------- iroh-willow/src/session/intents.rs | 443 ++++++++++++++++ iroh-willow/src/session/pai_finder.rs | 23 +- iroh-willow/src/session/run.rs | 189 +++++-- iroh-willow/src/util/gen_stream.rs | 30 +- iroh-willow/src/util/stream.rs | 21 +- 14 files changed, 1134 insertions(+), 700 deletions(-) create mode 100644 iroh-willow/docs/willow-over-quic.md create mode 100644 iroh-willow/examples/bench.rs create mode 100644 iroh-willow/src/session/intents.rs diff --git a/iroh-willow/docs/willow-over-quic.md b/iroh-willow/docs/willow-over-quic.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/iroh-willow/examples/bench.rs b/iroh-willow/examples/bench.rs new file mode 100644 index 0000000000..b574a817af --- /dev/null +++ b/iroh-willow/examples/bench.rs @@ -0,0 +1,236 @@ +fn main() {} + +// use std::{collections::BTreeSet, time::Instant}; 
+// +// use futures_lite::StreamExt; +// use iroh_base::key::SecretKey; +// use iroh_net::{Endpoint, NodeAddr, NodeId}; +// use rand::SeedableRng; +// use tracing::info; +// +// use iroh_willow::{ +// actor::ActorHandle, +// auth::{CapSelector, DelegateTo}, +// form::{AuthForm, EntryForm, PayloadForm, SubspaceForm, TimestampForm}, +// net::run, +// proto::{ +// grouping::ThreeDRange, +// keys::{NamespaceId, NamespaceKind, UserId}, +// meadowcap::AccessMode, +// willow::{Entry, InvalidPath, Path}, +// }, +// session::{Interests, Role, SessionInit, SessionMode}, +// }; +// +// const ALPN: &[u8] = b"iroh-willow/0"; +// +// #[tokio::main(flavor = "multi_thread")] +// async fn main() -> anyhow::Result<()> { +// tracing_subscriber::fmt::init(); +// let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(1); +// let n_betty = parse_env_var("N_BETTY", 100); +// let n_alfie = parse_env_var("N_ALFIE", 100); +// +// let (ep_alfie, node_id_alfie, _) = create_endpoint(&mut rng).await?; +// let (ep_betty, node_id_betty, addr_betty) = create_endpoint(&mut rng).await?; +// +// let start = Instant::now(); +// let mut expected_entries = BTreeSet::new(); +// +// let handle_alfie = ActorHandle::spawn_memory(Default::default(), node_id_alfie); +// let handle_betty = ActorHandle::spawn_memory(Default::default(), node_id_betty); +// +// let user_alfie = handle_alfie.create_user().await?; +// let user_betty = handle_betty.create_user().await?; +// +// let namespace_id = handle_alfie +// .create_namespace(NamespaceKind::Owned, user_alfie) +// .await?; +// +// let cap_for_betty = handle_alfie +// .delegate_caps( +// CapSelector::widest(namespace_id), +// AccessMode::Write, +// DelegateTo::new(user_betty, None), +// ) +// .await?; +// +// handle_betty.import_caps(cap_for_betty).await?; +// +// insert( +// &handle_alfie, +// namespace_id, +// user_alfie, +// n_alfie, +// |n| Path::new(&[b"alfie", n.to_string().as_bytes()]), +// |n| format!("alfie{n}"), +// &mut expected_entries, +// ) +// 
.await?; +// +// insert( +// &handle_betty, +// namespace_id, +// user_betty, +// n_betty, +// |n| Path::new(&[b"betty", n.to_string().as_bytes()]), +// |n| format!("betty{n}"), +// &mut expected_entries, +// ) +// .await?; +// +// let init_alfie = SessionInit::new(Interests::All, SessionMode::ReconcileOnce); +// let init_betty = SessionInit::new(Interests::All, SessionMode::ReconcileOnce); +// +// info!("init took {:?}", start.elapsed()); +// +// let start = Instant::now(); +// let (conn_alfie, conn_betty) = tokio::join!( +// async move { ep_alfie.connect(addr_betty, ALPN).await.unwrap() }, +// async move { ep_betty.accept().await.unwrap().await.unwrap() } +// ); +// info!("connecting took {:?}", start.elapsed()); +// +// let start = Instant::now(); +// let (session_alfie, session_betty) = tokio::join!( +// run( +// node_id_alfie, +// handle_alfie.clone(), +// conn_alfie, +// Role::Alfie, +// init_alfie +// ), +// run( +// node_id_betty, +// handle_betty.clone(), +// conn_betty, +// Role::Betty, +// init_betty +// ) +// ); +// let mut session_alfie = session_alfie?; +// let mut session_betty = session_betty?; +// let (res_alfie, res_betty) = tokio::join!(session_alfie.join(), session_betty.join()); +// info!(time=?start.elapsed(), "reconciliation finished"); +// +// info!("alfie res {:?}", res_alfie); +// info!("betty res {:?}", res_betty); +// assert!(res_alfie.is_ok()); +// assert!(res_betty.is_ok()); +// let alfie_entries = get_entries(&handle_alfie, namespace_id).await?; +// let betty_entries = get_entries(&handle_betty, namespace_id).await?; +// info!("alfie has now {} entries", alfie_entries.len()); +// info!("betty has now {} entries", betty_entries.len()); +// // not using assert_eq because it would print a lot in case of failure +// assert!(alfie_entries == expected_entries, "alfie expected entries"); +// assert!(betty_entries == expected_entries, "betty expected entries"); +// +// Ok(()) +// } +// +// pub async fn create_endpoint( +// rng: &mut 
rand_chacha::ChaCha12Rng, +// ) -> anyhow::Result<(Endpoint, NodeId, NodeAddr)> { +// let ep = Endpoint::builder() +// .secret_key(SecretKey::generate_with_rng(rng)) +// .alpns(vec![ALPN.to_vec()]) +// .bind(0) +// .await?; +// let addr = ep.node_addr().await?; +// let node_id = ep.node_id(); +// Ok((ep, node_id, addr)) +// } +// +// async fn get_entries( +// store: &ActorHandle, +// namespace: NamespaceId, +// ) -> anyhow::Result> { +// let entries: anyhow::Result> = store +// .get_entries(namespace, ThreeDRange::full()) +// .await? +// .try_collect() +// .await; +// entries +// } +// +// async fn insert( +// handle: &ActorHandle, +// namespace_id: NamespaceId, +// user_id: UserId, +// count: usize, +// path_fn: impl Fn(usize) -> Result, +// content_fn: impl Fn(usize) -> String, +// track_entries: &mut impl Extend, +// ) -> anyhow::Result<()> { +// for i in 0..count { +// let payload = content_fn(i).as_bytes().to_vec(); +// let path = path_fn(i).expect("invalid path"); +// let entry = EntryForm { +// namespace_id, +// subspace_id: SubspaceForm::User, +// path, +// timestamp: TimestampForm::Now, +// payload: PayloadForm::Bytes(payload.into()), +// }; +// let (entry, inserted) = handle.insert(entry, AuthForm::Any(user_id)).await?; +// assert!(inserted); +// track_entries.extend([entry]); +// } +// Ok(()) +// } +// +// fn parse_env_var(var: &str, default: T) -> T +// where +// T: std::str::FromStr, +// T::Err: std::fmt::Debug, +// { +// match std::env::var(var).as_deref() { +// Ok(val) => val +// .parse() +// .unwrap_or_else(|_| panic!("failed to parse environment variable {var}")), +// Err(_) => default, +// } +// } +// +// // async fn get_entries_debug( +// // store: &StoreHandle, +// // namespace: NamespaceId, +// // ) -> anyhow::Result> { +// // let entries = get_entries(store, namespace).await?; +// // let mut entries: Vec<_> = entries +// // .into_iter() +// // .map(|e| (e.subspace_id, e.path)) +// // .collect(); +// // entries.sort(); +// // Ok(entries) +// // 
} +// // +// // +// // +// // tokio::task::spawn({ +// // let handle_alfie = handle_alfie.clone(); +// // let handle_betty = handle_betty.clone(); +// // async move { +// // loop { +// // info!( +// // "alfie count: {}", +// // handle_alfie +// // .get_entries(namespace_id, ThreeDRange::full()) +// // .await +// // .unwrap() +// // .count() +// // .await +// // ); +// // info!( +// // "betty count: {}", +// // handle_betty +// // .get_entries(namespace_id, ThreeDRange::full()) +// // .await +// // .unwrap() +// // .count() +// // .await +// // ); +// // tokio::time::sleep(Duration::from_secs(1)).await; +// // } +// // } +// // }); diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/actor.rs index 09e5573bfa..e928957533 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/actor.rs @@ -1,6 +1,6 @@ use std::{collections::HashMap, sync::Arc, thread::JoinHandle}; -use anyhow::Result; +use anyhow::{Context, Result}; use futures_lite::{future::Boxed as BoxFuture, stream::Stream, StreamExt}; use futures_util::future::{self, FutureExt}; use iroh_base::key::NodeId; @@ -23,6 +23,7 @@ use crate::{ }, session::{ events::{EventKind, EventSender, SessionEvent}, + intents::IntentData, run_session, Channels, Error, Interests, Role, SessionId, SessionInit, SessionUpdate, }, store::{ @@ -150,7 +151,8 @@ impl ActorHandle { our_role: Role, initial_transmission: InitialTransmission, channels: Channels, - init: SessionInit, + // init: SessionInit, + intents: Vec, ) -> Result { let (reply, reply_rx) = oneshot::channel(); self.send(ToActor::InitSession { @@ -158,7 +160,8 @@ impl ActorHandle { initial_transmission, peer, channels, - init, + // init, + intents, reply, }) .await?; @@ -238,27 +241,27 @@ impl Drop for ActorHandle { #[derive(Debug)] pub struct SessionHandle { - pub session_id: SessionId, + // pub session_id: SessionId, pub cancel_token: CancellationToken, pub update_tx: mpsc::Sender, - pub event_rx: mpsc::Receiver, + pub event_rx: mpsc::Receiver, } impl SessionHandle 
{ - pub fn session_id(&self) -> SessionId { - self.session_id - } + // pub fn session_id(&self) -> SessionId { + // self.session_id + // } /// Wait for the session to finish. /// /// Returns an error if the session failed to complete. pub async fn on_finish(&mut self) -> Result<(), Arc> { while let Some(event) = self.event_rx.recv().await { - if let EventKind::Closed { result } = event { + if let SessionEvent::Complete { result } = event { return result; } } - Err(Arc::new(Error::ActorFailed)) + Ok(()) } pub async fn send_update(&self, update: SessionUpdate) -> Result<()> { @@ -272,6 +275,7 @@ impl SessionHandle { /// Previously queued messages will still be sent out. The session will only be closed /// once the other peer closes their senders as well. pub fn close(&self) { + debug!("close session (session handle close called)"); self.cancel_token.cancel(); } } @@ -284,7 +288,8 @@ pub enum ToActor { initial_transmission: InitialTransmission, #[debug(skip)] channels: Channels, - init: SessionInit, + // init: SessionInit, + intents: Vec, reply: oneshot::Sender>, }, GetEntries { @@ -338,10 +343,9 @@ pub enum ToActor { #[derive(Debug)] struct ActiveSession { - #[allow(unused)] - peer: NodeId, + // peer: NodeId, task_key: TaskKey, // state: SharedSessionState - event_tx: mpsc::Sender, + // event_tx: mpsc::Sender, } #[derive(Debug)] @@ -350,7 +354,7 @@ pub struct Actor { store: Store, next_session_id: u64, sessions: HashMap, - session_tasks: JoinMap>, + session_tasks: JoinMap>>, tasks: JoinSet<()>, } @@ -385,11 +389,8 @@ impl Actor { } }, Some((id, res)) = self.session_tasks.next(), if !self.session_tasks.is_empty() => { - let res = match res { - Ok(res) => res, - Err(err) => Err(err.into()) - }; - self.complete_session(&id, res).await; + let _res = res.context("session task paniced")?; + self.complete_session(&id).await; } }; } @@ -411,7 +412,8 @@ impl Actor { channels, our_role, initial_transmission, - init, + intents, + // init, reply, } => { let session_id = 
self.next_session_id(); @@ -428,7 +430,7 @@ impl Actor { cancel_token.clone(), session_id, our_role, - init, + intents, initial_transmission, EventSender(event_tx.clone()), update_rx, @@ -438,13 +440,13 @@ impl Actor { let task_key = self.session_tasks.spawn_local(session_id, future); let active_session = ActiveSession { - event_tx, + // event_tx, task_key, - peer, + // peer, }; self.sessions.insert(session_id, active_session); let handle = SessionHandle { - session_id, + // session_id, cancel_token, update_tx, event_rx, @@ -524,19 +526,19 @@ impl Actor { } } - async fn complete_session(&mut self, session_id: &SessionId, result: Result<(), Error>) { + async fn complete_session(&mut self, session_id: &SessionId) { let session = self.sessions.remove(session_id); if let Some(session) = session { - debug!(?session, ?result, "complete session"); - let result = match result { - Ok(()) => Ok(()), - Err(err) => Err(Arc::new(err)), - }; - session - .event_tx - .send(EventKind::Closed { result }) - .await - .ok(); + // debug!(?session, ?result, "complete session"); + // let result = match result { + // Ok(()) => Ok(()), + // Err(err) => Err(Arc::new(err)), + // }; + // session + // .event_tx + // .send(EventKind::Closed { result }) + // .await + // .ok(); self.session_tasks.remove(&session.task_key); } else { warn!("remove_session called for unknown session"); diff --git a/iroh-willow/src/engine.rs b/iroh-willow/src/engine.rs index e4c704ff15..3256f5a596 100644 --- a/iroh-willow/src/engine.rs +++ b/iroh-willow/src/engine.rs @@ -1,46 +1,46 @@ -use anyhow::Result; -use iroh_blobs::protocol::ALPN; -use iroh_net::{endpoint::Connection, Endpoint, NodeId}; - -use crate::{ - actor::ActorHandle, - net, - session::{Role, SessionInit}, - store::memory, -}; - -#[derive(Debug, Clone)] -pub struct Engine { - endpoint: Endpoint, - handle: ActorHandle, -} - -impl Engine { - pub fn new(endpoint: Endpoint, handle: ActorHandle) -> Self { - Self { endpoint, handle } - } - - pub fn 
memory(endpoint: Endpoint) -> Self { - let me = endpoint.node_id(); - let payloads = iroh_blobs::store::mem::Store::default(); - let handle = ActorHandle::spawn(move || memory::Store::new(payloads), me); - Self::new(endpoint, handle) - } - - pub async fn handle_connection(&self, conn: Connection, init: SessionInit) -> Result<()> { - let our_role = Role::Betty; - let handle = self.handle.clone(); - let mut session = net::run(self.endpoint.node_id(), handle, conn, our_role, init).await?; - session.join().await?; - Ok(()) - } - - pub async fn sync_with_peer(&self, peer: NodeId, init: SessionInit) -> Result<()> { - let our_role = Role::Alfie; - let conn = self.endpoint.connect_by_node_id(&peer, ALPN).await?; - let handle = self.handle.clone(); - let mut session = net::run(self.endpoint.node_id(), handle, conn, our_role, init).await?; - session.join().await?; - Ok(()) - } -} +// use anyhow::Result; +// use iroh_blobs::protocol::ALPN; +// use iroh_net::{endpoint::Connection, Endpoint, NodeId}; +// +// use crate::{ +// actor::ActorHandle, +// net, +// session::{Role, SessionInit}, +// store::memory, +// }; +// +// #[derive(Debug, Clone)] +// pub struct Engine { +// endpoint: Endpoint, +// handle: ActorHandle, +// } +// +// impl Engine { +// pub fn new(endpoint: Endpoint, handle: ActorHandle) -> Self { +// Self { endpoint, handle } +// } +// +// pub fn memory(endpoint: Endpoint) -> Self { +// let me = endpoint.node_id(); +// let payloads = iroh_blobs::store::mem::Store::default(); +// let handle = ActorHandle::spawn(move || memory::Store::new(payloads), me); +// Self::new(endpoint, handle) +// } +// +// pub async fn handle_connection(&self, conn: Connection, init: SessionInit) -> Result<()> { +// let our_role = Role::Betty; +// let handle = self.handle.clone(); +// let mut session = net::run(self.endpoint.node_id(), handle, conn, our_role, init).await?; +// session.join().await?; +// Ok(()) +// } +// +// pub async fn sync_with_peer(&self, peer: NodeId, init: SessionInit) 
-> Result<()> { +// let our_role = Role::Alfie; +// let conn = self.endpoint.connect_by_node_id(&peer, ALPN).await?; +// let handle = self.handle.clone(); +// let mut session = net::run(self.endpoint.node_id(), handle, conn, our_role, init).await?; +// session.join().await?; +// Ok(()) +// } +// } diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index b58513d6d8..3f1c5f25a4 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -20,6 +20,7 @@ use crate::{ ChannelReceivers, ChannelSenders, Channels, LogicalChannelReceivers, LogicalChannelSenders, }, + intents::IntentData, Role, SessionInit, }, util::channel::{ @@ -84,17 +85,18 @@ pub async fn run( actor: ActorHandle, conn: Connection, our_role: Role, - init: SessionInit, + intents: Vec, + // init: SessionInit, ) -> anyhow::Result { let peer = iroh_net::endpoint::get_remote_node_id(&conn)?; let (initial_transmission, channels, tasks) = setup(conn, me, our_role).await?; let handle = actor - .init_session(peer, our_role, initial_transmission, channels, init) + .init_session(peer, our_role, initial_transmission, channels, intents) .await?; Ok(SessionHandle { handle, tasks }) } - +// #[derive(Debug)] pub struct SessionHandle { handle: actor::SessionHandle, @@ -335,7 +337,7 @@ mod tests { meadowcap::AccessMode, willow::{Entry, InvalidPath, Path}, }, - session::{Interests, Role, SessionInit, SessionMode}, + session::{intents::IntentHandle, Interests, Role, SessionInit, SessionMode}, }; const ALPN: &[u8] = b"iroh-willow/0"; @@ -402,6 +404,8 @@ mod tests { let init_alfie = SessionInit::new(Interests::All, SessionMode::ReconcileOnce); let init_betty = SessionInit::new(Interests::All, SessionMode::ReconcileOnce); + let (mut intent_alfie, intent_alfie_data) = IntentHandle::new(init_alfie); + let (mut intent_betty, intent_betty_data) = IntentHandle::new(init_betty); info!("init took {:?}", start.elapsed()); @@ -419,25 +423,33 @@ mod tests { handle_alfie.clone(), conn_alfie, Role::Alfie, - init_alfie + 
vec![intent_alfie_data] ), run( node_id_betty, handle_betty.clone(), conn_betty, Role::Betty, - init_betty + vec![intent_betty_data] ) ); let mut session_alfie = session_alfie?; let mut session_betty = session_betty?; - let (res_alfie, res_betty) = tokio::join!(session_alfie.join(), session_betty.join()); - info!(time=?start.elapsed(), "reconciliation finished"); - info!("alfie res {:?}", res_alfie); - info!("betty res {:?}", res_betty); + let (res_alfie, res_betty) = tokio::join!(intent_alfie.complete(), intent_betty.complete()); + info!("alfie intent res {:?}", res_alfie); + info!("betty intent res {:?}", res_betty); + assert!(res_alfie.is_ok()); + assert!(res_betty.is_ok()); + + let (res_alfie, res_betty) = tokio::join!(session_alfie.join(), session_betty.join()); + info!("alfie session res {:?}", res_alfie); + info!("betty session res {:?}", res_betty); assert!(res_alfie.is_ok()); assert!(res_betty.is_ok()); + + info!(time=?start.elapsed(), "reconciliation finished"); + let alfie_entries = get_entries(&handle_alfie, namespace_id).await?; let betty_entries = get_entries(&handle_betty, namespace_id).await?; info!("alfie has now {} entries", alfie_entries.len()); @@ -543,20 +555,23 @@ mod tests { let init_alfie = SessionInit::new(Interests::All, SessionMode::Live); let init_betty = SessionInit::new(Interests::All, SessionMode::Live); + let (mut intent_alfie, intent_alfie_data) = IntentHandle::new(init_alfie); + let (mut intent_betty, intent_betty_data) = IntentHandle::new(init_betty); + let (session_alfie, session_betty) = tokio::join!( run( node_id_alfie, handle_alfie.clone(), conn_alfie, Role::Alfie, - init_alfie + vec![intent_alfie_data] ), run( node_id_betty, handle_betty.clone(), conn_betty, Role::Betty, - init_betty + vec![intent_betty_data] ) ); let mut session_alfie = session_alfie?; @@ -568,11 +583,17 @@ mod tests { tokio::time::sleep(Duration::from_secs(1)).await; session_alfie.close(); - let (res_alfie, res_betty) = tokio::join!(session_alfie.join(), 
session_betty.join()); + let (res_alfie, res_betty) = tokio::join!(intent_alfie.complete(), intent_betty.complete()); info!(time=?start.elapsed(), "reconciliation finished"); + info!("alfie intent res {:?}", res_alfie); + info!("betty intent res {:?}", res_betty); + assert!(res_alfie.is_ok()); + assert!(res_betty.is_ok()); + + let (res_alfie, res_betty) = tokio::join!(session_alfie.join(), session_betty.join()); - info!("alfie res {:?}", res_alfie); - info!("betty res {:?}", res_betty); + info!("alfie session res {:?}", res_alfie); + info!("betty session res {:?}", res_betty); assert!(res_alfie.is_ok()); assert!(res_betty.is_ok()); let alfie_entries = get_entries(&handle_alfie, namespace_id).await?; diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 1181a0baee..66e3086b03 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -1,8 +1,17 @@ -use std::collections::{hash_map, BTreeMap, BTreeSet, HashMap, HashSet}; +use std::{ + collections::{hash_map, BTreeMap, BTreeSet, HashMap, HashSet}, + sync::mpsc, +}; + +use tokio::sync::oneshot; use crate::{ auth::CapSelector, proto::{grouping::AreaOfInterest, sync::ReadAuthorisation}, + session::{ + events::EventKind, + intents::{IntentChannels, IntentData}, + }, }; mod aoi_finder; @@ -11,6 +20,7 @@ pub mod channels; mod data; mod error; pub mod events; +pub mod intents; mod pai_finder; mod payload; mod reconciler; @@ -114,9 +124,9 @@ impl From for Interests { } } -#[derive(Debug, Clone)] +#[derive(Debug)] pub enum SessionUpdate { - AddInterests(Interests), + SubmitIntent(IntentData), } // impl Interest { diff --git a/iroh-willow/src/session/aoi_finder.rs b/iroh-willow/src/session/aoi_finder.rs index 354c83eeaa..326b041090 100644 --- a/iroh-willow/src/session/aoi_finder.rs +++ b/iroh-willow/src/session/aoi_finder.rs @@ -113,12 +113,12 @@ impl IntersectionFinder { async fn add_interests(&mut self, interests: InterestMap) { for (authorisation, aois) in interests.into_iter() { - let 
capability_handle = self.caps.find_ours(authorisation.read_cap()); let namespace = authorisation.namespace(); match self.interests.entry(authorisation.clone()) { hash_map::Entry::Occupied(mut entry) => { // The authorisation is already submitted. let existing = entry.get_mut(); + let capability_handle = self.caps.find_ours(authorisation.read_cap()); for aoi in aois { // If the AoI is new, and the capability is already bound, bind and send // the AoI right away. diff --git a/iroh-willow/src/session/capabilities.rs b/iroh-willow/src/session/capabilities.rs index 5ab04ec46b..ea3508cb35 100644 --- a/iroh-willow/src/session/capabilities.rs +++ b/iroh-willow/src/session/capabilities.rs @@ -58,6 +58,10 @@ impl Capabilities { }) } + pub fn is_revealed(&self) -> bool { + self.0.borrow().challenge.is_revealed() + } + pub fn find_ours(&self, cap: &ReadCapability) -> Option { self.0.borrow().ours.find(cap) } @@ -138,10 +142,6 @@ impl Capabilities { Ok(()) } - // pub fn is_revealed(&self) -> bool { - // self.0.borrow().challenge.is_revealed() - // } - pub fn sign_subspace_capabiltiy( &self, secrets: &S, diff --git a/iroh-willow/src/session/events.rs b/iroh-willow/src/session/events.rs index 830710e06c..f978e59f15 100644 --- a/iroh-willow/src/session/events.rs +++ b/iroh-willow/src/session/events.rs @@ -4,6 +4,8 @@ use std::{ }; use anyhow::{anyhow, Context, Result}; +use futures_buffered::join_all; +use futures_concurrency::future::Join; use futures_lite::StreamExt; use futures_util::FutureExt; use iroh_net::{ @@ -28,39 +30,28 @@ use crate::{ sync::{ReadAuthorisation, ReadCapability}, }, session::{ - error::ChannelReceiverDropped, Error, Interests, Role, SessionId, SessionInit, SessionMode, - SessionUpdate, + error::ChannelReceiverDropped, + intents::{IntentChannels, IntentData, IntentHandle, IntentInfo}, + Error, Interests, Role, SessionId, SessionInit, SessionMode, SessionUpdate, }, store::traits::Storage, }; -use super::SessionUpdate::AddInterests; - -type NamespaceInterests 
= HashMap>; - const COMMAND_CHANNEL_CAP: usize = 128; -const INTENT_UPDATE_CAP: usize = 16; -const INTENT_EVENT_CAP: usize = 64; #[derive(Debug, Clone)] -pub struct EventSender(pub mpsc::Sender); +pub struct EventSender(pub mpsc::Sender); impl EventSender { - pub async fn send(&self, event: EventKind) -> Result<(), ChannelReceiverDropped> { + pub async fn send(&self, event: SessionEvent) -> Result<(), ChannelReceiverDropped> { self.0.send(event).await.map_err(|_| ChannelReceiverDropped) } } -#[derive(Debug, Clone)] -pub struct SessionEvent { - pub session_id: SessionId, - pub event: EventKind, -} - -impl SessionEvent { - pub fn new(session_id: SessionId, event: EventKind) -> Self { - Self { session_id, event } - } +#[derive(Debug)] +pub enum SessionEvent { + Revealed, + Complete { result: Result<(), Arc> }, } #[derive(Debug, Clone, Eq, PartialEq)] @@ -78,8 +69,8 @@ pub enum EventKind { area: AreaOfInterest, }, ReconciledAll, - Closed { - result: Result<(), Arc>, + Abort { + error: Arc, }, } @@ -94,22 +85,10 @@ impl EventKind { } } -#[derive(Debug)] -pub enum IntentUpdate { - AddInterests(Interests), - Close, -} - #[derive(Debug)] pub enum Command { - SyncWithPeer { - peer: NodeId, - init: SessionInit, - reply: oneshot::Sender>, - }, - HandleConnection { - conn: Connection, - }, + SubmitIntent { peer: NodeId, intent: IntentData }, + HandleConnection { conn: Connection }, } #[derive(Debug, Clone)] @@ -129,16 +108,14 @@ impl ManagedHandle { let (command_tx, command_rx) = mpsc::channel(COMMAND_CHANNEL_CAP); let peer_manager = PeerManager { session_event_rx: Default::default(), - intent_update_rx: Default::default(), + betty_intent_rx: Default::default(), command_rx, establish_tasks: Default::default(), net_tasks: Default::default(), actor: actor.clone(), peers: Default::default(), - sessions: Default::default(), endpoint: endpoint.clone(), dialer: Dialer::new(endpoint), - next_intent_id: 0, }; let task_handle = tokio::task::spawn( async move { 
peer_manager.run().await.map_err(|err| format!("{err:?}")) } @@ -159,11 +136,12 @@ impl ManagedHandle { } pub async fn sync_with_peer(&self, peer: NodeId, init: SessionInit) -> Result { - let (reply, reply_rx) = oneshot::channel(); + // TODO: expose cap + let (handle, intent) = IntentHandle::new(init); self.command_tx - .send(Command::SyncWithPeer { peer, init, reply }) + .send(Command::SubmitIntent { peer, intent }) .await?; - reply_rx.await? + Ok(handle) } } @@ -179,23 +157,17 @@ type NetTasks = JoinSet>; type EstablishRes = (NodeId, Result<(NetTasks, SessionHandle)>); -pub type IntentId = (NodeId, u64); - #[derive(derive_more::Debug)] pub struct PeerManager { - session_event_rx: StreamMap>, - #[debug("StreamMap")] - intent_update_rx: StreamMap>>, + session_event_rx: StreamMap>, + betty_intent_rx: StreamMap>, command_rx: mpsc::Receiver, establish_tasks: JoinSet, net_tasks: JoinSet<(NodeId, Result<()>)>, - actor: ActorHandle, peers: HashMap, - sessions: HashMap, endpoint: Endpoint, dialer: Dialer, - next_intent_id: u64, } impl PeerManager { @@ -205,16 +177,9 @@ impl PeerManager { Some((session_id, event)) = self.session_event_rx.next(), if !self.session_event_rx.is_empty() => { self.received_event(session_id, event).await; } - Some(((peer, intent_id), event)) = self.intent_update_rx.next(), if !self.intent_update_rx.is_empty() => { - if let Some(event) = event { - // Received an intent update. - if let Err(err) = self.update_intent(peer, intent_id, event).await { - tracing::warn!(peer=%peer.fmt_short(), %intent_id, ?err, "failed to update intent"); - } - } else { - // The intent update sender was dropped: Cancel the intent. - self.cancel_intent(peer, intent_id); - } + Some((_session_id, _event)) = self.betty_intent_rx.next(), if !self.betty_intent_rx.is_empty() => { + // TODO: Do we want to emit these somewhere? 
+ // self.received_event(session_id, event).await; } Some(command) = self.command_rx.recv() => { self.received_command(command).await; @@ -223,7 +188,7 @@ impl PeerManager { match res { Err(err) if err.is_cancelled() => continue, Err(err) => Err(err).context("establish task paniced")?, - Ok((peer, Ok((tasks, handle)))) => self.on_established(peer, handle, tasks).await?, + Ok((peer, Ok((tasks, handle)))) => self.on_established(peer, handle, tasks)?, Ok((peer, Err(err))) => self.remove_peer(peer, Err(Arc::new(Error::Net(err)))).await, } } @@ -232,7 +197,10 @@ impl PeerManager { Err(err) if err.is_cancelled() => continue, Err(err) => Err(err).context("net task paniced")?, Ok((_peer, Ok(())))=> continue, - Ok((peer, Err(err))) => self.on_net_task_failed(peer, err), + Ok((peer, Err(err))) => { + // TODO: Forward to session? + tracing::warn!(?peer, ?err, "net task failed"); + } } }, Some((peer, conn)) = self.dialer.next() => { @@ -248,34 +216,49 @@ impl PeerManager { Ok(()) } + pub async fn received_command(&mut self, command: Command) { + tracing::info!(?command, "command"); + match command { + Command::SubmitIntent { peer, intent } => { + if let Err(err) = self.submit_intent(peer, intent).await { + tracing::warn!("failed to submit intent: {err:?}"); + } + } + Command::HandleConnection { conn } => { + self.handle_connection(conn, Role::Betty).await; + } + } + } + async fn remove_peer(&mut self, peer: NodeId, result: Result<(), Arc>) { let Some(peer_state) = self.peers.remove(&peer) else { tracing::warn!(?peer, "attempted to remove unknown peer"); return; }; - let (intents, session_id) = match peer_state { + let intents = match peer_state { PeerState::Connecting { intents, .. } => { self.dialer.abort_dial(&peer); - (Some(intents), None) + Some(intents) } - PeerState::Establishing { intents, .. 
} => (Some(intents), None), - PeerState::Active { session_id } => { - let session = self.sessions.remove(&session_id); - let intents = session.map(|session| session.intents); - (intents, Some(session_id)) + PeerState::Establishing { intents, .. } => Some(intents), + PeerState::Active { cancel_token, .. } => { + cancel_token.cancel(); + None } PeerState::Placeholder => unreachable!(), }; if let Some(intents) = intents { - for intent in &intents { - self.intent_update_rx.remove(&(peer, intent.intent_id)); + if let Err(error) = result { + join_all( + intents + .into_iter() + .map(|intent| intent.send_abort(error.clone())), + ) + .await; } - let senders = intents.into_iter().map(|intent| intent.event_tx); - send_all(senders, EventKind::Closed { result }).await; - } - if let Some(session_id) = session_id { - self.session_event_rx.remove(&session_id); } + self.session_event_rx.remove(&peer); + self.betty_intent_rx.remove(&peer); } async fn on_dial_fail(&mut self, peer: NodeId, err: anyhow::Error) { @@ -283,23 +266,7 @@ impl PeerManager { self.remove_peer(peer, result).await; } - fn session_mut(&mut self, peer: &NodeId) -> Option<&mut SessionInfo> { - let peer_state = self.peers.get(peer)?; - match peer_state { - PeerState::Active { session_id } => self.sessions.get_mut(session_id), - _ => None, - } - } - - fn on_net_task_failed(&mut self, peer: NodeId, err: anyhow::Error) { - if let Some(session) = self.session_mut(&peer) { - if session.net_error.is_none() { - session.net_error = Some(err); - } - } - } - - async fn on_established( + fn on_established( &mut self, peer: NodeId, session_handle: SessionHandle, @@ -311,19 +278,15 @@ impl PeerManager { .ok_or_else(|| anyhow!("unreachable: on_established called for unknown peer"))?; let current_state = std::mem::replace(peer_state, PeerState::Placeholder); let PeerState::Establishing { - our_role, - intents, - submitted_interests, - pending_interests, + // our_role, + intents: _, + betty_catchall_intent, } = current_state else 
{ anyhow::bail!("unreachable: on_established called for peer in wrong state") }; - if our_role.is_alfie() && intents.is_empty() { - session_handle.close(); - } let SessionHandle { - session_id, + // session_id, cancel_token, update_tx, event_rx, @@ -331,238 +294,57 @@ impl PeerManager { self.net_tasks.spawn( async move { crate::net::join_all(&mut net_tasks).await }.map(move |r| (peer, r)), ); - let mut session_info = SessionInfo { - peer, - our_role, - complete_areas: Default::default(), - submitted_interests, - intents, - net_error: None, - update_tx, + self.session_event_rx + .insert(peer, ReceiverStream::new(event_rx)); + // TODO: submit intents that were submitted while establishing + // for intent in intents { + // update_tx.send(SessionUpdate::SubmitIntent(intent)).await?; + // } + if let Some(handle) = betty_catchall_intent { + self.betty_intent_rx.insert(peer, handle.split().1); + } + *peer_state = PeerState::Active { + // session_id, cancel_token, + update_tx, + // our_role, }; - if !pending_interests.is_empty() { - session_info.push_interests(pending_interests).await?; - } - self.sessions.insert(session_id, session_info); - self.session_event_rx - .insert(session_id, ReceiverStream::new(event_rx)); - *peer_state = PeerState::Active { session_id }; Ok(()) } - pub async fn sync_with_peer( - &mut self, - peer: NodeId, - init: SessionInit, - ) -> Result { - let intent_interests = self.actor.resolve_interests(init.interests).await?; - // TODO: Allow to configure cap? 
- let (event_tx, event_rx) = mpsc::channel(INTENT_EVENT_CAP); - let (update_tx, update_rx) = mpsc::channel(INTENT_UPDATE_CAP); - let intent_id = { - let intent_id = self.next_intent_id; - self.next_intent_id += 1; - intent_id - }; - let info = IntentInfo { - intent_id, - interests: flatten_interests(&intent_interests), - mode: init.mode, - event_tx, - }; - let handle = IntentHandle { - event_rx, - update_tx, - }; - self.intent_update_rx.insert( - (peer, intent_id), - StreamNotifyClose::new(ReceiverStream::new(update_rx)), - ); + pub async fn submit_intent(&mut self, peer: NodeId, intent: IntentData) -> Result<()> { match self.peers.get_mut(&peer) { None => { self.dialer.queue_dial(peer, ALPN); - let intents = vec![info]; - let peer_state = PeerState::Connecting { - intents, - interests: intent_interests, - }; + let intents = vec![intent]; + let peer_state = PeerState::Connecting { intents }; self.peers.insert(peer, peer_state); } Some(state) => match state { - PeerState::Connecting { intents, interests } => { - intents.push(info); - merge_interests(interests, intent_interests); + PeerState::Connecting { intents } => { + intents.push(intent); } - PeerState::Establishing { - intents, - pending_interests, - .. - } => { - intents.push(info); - merge_interests(pending_interests, intent_interests); + PeerState::Establishing { intents, .. } => { + intents.push(intent); } - PeerState::Active { session_id, .. } => { - let session = self.sessions.get_mut(session_id).expect("session to exist"); - session.intents.push(info); - session.push_interests(intent_interests).await?; + PeerState::Active { update_tx, .. 
} => { + update_tx.send(SessionUpdate::SubmitIntent(intent)).await?; } PeerState::Placeholder => unreachable!(), }, }; - Ok(handle) - } - - pub async fn update_intent( - &mut self, - peer: NodeId, - intent_id: u64, - update: IntentUpdate, - ) -> Result<()> { - match update { - IntentUpdate::AddInterests(interests) => { - let add_interests = self.actor.resolve_interests(interests).await?; - match self.peers.get_mut(&peer) { - None => anyhow::bail!("invalid node id"), - Some(peer_state) => match peer_state { - PeerState::Connecting { intents, interests } => { - let intent_info = intents - .iter_mut() - .find(|i| i.intent_id == intent_id) - .ok_or_else(|| anyhow!("invalid intent id"))?; - intent_info.merge_interests(&add_interests); - merge_interests(interests, add_interests); - } - PeerState::Establishing { - intents, - pending_interests, - .. - } => { - let intent_info = intents - .iter_mut() - .find(|i| i.intent_id == intent_id) - .ok_or_else(|| anyhow!("invalid intent id"))?; - intent_info.merge_interests(&add_interests); - merge_interests(pending_interests, add_interests); - } - PeerState::Active { session_id, .. } => { - let session = - self.sessions.get_mut(session_id).expect("session to exist"); - let Some(intent_info) = session - .intents - .iter_mut() - .find(|i| i.intent_id == intent_id) - else { - anyhow::bail!("invalid intent id"); - }; - intent_info.merge_interests(&add_interests); - session.push_interests(add_interests).await?; - } - PeerState::Placeholder => unreachable!(), - }, - }; - } - IntentUpdate::Close => { - self.cancel_intent(peer, intent_id); - } - } Ok(()) } - pub fn cancel_intent(&mut self, peer: NodeId, intent_id: u64) { - let Some(peer_state) = self.peers.get_mut(&peer) else { - return; - }; - - self.intent_update_rx.remove(&(peer, intent_id)); - - match peer_state { - PeerState::Connecting { intents, .. 
} => { - intents.retain(|intent_info| intent_info.intent_id != intent_id); - if intents.is_empty() { - self.dialer.abort_dial(&peer); - self.peers.remove(&peer); - } - } - PeerState::Establishing { intents, .. } => { - intents.retain(|intent_info| intent_info.intent_id != intent_id); - } - PeerState::Active { session_id, .. } => { - let session = self.sessions.get_mut(session_id).expect("session to exist"); - session - .intents - .retain(|intent| intent.intent_id != intent_id); - if session.intents.is_empty() { - session.cancel_token.cancel(); - } - } - PeerState::Placeholder => unreachable!(), - } - } - - pub async fn received_command(&mut self, command: Command) { - tracing::info!(?command, "command"); - match command { - Command::SyncWithPeer { peer, init, reply } => { - let res = self.sync_with_peer(peer, init).await; - reply.send(res).ok(); - } - Command::HandleConnection { conn } => { - self.handle_connection(conn, Role::Betty).await; - } - } - } - - pub async fn received_event(&mut self, session_id: SessionId, event: EventKind) { + pub async fn received_event(&mut self, peer: NodeId, event: SessionEvent) { tracing::info!(?event, "event"); - let Some(session) = self.sessions.get_mut(&session_id) else { - tracing::warn!(?session_id, ?event, "Got event for unknown session"); - return; - }; - - let peer = session.peer; - - if let EventKind::Closed { mut result } = event { - if result.is_ok() { - // Inject error from networking tasks. 
- if let Some(net_error) = session.net_error.take() { - result = Err(Arc::new(Error::Net(net_error))); - } + match event { + SessionEvent::Revealed => {} + SessionEvent::Complete { result } => { + self.remove_peer(peer, result).await; } - self.remove_peer(peer, result).await; - return; - } - - if let EventKind::Reconciled { namespace, area } = &event { - session - .complete_areas - .entry(*namespace) - .or_default() - .insert(area.clone()); - } - - let send_futs = session - .intents - .iter_mut() - .map(|intent_info| intent_info.handle_event(&event)); - let send_res = futures_buffered::join_all(send_futs).await; - let mut removed = 0; - for (i, res) in send_res.into_iter().enumerate() { - match res { - Err(ReceiverDropped) | Ok(false) => { - session.intents.remove(i - removed); - removed += 1; - } - Ok(true) => {} - } - } - - // Cancel the session if all intents are gone. - if session.our_role.is_alfie() && session.intents.is_empty() { - session.cancel_token.cancel(); } } - async fn handle_connection(&mut self, conn: Connection, our_role: Role) { let peer = match iroh_net::endpoint::get_remote_node_id(&conn) { Ok(node_id) => node_id, @@ -571,21 +353,21 @@ impl PeerManager { return; } }; - if let Err(err) = self.handle_connection_inner(peer, conn, our_role).await { + if let Err(err) = self.handle_connection_inner(peer, conn, our_role) { tracing::warn!(?peer, ?err, "failed to establish connection"); let result = Err(Arc::new(Error::Net(err))); self.remove_peer(peer, result).await; } } - async fn handle_connection_inner( + fn handle_connection_inner( &mut self, peer: NodeId, conn: Connection, our_role: Role, ) -> Result<()> { let peer_state = self.peers.get_mut(&peer); - let (interests, mode, intents) = match our_role { + let (intents, betty_catchall_intent) = match our_role { Role::Alfie => { let peer_state = peer_state .ok_or_else(|| anyhow!("got connection for peer without any intents"))?; @@ -600,18 +382,11 @@ impl PeerManager { tracing::warn!("got connection for 
already establishing peer"); return Ok(()); } - PeerState::Connecting { intents, interests } => { - let mode = if intents.iter().any(|i| matches!(i.mode, SessionMode::Live)) { - SessionMode::Live - } else { - SessionMode::ReconcileOnce - }; - (interests, mode, intents) - } + PeerState::Connecting { intents } => (intents, None), } } Role::Betty => { - let intents = if let Some(peer_state) = peer_state { + let mut intents = if let Some(peer_state) = peer_state { let peer_state = std::mem::replace(peer_state, PeerState::Placeholder); match peer_state { PeerState::Placeholder => unreachable!(), @@ -631,232 +406,57 @@ impl PeerManager { } else { Default::default() }; - let interests = self.actor.resolve_interests(Interests::All).await?; - (interests, SessionMode::Live, intents) + let all_init = SessionInit::new(Interests::All, SessionMode::Live); + let (handle, data) = IntentHandle::new(all_init); + intents.push(data); + (intents, Some(handle)) } }; let me = self.endpoint.node_id(); let actor = self.actor.clone(); - let submitted_interests = interests.clone(); - let init = SessionInit { - mode, - interests: Interests::Exact(interests), - }; let establish_fut = async move { let (initial_transmission, channels, tasks) = setup(conn, me, our_role).await?; let session_handle = actor - .init_session(peer, our_role, initial_transmission, channels, init) + .init_session(peer, our_role, initial_transmission, channels, intents) .await?; Ok::<_, anyhow::Error>((tasks, session_handle)) }; let establish_fut = establish_fut.map(move |res| (peer, res)); let _task_handle = self.establish_tasks.spawn(establish_fut); let peer_state = PeerState::Establishing { - our_role, - intents, - submitted_interests, - pending_interests: Default::default(), + // our_role, + intents: Vec::new(), + betty_catchall_intent, }; self.peers.insert(peer, peer_state); Ok(()) } } -#[derive(Debug)] -struct SessionInfo { - peer: NodeId, - our_role: Role, - complete_areas: NamespaceInterests, - 
submitted_interests: InterestMap, - intents: Vec, - net_error: Option, - cancel_token: CancellationToken, - update_tx: mpsc::Sender, -} - -impl SessionInfo { - async fn push_interests(&mut self, interests: InterestMap) -> Result<()> { - let new_interests = self.merge_interests(interests); - self.update_tx - .send(AddInterests(Interests::Exact(new_interests))) - .await?; - Ok(()) - } - - fn merge_interests(&mut self, interests: InterestMap) -> InterestMap { - let mut new: InterestMap = HashMap::new(); - for (auth, aois) in interests.into_iter() { - match self.submitted_interests.entry(auth.clone()) { - hash_map::Entry::Vacant(entry) => { - entry.insert(aois.clone()); - new.insert(auth, aois); - } - hash_map::Entry::Occupied(mut entry) => { - let existing = entry.get_mut(); - for aoi in aois { - if !existing.contains(&aoi) { - existing.insert(aoi.clone()); - new.entry(auth.clone()).or_default().insert(aoi); - } - } - } - } - } - new - } -} - #[derive(Debug)] enum PeerState { Connecting { - intents: Vec, - interests: InterestMap, + intents: Vec, }, Establishing { - our_role: Role, - intents: Vec, - submitted_interests: InterestMap, - pending_interests: InterestMap, + // our_role: Role, + intents: Vec, + betty_catchall_intent: Option, }, Active { - session_id: SessionId, + // session_id: SessionId, + // our_role: Role, + update_tx: mpsc::Sender, + cancel_token: CancellationToken, }, Placeholder, } -#[derive(Debug)] -pub struct IntentHandle { - event_rx: mpsc::Receiver, - update_tx: mpsc::Sender, -} - -impl IntentHandle { - // TODO: impl stream - pub async fn next(&mut self) -> Option { - self.event_rx.recv().await - } - - pub async fn complete(&mut self) -> Result<(), Arc> { - loop { - let event = self - .event_rx - .recv() - .await - .ok_or_else(|| Arc::new(Error::ActorFailed))?; - if let EventKind::Closed { result } = event { - return result; - } - } - } - - pub async fn add_interests(&self, interests: impl Into) -> Result<()> { - self.update_tx - 
.send(IntentUpdate::AddInterests(interests.into())) - .await?; - Ok(()) - } - - pub async fn close(&self) { - self.update_tx.send(IntentUpdate::Close).await.ok(); - } -} - -#[derive(Debug)] -struct IntentInfo { - intent_id: u64, - interests: NamespaceInterests, - mode: SessionMode, - event_tx: mpsc::Sender, -} - -impl IntentInfo { - fn merge_interests(&mut self, interests: &InterestMap) { - for (auth, aois) in interests.iter() { - self.interests - .entry(auth.namespace()) - .or_default() - .extend(aois.clone()); - } - } - - async fn handle_event(&mut self, event: &EventKind) -> Result { - let send = |event: EventKind| async { - self.event_tx.send(event).await.map_err(|_| ReceiverDropped) - }; - - let stay_alive = match &event { - EventKind::CapabilityIntersection { namespace, .. } => { - if self.interests.contains_key(namespace) { - send(event.clone()).await?; - } - true - } - EventKind::InterestIntersection { area, namespace } => { - if let Some(interests) = self.interests.get(namespace) { - let matches = interests - .iter() - .any(|x| x.area.has_intersection(&area.area)); - if matches { - send(event.clone()).await?; - } - } - true - } - EventKind::Reconciled { area, namespace } => { - if let Some(interests) = self.interests.get_mut(namespace) { - let matches = interests - .iter() - .any(|x| x.area.has_intersection(&area.area)); - if matches { - send(event.clone()).await?; - interests.retain(|x| !area.area.includes_area(&x.area)); - if interests.is_empty() { - send(EventKind::ReconciledAll).await?; - } - } - } - true - } - EventKind::Closed { .. 
} => { - send(event.clone()).await?; - false - } - EventKind::ReconciledAll => true, - }; - Ok(stay_alive) - } -} - #[derive(Debug, thiserror::Error)] #[error("receiver dropped")] pub struct ReceiverDropped; -fn merge_interests(a: &mut InterestMap, b: InterestMap) { - for (cap, aois) in b.into_iter() { - a.entry(cap).or_default().extend(aois); - } -} - -fn flatten_interests(interests: &InterestMap) -> NamespaceInterests { - let mut out = NamespaceInterests::new(); - for (cap, aois) in interests { - out.entry(cap.namespace()).or_default().extend(aois.clone()); - } - out -} - -async fn send_all( - senders: impl IntoIterator>>, - message: T, -) -> Vec>> { - let futs = senders.into_iter().map(|sender| { - let message = message.clone(); - async move { sender.borrow().send(message).await } - }); - futures_buffered::join_all(futs).await -} - #[cfg(test)] mod tests { use bytes::Bytes; @@ -870,7 +470,7 @@ mod tests { actor::ActorHandle, auth::{CapSelector, DelegateTo}, form::{AuthForm, EntryForm, PayloadForm, SubspaceForm, TimestampForm}, - net::run, + // net::run, proto::{ grouping::{Area, AreaOfInterest, ThreeDRange}, keys::{NamespaceId, NamespaceKind, UserId}, @@ -935,10 +535,10 @@ mod tests { assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); - assert_eq!( - intent.next().await.unwrap(), - EventKind::Closed { result: Ok(()) } - ); + // assert_eq!( + // intent.next().await.unwrap(), + // EventKind::Closed { result: Ok(()) } + // ); assert!(intent.next().await.is_none()); } @@ -980,10 +580,10 @@ mod tests { assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); - assert_eq!( - intent.next().await.unwrap(), - EventKind::Closed { result: Ok(()) } - ); + // assert_eq!( + // intent.next().await.unwrap(), + // EventKind::Closed { result: Ok(()) } + // ); assert!(intent.next().await.is_none()); } @@ -1010,11 +610,11 @@ mod tests { insert(&betty, namespace, betty_user, &[b"bar"], "bar 1").await?; let path = Path::new(&[b"foo"]).unwrap(); - let 
interests = Interests::select().area(namespace, [Area::path(path.clone())]); let init = SessionInit::new(interests, SessionMode::Live); let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); + println!("start"); assert_eq!( intent.next().await.unwrap(), EventKind::CapabilityIntersection { @@ -1022,6 +622,7 @@ mod tests { area: Area::full(), } ); + println!("first in!"); assert_eq!( intent.next().await.unwrap(), EventKind::InterestIntersection { @@ -1062,10 +663,6 @@ mod tests { intent.close().await; assert!(intent.next().await.is_none(),); - // assert_eq!( - // intent.next().await.unwrap(), - // EventKind::Closed { result: Ok(()) } - // ); shutdown(); Ok(()) diff --git a/iroh-willow/src/session/intents.rs b/iroh-willow/src/session/intents.rs new file mode 100644 index 0000000000..957a0a79ae --- /dev/null +++ b/iroh-willow/src/session/intents.rs @@ -0,0 +1,443 @@ +use std::{ + collections::{hash_map, HashMap, HashSet, VecDeque}, + future::Future, + sync::Arc, +}; + +use anyhow::{anyhow, Context, Result}; +use futures_lite::{Stream, StreamExt}; +use futures_util::{FutureExt, Sink}; +use genawaiter::rc::Co; +use iroh_net::{ + dialer::Dialer, endpoint::Connection, util::SharedAbortingJoinHandle, Endpoint, NodeId, +}; +use tokio::{ + io::Interest, + sync::{mpsc, oneshot}, + task::{AbortHandle, JoinHandle, JoinSet}, +}; +use tokio_stream::{wrappers::ReceiverStream, StreamMap, StreamNotifyClose}; +use tokio_util::sync::{CancellationToken, PollSender}; +use tracing::{debug, error_span, Instrument}; + +use crate::{ + actor::{Actor, ActorHandle, SessionHandle}, + auth::{Auth, InterestMap}, + net::{setup, ALPN}, + proto::{ + grouping::{Area, AreaOfInterest}, + keys::NamespaceId, + sync::{ReadAuthorisation, ReadCapability}, + }, + session::{ + error::ChannelReceiverDropped, + events::{EventKind, ReceiverDropped}, + Error, Interests, Role, SessionId, SessionInit, SessionMode, SessionUpdate, + }, + store::traits::Storage, + util::gen_stream::GenStream, +}; + 
+type NamespaceInterests = HashMap>; + +const INTENT_UPDATE_CAP: usize = 16; +const INTENT_EVENT_CAP: usize = 64; + +pub type IntentId = u64; + +type Sender = mpsc::Sender; +type Receiver = mpsc::Receiver; + +#[derive(Debug)] +pub struct IntentData { + pub init: SessionInit, + pub channels: IntentChannels, +} + +impl IntentData { + pub(super) async fn send_abort(self, error: Arc) { + self.channels + .event_tx + .send(EventKind::Abort { error }) + .await + .ok(); + } +} + +#[derive(Debug)] +pub enum Input { + EmitEvent(EventKind), + SubmitIntent(IntentData), +} + +#[derive(Debug)] +pub enum Output { + SubmitInterests(InterestMap), + AllIntentsDropped, +} + +#[derive(derive_more::Debug)] +pub struct IntentDispatcher { + pending_intents: VecDeque, + intents: HashMap, + auth: Auth, + #[debug("StreamMap")] + intent_update_rx: StreamMap>>, + next_intent_id: u64, + complete_areas: NamespaceInterests, +} + +impl IntentDispatcher { + pub fn new(auth: Auth, initial_intents: impl IntoIterator) -> Self { + Self { + pending_intents: initial_intents.into_iter().collect(), + intents: Default::default(), + auth, + intent_update_rx: Default::default(), + next_intent_id: 0, + complete_areas: Default::default(), + } + } + + pub async fn abort_all(&self, error: Arc) { + let _ = futures_buffered::join_all( + self.pending_intents + .iter() + .map(|intent| &intent.channels.event_tx) + .chain(self.intents.values().map(|intent| &intent.event_tx)) + .map(|event_tx| { + let error = error.clone(); + async move { event_tx.send(EventKind::Abort { error }).await } + }), + ) + .await; + } + + /// Run the [`IntentDispatcher`]. + /// + /// The returned stream is a generator, so it must be polled repeatedly to progress. 
+ pub fn run_gen( + &mut self, + inbox: impl Stream + 'static, + ) -> GenStream> + '_> { + GenStream::new(|co| self.run(co, inbox)) + } + + pub async fn run( + &mut self, + co: Co, + inbox: impl Stream, + ) -> Result<(), Error> { + tokio::pin!(inbox); + + while let Some(intent) = self.pending_intents.pop_front() { + self.submit_intent(&co, intent).await?; + } + debug!("submitted initial intents, start loop"); + loop { + tokio::select! { + input = inbox.next() => { + tracing::debug!(?input, "tick: inbox"); + let Some(input) = input else { + break; + }; + match input { + Input::SubmitIntent(data) => self.submit_intent(&co, data).await?, + Input::EmitEvent(event) => self.emit_event(&co, event).await, + } + } + Some((intent_id, event)) = self.intent_update_rx.next(), if !self.intent_update_rx.is_empty() => { + tracing::debug!(?intent_id, ?event, "tick: intent_update"); + match event { + Some(event) => { + // Received an intent update. + if let Err(err) = self.update_intent(&co, intent_id, event).await { + tracing::warn!(%intent_id, ?err, "failed to update intent"); + } + }, + None => { + // The intent update sender was dropped: Cancel the intent if the event + // receiver is dropped too. + self.intent_update_rx.remove(&intent_id); + let events_tx_closed = self.intents.get(&intent_id).map(|intent| intent.events_closed()).unwrap_or(true); + if events_tx_closed { + self.cancel_intent(&co, intent_id).await; + } + } + } + } + } + } + Ok(()) + } + + async fn submit_intent(&mut self, co: &Co, intent: IntentData) -> Result<(), Error> { + let interests = self.auth.resolve_interests(intent.init.interests)?; + let intent_id = { + let intent_id = self.next_intent_id; + self.next_intent_id += 1; + intent_id + }; + let IntentChannels { + event_tx, + update_rx, + } = intent.channels; + let mut info = IntentInfo { + interests: flatten_interests(&interests), + mode: intent.init.mode, + event_tx, + }; + // Send out reconciled events for already-complete areas. 
+ for (namespace, areas) in &self.complete_areas { + for area in areas { + info.on_reconciled(*namespace, area).await?; + } + } + + if !info.is_complete() { + self.intents.insert(intent_id, info); + self.intent_update_rx.insert( + intent_id, + StreamNotifyClose::new(ReceiverStream::new(update_rx)), + ); + co.yield_(Output::SubmitInterests(interests)).await; + } + + Ok(()) + } + + async fn emit_event(&mut self, co: &Co, event: EventKind) { + if let EventKind::Reconciled { namespace, area } = &event { + self.complete_areas + .entry(*namespace) + .or_default() + .insert(area.clone()); + } + let send_futs = self + .intents + .iter_mut() + .map(|(id, info)| info.handle_event(&event).map(|res| (*id, res))); + let send_res = futures_buffered::join_all(send_futs).await; + for (id, res) in send_res.into_iter() { + match res { + Err(ReceiverDropped) => { + if !self.intent_update_rx.contains_key(&id) { + self.cancel_intent(co, id).await; + } + } + Ok(is_complete) => { + if is_complete { + self.cancel_intent(co, id).await; + } + } + } + } + } + + pub async fn update_intent( + &mut self, + co: &Co, + intent_id: u64, + update: IntentUpdate, + ) -> Result<()> { + debug!(?intent_id, ?update, "intent update"); + match update { + IntentUpdate::AddInterests(interests) => { + let add_interests = self.auth.resolve_interests(interests)?; + let Some(intent_info) = self.intents.get_mut(&intent_id) else { + anyhow::bail!("invalid intent id"); + }; + intent_info.merge_interests(&add_interests); + co.yield_(Output::SubmitInterests(add_interests)).await; + } + IntentUpdate::Close => { + self.cancel_intent(co, intent_id).await; + } + } + Ok(()) + } + + pub async fn cancel_intent(&mut self, co: &Co, intent_id: u64) { + debug!(?intent_id, "cancel intent"); + self.intent_update_rx.remove(&intent_id); + self.intents.remove(&intent_id); + if self.intents.is_empty() { + co.yield_(Output::AllIntentsDropped).await; + } + } +} + +#[derive(Debug)] +pub enum IntentUpdate { + AddInterests(Interests), + 
Close, +} + +#[derive(Debug)] +pub struct IntentHandle { + event_rx: Receiver, + update_tx: Sender, +} + +#[derive(Debug)] +pub struct IntentChannels { + event_tx: Sender, + update_rx: Receiver, +} + +impl IntentHandle { + pub fn new(init: SessionInit) -> (Self, IntentData) { + let (handle, channels) = Self::with_cap(INTENT_EVENT_CAP, INTENT_UPDATE_CAP); + let data = IntentData { init, channels }; + (handle, data) + } + + pub fn with_cap(event_cap: usize, update_cap: usize) -> (Self, IntentChannels) { + let (event_tx, event_rx) = mpsc::channel(event_cap); + let (update_tx, update_rx) = mpsc::channel(update_cap); + ( + IntentHandle { + event_rx, + update_tx, + }, + IntentChannels { + event_tx, + update_rx, + }, + ) + } + pub fn split(self) -> (PollSender, ReceiverStream) { + ( + PollSender::new(self.update_tx), + ReceiverStream::new(self.event_rx), + ) + } + + pub async fn next(&mut self) -> Option { + self.event_rx.recv().await + } + + pub async fn complete(&mut self) -> Result<(), Arc> { + while let Some(event) = self.event_rx.recv().await { + if let EventKind::Abort { error } = event { + return Err(error); + } + } + Ok(()) + } + + pub async fn add_interests(&self, interests: impl Into) -> Result<()> { + self.update_tx + .send(IntentUpdate::AddInterests(interests.into())) + .await?; + Ok(()) + } + + pub async fn close(&self) { + self.update_tx.send(IntentUpdate::Close).await.ok(); + } +} + +#[derive(Debug)] +pub(super) struct IntentInfo { + interests: NamespaceInterests, + mode: SessionMode, + event_tx: Sender, +} + +impl IntentInfo { + fn merge_interests(&mut self, interests: &InterestMap) { + for (auth, aois) in interests.iter() { + self.interests + .entry(auth.namespace()) + .or_default() + .extend(aois.clone()); + } + } + + fn is_complete(&self) -> bool { + self.interests.is_empty() && !self.mode.is_live() + } + + fn events_closed(&self) -> bool { + self.event_tx.is_closed() + } + + async fn on_reconciled(&mut self, namespace: NamespaceId, area: 
&AreaOfInterest) -> Result<()> { + if self.complete_area_if_matches(&namespace, &area.area) { + self.send(EventKind::Reconciled { + namespace, + area: area.clone(), + }) + .await?; + if self.interests.is_empty() { + self.send(EventKind::ReconciledAll).await? + } + } + Ok(()) + } + + fn matches_area(&self, namespace: &NamespaceId, area: &Area) -> bool { + self.interests + .get(namespace) + .map(|interests| interests.iter().any(|x| x.area.has_intersection(area))) + .unwrap_or(false) + } + + fn complete_area_if_matches(&mut self, namespace: &NamespaceId, area: &Area) -> bool { + let mut namespace_complete = false; + let mut matches = false; + if let Some(interests) = self.interests.get_mut(namespace) { + if interests.iter().any(|x| x.area.has_intersection(area)) { + matches = true; + interests.retain(|x| !area.includes_area(&x.area)); + if interests.is_empty() { + namespace_complete = true; + } + } + } + if namespace_complete { + self.interests.remove(namespace); + } + matches + } + + pub(super) async fn handle_event( + &mut self, + event: &EventKind, + ) -> Result { + let matches = match event { + EventKind::CapabilityIntersection { namespace, .. } => { + self.interests.contains_key(namespace) + } + EventKind::InterestIntersection { area, namespace } => { + self.matches_area(namespace, &area.area) + } + EventKind::Reconciled { area, namespace } => { + self.complete_area_if_matches(namespace, &area.area) + } + EventKind::Abort { .. } => true, + EventKind::ReconciledAll => false, + }; + let is_reconciled = matches!(event, EventKind::Reconciled { .. }); + if matches { + self.send(event.clone()).await?; + if is_reconciled && self.interests.is_empty() { + self.send(EventKind::ReconciledAll).await? 
+ } + } + Ok(self.is_complete()) + } + + async fn send(&self, event: EventKind) -> Result<(), ReceiverDropped> { + self.event_tx.send(event).await.map_err(|_| ReceiverDropped) + } +} + +fn flatten_interests(interests: &InterestMap) -> NamespaceInterests { + let mut out = NamespaceInterests::new(); + for (cap, aois) in interests { + out.entry(cap.namespace()).or_default().extend(aois.clone()); + } + out +} diff --git a/iroh-willow/src/session/pai_finder.rs b/iroh-willow/src/session/pai_finder.rs index ce6a74049a..5277d6fb07 100644 --- a/iroh-willow/src/session/pai_finder.rs +++ b/iroh-willow/src/session/pai_finder.rs @@ -55,6 +55,7 @@ pub struct PaiIntersection { #[derive(Debug)] pub enum Input { + Established, SubmitAuthorisation(ReadAuthorisation), ReceivedMessage(Result), ReceivedSubspaceCapRequest(IntersectionHandle), @@ -79,6 +80,7 @@ pub struct PaiFinder { their_intersection_handles: ResourceMap, requested_subspace_cap_handles: HashSet, submitted: HashSet, + pending: Option>, } impl PaiFinder { @@ -86,6 +88,10 @@ impl PaiFinder { /// /// The returned stream is a generator, so it must be polled repeatedly for the [`PaiFinder`] /// to progress. + /// + /// Submit inputs through the inbox. The [`PaiFinder`] will not yield any outputs until + /// [`Input::Established`]. Authorisations submitted prior are queued and will be yielded after + /// the establish input. 
pub fn run_gen( inbox: impl Stream + Unpin, ) -> impl Stream> { @@ -114,6 +120,7 @@ impl PaiFinder { fragments_info: Default::default(), requested_subspace_cap_handles: Default::default(), submitted: Default::default(), + pending: Some(Default::default()), } } @@ -127,7 +134,20 @@ impl PaiFinder { async fn input(&mut self, input: Input) -> Result<(), Error> { match input { - Input::SubmitAuthorisation(auth) => self.submit_authorisation(auth).await, + Input::SubmitAuthorisation(auth) => { + if let Some(pending) = self.pending.as_mut() { + pending.insert(auth); + } else { + self.submit_authorisation(auth).await; + } + } + Input::Established => { + if let Some(mut pending) = self.pending.take() { + for authorisation in pending.drain() { + self.submit_authorisation(authorisation).await; + } + } + } Input::ReceivedMessage(message) => match message? { IntersectionMessage::BindFragment(message) => self.receive_bind(message).await?, IntersectionMessage::ReplyFragment(message) => self.receive_reply(message).await?, @@ -644,6 +664,7 @@ mod tests { pub fn new(span: Span) -> Self { let (input, input_rx) = flume::bounded(1); let (output_tx, output) = flume::bounded(1); + input.try_send(Input::Established).expect("has capacity"); let outbox = output_tx .into_sink() .sink_map_err(|_| Error::InvalidState("failed to send")); diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 50ffe3a1fa..e097c0a6a2 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -1,4 +1,4 @@ -use std::{cell::RefCell, collections::hash_map, future::Future, rc::Rc}; +use std::{cell::RefCell, collections::hash_map, future::Future, rc::Rc, sync::Arc}; use futures_concurrency::{ future::{Join, TryJoin}, @@ -25,6 +25,7 @@ use crate::{ channels::{ChannelSenders, LogicalChannelReceivers}, data, events::{EventKind, EventSender, SessionEvent}, + intents::{self, IntentData}, pai_finder::{self as pai, PaiFinder, PaiIntersection}, reconciler, 
static_tokens::StaticTokens, @@ -47,18 +48,37 @@ use super::{ const INITIAL_GUARANTEES: u64 = u64::MAX; +// struct Session { +// session_id: SessionId, +// our_role: Role, +// initial_transmission: InitialTransmission, +// event_sender: EventSender, +// cancel_token: CancellationToken, +// } + pub async fn run_session( store: Store, channels: Channels, cancel_token: CancellationToken, session_id: SessionId, our_role: Role, - init: SessionInit, + // init: SessionInit, + initial_intents: Vec, initial_transmission: InitialTransmission, event_sender: EventSender, update_receiver: impl Stream + Unpin + 'static, -) -> Result<(), Error> { - debug!(role = ?our_role, mode = ?init.mode, "start session"); +) -> Result<(), Arc> { + // TODO: update mode to live on intent changes + let mode = initial_intents + .iter() + .fold(SessionMode::ReconcileOnce, |cur, intent| { + match intent.init.mode { + SessionMode::ReconcileOnce => cur, + SessionMode::Live => SessionMode::Live, + } + }); + + debug!(role = ?our_role, ?mode, "start session"); let Channels { send: channel_sender, recv, @@ -92,32 +112,46 @@ pub async fn run_session( ); let tokens = StaticTokens::default(); - // Setup a channels for communication between the loops. - let (pai_inbox, pai_inbox_rx) = channel::(2); - let pai_inbox_rx = Cancelable::new(pai_inbox_rx, cancel_token.clone()); - - let (intersection_inbox, intersection_inbox_rx) = channel::(2); - let intersection_inbox_rx = Cancelable::new(intersection_inbox_rx, cancel_token.clone()); - - let (rec_inbox, rec_inbox_rx) = channel::(2); - let rec_inbox_rx = Cancelable::new(rec_inbox_rx, cancel_token.clone()); + // Setup channels for communication between the loops. 
+ let (pai_inbox, pai_inbox_rx) = cancelable_channel::(2, cancel_token.clone()); + let (intersection_inbox, intersection_inbox_rx) = + cancelable_channel::(2, cancel_token.clone()); + let (reconciler_inbox, reconciler_inbox_rx) = + cancelable_channel::(2, cancel_token.clone()); + let (intents_inbox, intents_inbox_rx) = + cancelable_channel::(2, cancel_token.clone()); // Setup data channels only if in live mode. - let (data_inbox, data_inbox_rx) = if init.mode == SessionMode::Live { - let (data_inbox, data_inbox_rx) = channel::(2); - let data_inbox_rx = Cancelable::new(data_inbox_rx, cancel_token.clone()); + // TODO: Adapt to changing mode. + let (data_inbox, data_inbox_rx) = if mode == SessionMode::Live { + let (data_inbox, data_inbox_rx) = + cancelable_channel::(2, cancel_token.clone()); (Some(data_inbox), Some(data_inbox_rx)) } else { (None, None) }; - let initial_interests_fut = with_span(error_span!("init"), async { - caps.revealed().await; - let interests = store.auth().resolve_interests(init.interests)?; - intersection_inbox - .send(aoi_finder::Input::AddInterests(interests)) - .await?; - Result::<_, Error>::Ok(()) + let mut intents = intents::IntentDispatcher::new(store.auth().clone(), initial_intents); + let intents_fut = with_span(error_span!("intents"), async { + use intents::Output; + let mut intents_gen = intents.run_gen(intents_inbox_rx); + while let Some(output) = intents_gen.try_next().await? { + debug!(?output, "yield"); + match output { + Output::SubmitInterests(interests) => { + intersection_inbox + .send(aoi_finder::Input::AddInterests(interests)) + .await?; + } + // TODO: Add Output::SetMode(SessionMode) to propagate mode changes. 
+ Output::AllIntentsDropped => { + debug!("close session (all intents dropped)"); + cancel_token.cancel(); + } + } + } + debug!("done"); + Ok(()) }); let data_loop = with_span(error_span!("data"), async { @@ -150,11 +184,9 @@ pub async fn run_session( let update_loop = with_span(error_span!("update"), async { while let Some(update) = update_receiver.next().await { match update { - SessionUpdate::AddInterests(interests) => { - caps.revealed().await; - let interests = store.auth().resolve_interests(interests)?; - intersection_inbox - .send(aoi_finder::Input::AddInterests(interests)) + SessionUpdate::SubmitIntent(data) => { + intents_inbox + .send(intents::Input::SubmitIntent(data)) .await?; } } @@ -176,12 +208,11 @@ pub async fn run_session( Output::AoiIntersection(intersection) => { let area = intersection.intersection.clone(); let namespace = intersection.namespace; - rec_inbox + reconciler_inbox .send(reconciler::Input::AoiIntersection(intersection.clone())) .await?; - event_sender - .send(EventKind::InterestIntersection { namespace, area }) - .await?; + let event = EventKind::InterestIntersection { namespace, area }; + intents_inbox.send(intents::Input::EmitEvent(event)).await?; if let Some(data_inbox) = &data_inbox { data_inbox .send(data::Input::AoiIntersection(intersection.clone())) @@ -197,6 +228,12 @@ pub async fn run_session( Ok(()) }); + let pai_init = with_span(error_span!("pai-init"), async { + caps.revealed().await; + pai_inbox.send(pai::Input::Established).await?; + Ok(()) + }); + let pai_loop = with_span(error_span!("pai"), async { use pai::Output; let inbox = pai_inbox_rx.merge(intersection_recv.map(pai::Input::ReceivedMessage)); @@ -211,7 +248,7 @@ pub async fn run_session( }; ( intersection_inbox.send(aoi_finder::Input::PaiIntersection(intersection)), - event_sender.send(event), + intents_inbox.send(intents::Input::EmitEvent(event)), ) .try_join() .await?; @@ -228,7 +265,7 @@ pub async fn run_session( let reconciler_loop = 
with_span(error_span!("reconciler"), async { use reconciler::Output; let mut gen = Reconciler::run_gen( - rec_inbox_rx, + reconciler_inbox_rx, store.clone(), reconciliation_recv, tokens.clone(), @@ -239,13 +276,17 @@ pub async fn run_session( while let Some(output) = gen.try_next().await? { match output { Output::ReconciledArea { namespace, area } => { - event_sender - .send(EventKind::Reconciled { namespace, area }) + intents_inbox + .send(intents::Input::EmitEvent(EventKind::Reconciled { + namespace, + area, + })) .await?; } Output::ReconciledAll => { // Stop session if not in live mode; - if !init.mode.is_live() { + if !mode.is_live() { + debug!("close session (reconciliation finished and not in live mode)"); cancel_token.cancel(); break; } @@ -274,8 +315,19 @@ pub async fn run_session( }); let control_loop = with_span(error_span!("control"), async { - let res = control_loop(control_recv, our_role, &caps, &channel_sender, &pai_inbox).await; - cancel_token.cancel(); + let res = control_loop( + control_recv, + our_role, + &caps, + &channel_sender, + &pai_inbox, + &event_sender, + ) + .await; + if !cancel_token.is_cancelled() { + debug!("close session (control channel closed)"); + cancel_token.cancel(); + } res }); @@ -299,10 +351,11 @@ pub async fn run_session( }); let result = ( - initial_interests_fut, + intents_fut, control_loop, data_loop, update_loop, + pai_init, pai_loop, intersection_loop, reconciler_loop, @@ -313,10 +366,10 @@ pub async fn run_session( .try_join() .await; - match &result { - Ok(_) => debug!("session complete"), - Err(err) => debug!(?err, "session failed"), - } + let result = match result { + Ok(_) => Ok(()), + Err(err) => Err(Arc::new(err)), + }; // Unsubscribe from the store. This stops the data send task. store.entries().unsubscribe(&session_id); @@ -325,8 +378,24 @@ pub async fn run_session( // This will stop the network send loop after all pending data has been sent. 
channel_sender.close_all(); - debug!(success = result.is_ok(), "session complete"); - result.map(|_| ()) + event_sender + .send(SessionEvent::Complete { + result: result.clone(), + }) + .await + .ok(); + + match result { + Ok(_) => { + debug!("session complete"); + Ok(()) + } + Err(error) => { + debug!(?error, "session failed"); + intents.abort_all(error.clone()).await; + Err(error) + } + } } async fn control_loop( @@ -335,6 +404,7 @@ async fn control_loop( caps: &Capabilities, sender: &ChannelSenders, pai_inbox: &Sender, + event_sender: &EventSender, ) -> Result<(), Error> { // Reveal our nonce. let reveal_message = caps.reveal_commitment()?; @@ -354,6 +424,7 @@ async fn control_loop( match message { Message::CommitmentReveal(msg) => { caps.received_commitment_reveal(our_role, msg.nonce)?; + event_sender.send(SessionEvent::Revealed).await?; } Message::ControlIssueGuarantee(msg) => { let ControlIssueGuarantee { amount, channel } = msg; @@ -361,11 +432,17 @@ async fn control_loop( sender.get_logical(channel).add_guarantees(amount); } Message::PaiRequestSubspaceCapability(msg) => { + if !caps.is_revealed() { + return Err(Error::InvalidMessageInCurrentState); + } pai_inbox .send(pai::Input::ReceivedSubspaceCapRequest(msg.handle)) .await?; } Message::PaiReplySubspaceCapability(msg) => { + if !caps.is_revealed() { + return Err(Error::InvalidMessageInCurrentState); + } caps.verify_subspace_cap(&msg.capability, &msg.signature)?; pai_inbox .send(pai::Input::ReceivedVerifiedSubspaceCapReply( @@ -381,9 +458,20 @@ async fn control_loop( Ok(()) } -fn channel(cap: usize) -> (Sender, ReceiverStream) { +// fn channel(cap: usize) -> (Sender, ReceiverStream) { +// let (tx, rx) = mpsc::channel(cap); +// (Sender(tx), ReceiverStream::new(rx)) +// } + +fn cancelable_channel( + cap: usize, + cancel_token: CancellationToken, +) -> (Sender, Cancelable>) { let (tx, rx) = mpsc::channel(cap); - (Sender(tx), ReceiverStream::new(rx)) + ( + Sender(tx), + Cancelable::new(ReceiverStream::new(rx), 
cancel_token), + ) } #[derive(Debug)] @@ -401,7 +489,10 @@ impl Sender { } } -async fn with_span(span: Span, fut: impl Future>) -> Result<(), Error> { +async fn with_span( + span: Span, + fut: impl Future>, +) -> Result { async move { tracing::debug!("start"); let res = fut.await; diff --git a/iroh-willow/src/util/gen_stream.rs b/iroh-willow/src/util/gen_stream.rs index 97ed86fe86..2999c93a9e 100644 --- a/iroh-willow/src/util/gen_stream.rs +++ b/iroh-willow/src/util/gen_stream.rs @@ -10,18 +10,19 @@ use genawaiter::{ }; #[derive(derive_more::Debug)] -pub struct GenStream +pub struct GenStream where - Fut: Future>, + Fut: Future>, { #[debug("Gen")] gen: Gen, is_complete: bool, + final_output: Option, } -impl GenStream +impl GenStream where - Fut: Future>, + Fut: Future>, { pub fn new(producer: impl FnOnce(Co) -> Fut) -> Self { Self::from_gen(Gen::new(producer)) @@ -31,13 +32,19 @@ where Self { gen, is_complete: false, + final_output: None, } } + + pub fn final_output(self) -> Option { + self.final_output + } } -impl futures_lite::Stream for GenStream +impl futures_lite::Stream for GenStream where - Fut: Future>, + Fut: Future>, + O: Unpin, { type Item = Result; @@ -45,18 +52,21 @@ where if self.is_complete { return Poll::Ready(None); } - let item = { + let (item, final_output) = { let mut fut = self.gen.async_resume(); let out = std::task::ready!(Pin::new(&mut fut).poll(cx)); match out { - GeneratorState::Yielded(output) => Some(Ok(output)), - GeneratorState::Complete(Ok(())) => None, - GeneratorState::Complete(Err(err)) => Some(Err(err)), + GeneratorState::Yielded(output) => (Some(Ok(output)), Option::None), + GeneratorState::Complete(Ok(final_output)) => (None, Some(final_output)), + GeneratorState::Complete(Err(err)) => (Some(Err(err)), None), } }; if matches!(item, None | Some(Err(_))) { self.is_complete = true; } + if let Some(final_output) = final_output { + self.final_output = Some(final_output); + }; Poll::Ready(item) } } diff --git 
a/iroh-willow/src/util/stream.rs b/iroh-willow/src/util/stream.rs index 3ac9cc6776..96bb841d54 100644 --- a/iroh-willow/src/util/stream.rs +++ b/iroh-willow/src/util/stream.rs @@ -14,6 +14,7 @@ use tokio_util::sync::{CancellationToken, WaitForCancellationFutureOwned}; #[derive(Debug)] pub struct Cancelable { stream: S, + // TODO: Don't allocate here. cancelled: Pin>, is_cancelled: bool, } @@ -31,18 +32,20 @@ impl Cancelable { impl Stream for Cancelable { type Item = S::Item; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - if self.is_cancelled { - return Poll::Ready(None); - } match Pin::new(&mut self.stream).poll_next(cx) { Poll::Ready(r) => Poll::Ready(r), - Poll::Pending => match Pin::new(&mut self.cancelled).poll(cx) { - Poll::Ready(()) => { - self.is_cancelled = true; - Poll::Ready(None) + Poll::Pending => { + if self.is_cancelled { + return Poll::Ready(None); + } + match Pin::new(&mut self.cancelled).poll(cx) { + Poll::Ready(()) => { + self.is_cancelled = true; + Poll::Ready(None) + } + Poll::Pending => Poll::Pending, } - Poll::Pending => Poll::Pending, - }, + } } } } From 1bb3e17506dc145c73af4bd4609d5702f30a9ef8 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 18 Jul 2024 02:00:04 +0200 Subject: [PATCH 088/198] refactor: make engine work fully --- iroh-willow/src/engine.rs | 419 ++++++++++++-- iroh-willow/src/{ => engine}/actor.rs | 224 +++---- iroh-willow/src/engine/peer_manager.rs | 287 +++++++++ iroh-willow/src/lib.rs | 3 +- iroh-willow/src/net.rs | 126 ++-- iroh-willow/src/session.rs | 80 ++- iroh-willow/src/session/events.rs | 771 ------------------------- iroh-willow/src/session/intents.rs | 322 +++++++---- iroh-willow/src/session/reconciler.rs | 2 +- iroh-willow/src/session/run.rs | 60 +- 10 files changed, 1094 insertions(+), 1200 deletions(-) rename iroh-willow/src/{ => engine}/actor.rs (68%) create mode 100644 iroh-willow/src/engine/peer_manager.rs delete mode 100644 iroh-willow/src/session/events.rs diff 
--git a/iroh-willow/src/engine.rs b/iroh-willow/src/engine.rs index 3256f5a596..da04319d36 100644 --- a/iroh-willow/src/engine.rs +++ b/iroh-willow/src/engine.rs @@ -1,46 +1,373 @@ -// use anyhow::Result; -// use iroh_blobs::protocol::ALPN; -// use iroh_net::{endpoint::Connection, Endpoint, NodeId}; -// -// use crate::{ -// actor::ActorHandle, -// net, -// session::{Role, SessionInit}, -// store::memory, -// }; -// -// #[derive(Debug, Clone)] -// pub struct Engine { -// endpoint: Endpoint, -// handle: ActorHandle, -// } -// -// impl Engine { -// pub fn new(endpoint: Endpoint, handle: ActorHandle) -> Self { -// Self { endpoint, handle } -// } -// -// pub fn memory(endpoint: Endpoint) -> Self { -// let me = endpoint.node_id(); -// let payloads = iroh_blobs::store::mem::Store::default(); -// let handle = ActorHandle::spawn(move || memory::Store::new(payloads), me); -// Self::new(endpoint, handle) -// } -// -// pub async fn handle_connection(&self, conn: Connection, init: SessionInit) -> Result<()> { -// let our_role = Role::Betty; -// let handle = self.handle.clone(); -// let mut session = net::run(self.endpoint.node_id(), handle, conn, our_role, init).await?; -// session.join().await?; -// Ok(()) -// } -// -// pub async fn sync_with_peer(&self, peer: NodeId, init: SessionInit) -> Result<()> { -// let our_role = Role::Alfie; -// let conn = self.endpoint.connect_by_node_id(&peer, ALPN).await?; -// let handle = self.handle.clone(); -// let mut session = net::run(self.endpoint.node_id(), handle, conn, our_role, init).await?; -// session.join().await?; -// Ok(()) -// } -// } +use anyhow::Result; +use iroh_net::{endpoint::Connection, util::SharedAbortingJoinHandle, Endpoint, NodeId}; +use tokio::sync::mpsc; +use tracing::{error_span, Instrument}; + +use crate::{ + session::{ + intents::{Intent, IntentHandle}, + SessionInit, + }, + store::traits::Storage, +}; + +mod actor; +mod peer_manager; + +pub use self::actor::ActorHandle; +pub use self::peer_manager::PeerManager; + 
+const PEER_MANAGER_INBOX_CAP: usize = 128; + +#[derive(Debug, Clone)] +pub struct Engine { + actor_handle: ActorHandle, + peer_manager_inbox: mpsc::Sender, + _peer_manager_handle: SharedAbortingJoinHandle>, +} + +impl Engine { + pub fn spawn( + endpoint: Endpoint, + create_store: impl 'static + Send + FnOnce() -> S, + ) -> Self { + let me = endpoint.node_id(); + let actor = ActorHandle::spawn(create_store, me); + let (pm_inbox_tx, pm_inbox_rx) = mpsc::channel(PEER_MANAGER_INBOX_CAP); + let peer_manager = PeerManager::new(actor.clone(), endpoint, pm_inbox_rx); + let peer_manager_handle = tokio::task::spawn( + async move { peer_manager.run().await.map_err(|err| format!("{err:?}")) } + .instrument(error_span!("peer_manager", me = me.fmt_short())), + ); + Engine { + actor_handle: actor, + peer_manager_inbox: pm_inbox_tx, + _peer_manager_handle: peer_manager_handle.into(), + } + } + + pub async fn handle_connection(&self, conn: Connection) -> Result<()> { + self.peer_manager_inbox + .send(peer_manager::Input::HandleConnection { conn }) + .await?; + Ok(()) + } + + pub async fn sync_with_peer(&self, peer: NodeId, init: SessionInit) -> Result { + let (intent, handle) = Intent::new(init); + self.peer_manager_inbox + .send(peer_manager::Input::SubmitIntent { peer, intent }) + .await?; + Ok(handle) + } +} + +impl std::ops::Deref for Engine { + type Target = ActorHandle; + + fn deref(&self) -> &Self::Target { + &self.actor_handle + } +} + +#[cfg(test)] +mod tests { + use bytes::Bytes; + use iroh_net::{Endpoint, NodeAddr, NodeId}; + use rand::SeedableRng; + use rand_chacha::ChaCha12Rng; + use std::collections::HashMap; + + use crate::{ + auth::{CapSelector, DelegateTo}, + engine::Engine, + form::{AuthForm, EntryForm, PayloadForm, SubspaceForm, TimestampForm}, + net::ALPN, + proto::{ + grouping::{Area, AreaOfInterest, ThreeDRange}, + keys::{NamespaceId, NamespaceKind, UserId}, + meadowcap::AccessMode, + willow::{Entry, InvalidPath, Path}, + }, + session::{intents::EventKind, 
Interests, Role, SessionInit, SessionMode}, + }; + + fn create_rng(seed: &str) -> ChaCha12Rng { + let seed = iroh_base::hash::Hash::new(seed); + rand_chacha::ChaCha12Rng::from_seed(*(seed.as_bytes())) + } + + #[tokio::test(flavor = "multi_thread")] + async fn peer_manager_two_intents() -> anyhow::Result<()> { + iroh_test::logging::setup_multithreaded(); + let mut rng = create_rng("peer_manager_two_intents"); + let ( + shutdown, + namespace, + (alfie, _alfie_node_id, _alfie_user), + (betty, betty_node_id, betty_user), + ) = create_and_setup_two(&mut rng).await?; + + insert(&betty, namespace, betty_user, &[b"foo", b"1"], "foo 1").await?; + insert(&betty, namespace, betty_user, &[b"bar", b"2"], "bar 2").await?; + insert(&betty, namespace, betty_user, &[b"bar", b"3"], "bar 3").await?; + + let task_foo = tokio::task::spawn({ + let alfie = alfie.clone(); + async move { + let path = Path::new(&[b"foo"]).unwrap(); + + let interests = Interests::select().area(namespace, [Area::path(path.clone())]); + let init = SessionInit::new(interests, SessionMode::ReconcileOnce); + let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); + + assert_eq!( + intent.next().await.unwrap(), + EventKind::CapabilityIntersection { + namespace, + area: Area::full(), + } + ); + + assert_eq!( + intent.next().await.unwrap(), + EventKind::InterestIntersection { + namespace, + area: Area::path(path.clone()).into() + } + ); + + assert_eq!( + intent.next().await.unwrap(), + EventKind::Reconciled { + namespace, + area: Area::path(path.clone()).into() + } + ); + + assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); + + assert!(intent.next().await.is_none()); + } + }); + + let task_bar = tokio::task::spawn({ + let alfie = alfie.clone(); + async move { + let path = Path::new(&[b"bar"]).unwrap(); + + let interests = Interests::select().area(namespace, [Area::path(path.clone())]); + let init = SessionInit::new(interests, SessionMode::ReconcileOnce); + + let mut intent = 
alfie.sync_with_peer(betty_node_id, init).await.unwrap(); + + assert_eq!( + intent.next().await.unwrap(), + EventKind::CapabilityIntersection { + namespace, + area: Area::full(), + } + ); + + assert_eq!( + intent.next().await.unwrap(), + EventKind::InterestIntersection { + namespace, + area: Area::path(path.clone()).into() + } + ); + + assert_eq!( + intent.next().await.unwrap(), + EventKind::Reconciled { + namespace, + area: Area::path(path.clone()).into() + } + ); + + assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); + + assert!(intent.next().await.is_none()); + } + }); + + task_foo.await.unwrap(); + task_bar.await.unwrap(); + shutdown(); + Ok(()) + } + + #[tokio::test(flavor = "multi_thread")] + async fn peer_manager_update_intent() -> anyhow::Result<()> { + iroh_test::logging::setup_multithreaded(); + let mut rng = create_rng("peer_manager_update_intent"); + let ( + shutdown, + namespace, + (alfie, _alfie_node_id, _alfie_user), + (betty, betty_node_id, betty_user), + ) = create_and_setup_two(&mut rng).await?; + + insert(&betty, namespace, betty_user, &[b"foo"], "foo 1").await?; + insert(&betty, namespace, betty_user, &[b"bar"], "bar 1").await?; + + let path = Path::new(&[b"foo"]).unwrap(); + let interests = Interests::select().area(namespace, [Area::path(path.clone())]); + let init = SessionInit::new(interests, SessionMode::Live); + let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); + + println!("start"); + assert_eq!( + intent.next().await.unwrap(), + EventKind::CapabilityIntersection { + namespace, + area: Area::full(), + } + ); + println!("first in!"); + assert_eq!( + intent.next().await.unwrap(), + EventKind::InterestIntersection { + namespace, + area: Area::path(path.clone()).into() + } + ); + assert_eq!( + intent.next().await.unwrap(), + EventKind::Reconciled { + namespace, + area: Area::path(path.clone()).into() + } + ); + assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); + + let path = 
Path::new(&[b"bar"]).unwrap(); + let interests = Interests::select().area(namespace, [Area::path(path.clone())]); + intent.add_interests(interests).await?; + + assert_eq!( + intent.next().await.unwrap(), + EventKind::InterestIntersection { + namespace, + area: Area::path(path.clone()).into() + } + ); + assert_eq!( + intent.next().await.unwrap(), + EventKind::Reconciled { + namespace, + area: Area::path(path.clone()).into() + } + ); + + assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); + + intent.close().await; + + assert!(intent.next().await.is_none(),); + + shutdown(); + Ok(()) + } + + pub async fn create_and_setup_two( + rng: &mut rand_chacha::ChaCha12Rng, + ) -> anyhow::Result<( + impl Fn(), + NamespaceId, + (Engine, NodeId, UserId), + (Engine, NodeId, UserId), + )> { + let (alfie, alfie_ep, alfie_addr, alfie_task) = create(rng).await?; + let (betty, betty_ep, betty_addr, betty_task) = create(rng).await?; + + let betty_node_id = betty_addr.node_id; + let alfie_node_id = alfie_addr.node_id; + alfie_ep.add_node_addr(betty_addr)?; + betty_ep.add_node_addr(alfie_addr)?; + + let (namespace_id, alfie_user, betty_user) = setup_and_delegate(&alfie, &betty).await?; + + let shutdown = move || { + betty_task.abort(); + alfie_task.abort(); + }; + Ok(( + shutdown, + namespace_id, + (alfie, alfie_node_id, alfie_user), + (betty, betty_node_id, betty_user), + )) + } + + pub async fn create( + rng: &mut rand_chacha::ChaCha12Rng, + ) -> anyhow::Result<( + Engine, + Endpoint, + iroh_net::NodeAddr, + tokio::task::JoinHandle>, + )> { + let endpoint = Endpoint::builder() + .secret_key(iroh_net::key::SecretKey::generate_with_rng(rng)) + .alpns(vec![ALPN.to_vec()]) + .bind(0) + .await?; + let node_addr = endpoint.node_addr().await?; + let payloads = iroh_blobs::store::mem::Store::default(); + let create_store = move || crate::store::memory::Store::new(payloads); + let handle = Engine::spawn(endpoint.clone(), create_store); + let accept_task = tokio::task::spawn({ + 
let handle = handle.clone(); + let endpoint = endpoint.clone(); + async move { + while let Some(mut conn) = endpoint.accept().await { + let alpn = conn.alpn().await?; + if alpn != ALPN { + continue; + } + let conn = conn.await?; + handle.handle_connection(conn).await?; + } + Ok::<_, anyhow::Error>(()) + } + }); + Ok((handle, endpoint, node_addr, accept_task)) + } + + async fn setup_and_delegate( + alfie: &Engine, + betty: &Engine, + ) -> anyhow::Result<(NamespaceId, UserId, UserId)> { + let user_alfie = alfie.create_user().await?; + let user_betty = betty.create_user().await?; + + let namespace_id = alfie + .create_namespace(NamespaceKind::Owned, user_alfie) + .await?; + + let cap_for_betty = alfie + .delegate_caps( + CapSelector::widest(namespace_id), + AccessMode::Write, + DelegateTo::new(user_betty, None), + ) + .await?; + + betty.import_caps(cap_for_betty).await?; + Ok((namespace_id, user_alfie, user_betty)) + } + + async fn insert( + handle: &Engine, + namespace_id: NamespaceId, + user: UserId, + path: &[&[u8]], + bytes: impl Into, + ) -> anyhow::Result<()> { + let path = Path::new(path)?; + let entry = EntryForm::new_bytes(namespace_id, path, bytes); + handle.insert(entry, user).await?; + Ok(()) + } +} diff --git a/iroh-willow/src/actor.rs b/iroh-willow/src/engine/actor.rs similarity index 68% rename from iroh-willow/src/actor.rs rename to iroh-willow/src/engine/actor.rs index e928957533..291aa5052a 100644 --- a/iroh-willow/src/actor.rs +++ b/iroh-willow/src/engine/actor.rs @@ -14,6 +14,7 @@ use tracing::{debug, error, error_span, trace, warn, Instrument}; use crate::{ auth::{CapSelector, CapabilityPack, DelegateTo, InterestMap}, form::{AuthForm, EntryForm, EntryOrForm}, + net::WillowConn, proto::{ grouping::ThreeDRange, keys::{NamespaceId, NamespaceKind, UserId, UserSecretKey}, @@ -22,9 +23,9 @@ use crate::{ willow::{AuthorisedEntry, Entry}, }, session::{ - events::{EventKind, EventSender, SessionEvent}, - intents::IntentData, - run_session, Channels, Error, 
Interests, Role, SessionId, SessionInit, SessionUpdate, + intents::{EventKind, Intent}, + run_session, Channels, Error, EventSender, Interests, Role, SessionEvent, SessionHandle, + SessionId, SessionInit, SessionUpdate, }, store::{ entry::EntryOrigin, @@ -40,7 +41,7 @@ pub const SESSION_UPDATE_CHANNEL_CAP: usize = 64; #[derive(Debug, Clone)] pub struct ActorHandle { - tx: flume::Sender, + inbox_tx: flume::Sender, join_handle: Arc>>, } @@ -53,7 +54,7 @@ impl ActorHandle { create_store: impl 'static + Send + FnOnce() -> S, me: NodeId, ) -> ActorHandle { - let (tx, rx) = flume::bounded(INBOX_CAP); + let (inbox_tx, inbox_rx) = flume::bounded(INBOX_CAP); let join_handle = std::thread::Builder::new() .name("willow-actor".to_string()) .spawn(move || { @@ -62,29 +63,25 @@ impl ActorHandle { let store = (create_store)(); let store = Store::new(store); - let actor = Actor { - store, - sessions: Default::default(), - inbox_rx: rx, - next_session_id: 0, - session_tasks: Default::default(), - tasks: Default::default(), - }; + let actor = Actor::new(store, inbox_rx); if let Err(error) = actor.run() { error!(?error, "willow actor failed"); }; }) .expect("failed to spawn willow-actor thread"); let join_handle = Arc::new(Some(join_handle)); - ActorHandle { tx, join_handle } + ActorHandle { + inbox_tx, + join_handle, + } } - pub async fn send(&self, action: ToActor) -> Result<()> { - self.tx.send_async(action).await?; + pub async fn send(&self, action: Input) -> Result<()> { + self.inbox_tx.send_async(action).await?; Ok(()) } pub async fn ingest_entry(&self, authorised_entry: AuthorisedEntry) -> Result<()> { let (reply, reply_rx) = oneshot::channel(); - self.send(ToActor::IngestEntry { + self.send(Input::IngestEntry { authorised_entry, origin: EntryOrigin::Local, reply, @@ -96,7 +93,7 @@ impl ActorHandle { pub async fn insert_entry(&self, entry: Entry, auth: impl Into) -> Result<()> { let (reply, reply_rx) = oneshot::channel(); - self.send(ToActor::InsertEntry { + 
self.send(Input::InsertEntry { entry: EntryOrForm::Entry(entry), auth: auth.into(), reply, @@ -112,7 +109,7 @@ impl ActorHandle { authorisation: impl Into, ) -> Result<(Entry, bool)> { let (reply, reply_rx) = oneshot::channel(); - self.send(ToActor::InsertEntry { + self.send(Input::InsertEntry { entry: EntryOrForm::Form(form), auth: authorisation.into(), reply, @@ -125,7 +122,7 @@ impl ActorHandle { pub async fn insert_secret(&self, secret: impl Into) -> Result<()> { let secret = secret.into(); let (reply, reply_rx) = oneshot::channel(); - self.send(ToActor::InsertSecret { secret, reply }).await?; + self.send(Input::InsertSecret { secret, reply }).await?; reply_rx.await??; Ok(()) } @@ -136,7 +133,7 @@ impl ActorHandle { range: ThreeDRange, ) -> Result>> { let (tx, rx) = flume::bounded(1024); - self.send(ToActor::GetEntries { + self.send(Input::GetEntries { namespace, reply: tx, range, @@ -147,20 +144,12 @@ impl ActorHandle { pub async fn init_session( &self, - peer: NodeId, - our_role: Role, - initial_transmission: InitialTransmission, - channels: Channels, - // init: SessionInit, - intents: Vec, + conn: WillowConn, + intents: Vec, ) -> Result { let (reply, reply_rx) = oneshot::channel(); - self.send(ToActor::InitSession { - our_role, - initial_transmission, - peer, - channels, - // init, + self.send(Input::InitSession { + conn, intents, reply, }) @@ -176,14 +165,14 @@ impl ActorHandle { owner: UserId, ) -> Result { let (reply, reply_rx) = oneshot::channel(); - self.send(ToActor::CreateNamespace { kind, owner, reply }) + self.send(Input::CreateNamespace { kind, owner, reply }) .await?; reply_rx.await? } pub async fn create_user(&self) -> Result { let (reply, reply_rx) = oneshot::channel(); - self.send(ToActor::CreateUser { reply }).await?; + self.send(Input::CreateUser { reply }).await?; reply_rx.await? 
} @@ -194,7 +183,7 @@ impl ActorHandle { to: DelegateTo, ) -> Result> { let (reply, reply_rx) = oneshot::channel(); - self.send(ToActor::DelegateCaps { + self.send(Input::DelegateCaps { from, access_mode, to, @@ -207,20 +196,20 @@ impl ActorHandle { pub async fn import_caps(&self, caps: Vec) -> Result<()> { let (reply, reply_rx) = oneshot::channel(); - self.send(ToActor::ImportCaps { caps, reply }).await?; + self.send(Input::ImportCaps { caps, reply }).await?; reply_rx.await? } pub async fn resolve_interests(&self, interests: Interests) -> Result { let (reply, reply_rx) = oneshot::channel(); - self.send(ToActor::ResolveInterests { interests, reply }) + self.send(Input::ResolveInterests { interests, reply }) .await?; reply_rx.await? } pub async fn shutdown(&self) -> Result<()> { let (reply, reply_rx) = oneshot::channel(); - self.send(ToActor::Shutdown { reply: Some(reply) }).await?; + self.send(Input::Shutdown { reply: Some(reply) }).await?; reply_rx.await?; Ok(()) } @@ -231,7 +220,7 @@ impl Drop for ActorHandle { // this means we're dropping the last reference if let Some(handle) = Arc::get_mut(&mut self.join_handle) { let handle = handle.take().expect("can only drop once"); - self.tx.send(ToActor::Shutdown { reply: None }).ok(); + self.inbox_tx.send(Input::Shutdown { reply: None }).ok(); if let Err(err) = handle.join() { warn!(?err, "Failed to join sync actor"); } @@ -239,57 +228,11 @@ impl Drop for ActorHandle { } } -#[derive(Debug)] -pub struct SessionHandle { - // pub session_id: SessionId, - pub cancel_token: CancellationToken, - pub update_tx: mpsc::Sender, - pub event_rx: mpsc::Receiver, -} - -impl SessionHandle { - // pub fn session_id(&self) -> SessionId { - // self.session_id - // } - - /// Wait for the session to finish. - /// - /// Returns an error if the session failed to complete. 
- pub async fn on_finish(&mut self) -> Result<(), Arc> { - while let Some(event) = self.event_rx.recv().await { - if let SessionEvent::Complete { result } = event { - return result; - } - } - Ok(()) - } - - pub async fn send_update(&self, update: SessionUpdate) -> Result<()> { - self.update_tx.send(update).await?; - Ok(()) - } - - /// Finish the session gracefully. - /// - /// After calling this, no further protocol messages will be sent from this node. - /// Previously queued messages will still be sent out. The session will only be closed - /// once the other peer closes their senders as well. - pub fn close(&self) { - debug!("close session (session handle close called)"); - self.cancel_token.cancel(); - } -} - #[derive(derive_more::Debug, strum::Display)] -pub enum ToActor { +pub enum Input { InitSession { - our_role: Role, - peer: NodeId, - initial_transmission: InitialTransmission, - #[debug(skip)] - channels: Channels, - // init: SessionInit, - intents: Vec, + conn: WillowConn, + intents: Vec, reply: oneshot::Sender>, }, GetEntries { @@ -342,23 +285,25 @@ pub enum ToActor { } #[derive(Debug)] -struct ActiveSession { - // peer: NodeId, - task_key: TaskKey, // state: SharedSessionState - // event_tx: mpsc::Sender, -} - -#[derive(Debug)] -pub struct Actor { - inbox_rx: flume::Receiver, +struct Actor { + inbox_rx: flume::Receiver, store: Store, next_session_id: u64, - sessions: HashMap, session_tasks: JoinMap>>, tasks: JoinSet<()>, } impl Actor { + pub fn new(store: Store, inbox_rx: flume::Receiver) -> Self { + Self { + store, + inbox_rx, + next_session_id: 0, + session_tasks: Default::default(), + tasks: Default::default(), + } + } + pub fn run(self) -> Result<()> { let rt = tokio::runtime::Builder::new_current_thread() .build() @@ -366,12 +311,13 @@ impl Actor { let local_set = tokio::task::LocalSet::new(); local_set.block_on(&rt, async move { self.run_async().await }) } + async fn run_async(mut self) -> Result<()> { loop { tokio::select! 
{ msg = self.inbox_rx.recv_async() => match msg { Err(_) => break, - Ok(ToActor::Shutdown { reply }) => { + Ok(Input::Shutdown { reply }) => { tokio::join!( self.tasks.shutdown(), self.session_tasks.shutdown() @@ -389,8 +335,8 @@ impl Actor { } }, Some((id, res)) = self.session_tasks.next(), if !self.session_tasks.is_empty() => { - let _res = res.context("session task paniced")?; - self.complete_session(&id).await; + let res = res.context("session task paniced")?; + debug!(?id, ?res, "session complete"); } }; } @@ -403,17 +349,13 @@ impl Actor { id } - async fn handle_message(&mut self, message: ToActor) -> Result<(), SendReplyError> { + async fn handle_message(&mut self, message: Input) -> Result<(), SendReplyError> { trace!(%message, "tick: handle_message"); match message { - ToActor::Shutdown { .. } => unreachable!("handled in run"), - ToActor::InitSession { - peer, - channels, - our_role, - initial_transmission, + Input::Shutdown { .. } => unreachable!("handled in run"), + Input::InitSession { + conn, intents, - // init, reply, } => { let session_id = self.next_session_id(); @@ -424,27 +366,22 @@ impl Actor { let (event_tx, event_rx) = mpsc::channel(SESSION_EVENT_CHANNEL_CAP); let update_rx = tokio_stream::wrappers::ReceiverStream::new(update_rx); + let peer = conn.peer; let future = run_session( store, - channels, + conn, + intents, cancel_token.clone(), session_id, - our_role, - intents, - initial_transmission, EventSender(event_tx.clone()), update_rx, ) .instrument(error_span!("session", peer = %peer.fmt_short())); - let task_key = self.session_tasks.spawn_local(session_id, future); + let _task_key = self.session_tasks.spawn_local(session_id, future); - let active_session = ActiveSession { - // event_tx, - task_key, - // peer, - }; - self.sessions.insert(session_id, active_session); + // let active_session = ActiveSession { task_key }; + // self.sessions.insert(session_id, active_session); let handle = SessionHandle { // session_id, cancel_token, @@ -453,7 
+390,7 @@ impl Actor { }; send_reply(reply, Ok(handle)) } - ToActor::GetEntries { + Input::GetEntries { namespace, range, reply, @@ -474,7 +411,7 @@ impl Actor { } } } - ToActor::IngestEntry { + Input::IngestEntry { authorised_entry, origin, reply, @@ -482,31 +419,31 @@ impl Actor { let res = self.store.entries().ingest(&authorised_entry, origin); send_reply(reply, res) } - ToActor::InsertEntry { entry, auth, reply } => { + Input::InsertEntry { entry, auth, reply } => { let res = self.store.insert_entry(entry, auth).await; let res = res.map_err(Into::into); send_reply(reply, res) } - ToActor::InsertSecret { secret, reply } => { + Input::InsertSecret { secret, reply } => { let res = self.store.secrets().insert(secret); send_reply(reply, res.map_err(anyhow::Error::from)) } - ToActor::CreateNamespace { kind, owner, reply } => { + Input::CreateNamespace { kind, owner, reply } => { let res = self .store .create_namespace(&mut rand::thread_rng(), kind, owner); send_reply(reply, res.map_err(anyhow::Error::from)) } - ToActor::CreateUser { reply } => { + Input::CreateUser { reply } => { let secret = UserSecretKey::generate(&mut rand::thread_rng()); let res = self.store.secrets().insert_user(secret); send_reply(reply, res.map_err(anyhow::Error::from)) } - ToActor::ImportCaps { caps, reply } => { + Input::ImportCaps { caps, reply } => { let res = self.store.auth().import_caps(caps); send_reply(reply, res.map_err(anyhow::Error::from)) } - ToActor::DelegateCaps { + Input::DelegateCaps { from, access_mode, to, @@ -519,31 +456,12 @@ impl Actor { .delegate_full_caps(from, access_mode, to, store); send_reply(reply, res.map_err(anyhow::Error::from)) } - ToActor::ResolveInterests { interests, reply } => { + Input::ResolveInterests { interests, reply } => { let res = self.store.auth().resolve_interests(interests); send_reply(reply, res.map_err(anyhow::Error::from)) } } } - - async fn complete_session(&mut self, session_id: &SessionId) { - let session = 
self.sessions.remove(session_id); - if let Some(session) = session { - // debug!(?session, ?result, "complete session"); - // let result = match result { - // Ok(()) => Ok(()), - // Err(err) => Err(Arc::new(err)), - // }; - // session - // .event_tx - // .send(EventKind::Closed { result }) - // .await - // .ok(); - self.session_tasks.remove(&session.task_key); - } else { - warn!("remove_session called for unknown session"); - } - } } #[derive(Debug)] @@ -553,14 +471,6 @@ fn send_reply(sender: oneshot::Sender, value: T) -> Result<(), SendReplyEr sender.send(value).map_err(send_reply_error) } -// fn send_reply_with( -// sender: oneshot::Sender>, -// this: &mut Actor, -// f: impl FnOnce(&mut Actor) -> Result, -// ) -> Result<(), SendReplyError> { -// sender.send(f(this)).map_err(send_reply_error) -// } - fn send_reply_error(_err: T) -> SendReplyError { SendReplyError } diff --git a/iroh-willow/src/engine/peer_manager.rs b/iroh-willow/src/engine/peer_manager.rs new file mode 100644 index 0000000000..ceac81e07d --- /dev/null +++ b/iroh-willow/src/engine/peer_manager.rs @@ -0,0 +1,287 @@ +use std::{ + collections::{hash_map, HashMap, HashSet}, + sync::Arc, +}; + +use anyhow::{anyhow, Context, Result}; +use futures_buffered::join_all; +use futures_concurrency::future::future_group; +use futures_concurrency::future::Join; +use futures_lite::StreamExt; +use futures_util::FutureExt; +use iroh_net::{ + dialer::Dialer, + endpoint::{get_remote_node_id, Connection}, + util::SharedAbortingJoinHandle, + Endpoint, NodeId, +}; +use tokio::{ + io::Interest, + sync::{mpsc, oneshot}, + task::{AbortHandle, JoinHandle, JoinSet}, +}; +use tokio_stream::{wrappers::ReceiverStream, StreamMap, StreamNotifyClose}; +use tokio_util::sync::CancellationToken; +use tracing::{debug, error_span, Instrument}; + +use crate::{ + auth::{Auth, InterestMap}, + net::{setup, WillowConn, ALPN}, + proto::{ + grouping::{Area, AreaOfInterest}, + keys::NamespaceId, + sync::{ReadAuthorisation, ReadCapability}, + 
}, + session::{ + error::ChannelReceiverDropped, + intents::{EventKind, Intent, IntentHandle}, + Error, Interests, Role, SessionEvent, SessionHandle, SessionId, SessionInit, SessionMode, + SessionUpdate, + }, + store::traits::Storage, +}; + +use super::actor::ActorHandle; + +#[derive(derive_more::Debug)] +pub enum Input { + SubmitIntent { + peer: NodeId, + intent: Intent, + }, + HandleConnection { + #[debug("Connection")] + conn: Connection, + }, +} + +#[derive(derive_more::Debug)] +pub struct PeerManager { + actor: ActorHandle, + endpoint: Endpoint, + inbox: mpsc::Receiver, + events_rx: StreamMap>, + tasks: JoinSet<(NodeId, Result)>, + peers: HashMap, +} + +impl PeerManager { + pub fn new( + actor_handle: ActorHandle, + endpoint: Endpoint, + inbox: mpsc::Receiver, + ) -> Self { + PeerManager { + endpoint: endpoint.clone(), + actor: actor_handle, + inbox, + events_rx: Default::default(), + tasks: Default::default(), + peers: Default::default(), + } + } + pub async fn run(mut self) -> Result<(), Error> { + loop { + tokio::select! 
{ + Some(input) = self.inbox.recv() => { + debug!(?input, "tick: inbox"); + self.handle_input(input).await; + } + Some((session_id, event)) = self.events_rx.next(), if !self.events_rx.is_empty() => { + debug!(?session_id, ?event, "tick: event"); + self.handle_event(session_id, event); + } + Some(res) = self.tasks.join_next(), if !self.tasks.is_empty() => { + debug!("tick: task.join_next"); + match res { + Err(err) if err.is_cancelled() => continue, + Err(err) => Err(err).context("establish task paniced")?, + Ok((_peer, Ok(conn))) => self.on_established(conn).await?, + Ok((peer, Err(err))) => self.failed_to_connect(peer, Arc::new(Error::Net(err))).await, + } + } + else => break, + } + } + Ok(()) + } + + pub async fn handle_input(&mut self, input: Input) { + match input { + Input::SubmitIntent { peer, intent } => { + if let Err(err) = self.submit_intent(peer, intent).await { + tracing::warn!("failed to submit intent: {err:?}"); + } + } + Input::HandleConnection { conn } => { + self.handle_connection(conn); + } + } + } + + fn handle_connection(&mut self, conn: Connection) { + let peer = match get_remote_node_id(&conn) { + Ok(node_id) => node_id, + Err(err) => { + tracing::debug!("ignore incoming connection (QUIC handshake failed: {err})"); + return; + } + }; + let me = self.endpoint.node_id(); + + match self.peers.get_mut(&peer) { + None => { + let abort_handle = self + .tasks + .spawn(WillowConn::betty(conn, me).map(move |res| (peer, res))); + let init = SessionInit::new(Interests::All, SessionMode::Live); + let intent = Intent::new_detached(init); + self.peers.insert( + peer, + PeerState::Pending { + our_role: Role::Betty, + intents: vec![intent], + abort_handle, + }, + ); + } + Some(PeerState::Pending { + our_role: Role::Alfie, + abort_handle, + intents, + }) => { + if me > peer { + tracing::debug!( + "ignore incoming connection (already dialing and our dial wins)" + ); + conn.close(0u8.into(), b"duplicate-our-dial-wins"); + } else { + // abort our dial attempt + 
abort_handle.abort(); + // set the new abort handle + *abort_handle = self + .tasks + .spawn(WillowConn::betty(conn, me).map(move |res| (peer, res))); + // add catchall interest + let init = SessionInit::new(Interests::All, SessionMode::Live); + let intent = Intent::new_detached(init); + intents.push(intent); + } + } + Some(PeerState::Pending { + our_role: Role::Betty, + .. + }) => { + tracing::debug!("ignore incoming connection (already accepting)"); + conn.close(0u8.into(), b"duplicate-already-accepting"); + } + Some(PeerState::Active { .. }) => { + tracing::debug!("got connection for already active peer"); + conn.close(0u8.into(), b"duplicate-already-accepting"); + } + } + } + + async fn failed_to_connect(&mut self, peer: NodeId, error: Arc) { + let Some(peer_state) = self.peers.remove(&peer) else { + tracing::warn!(?peer, "attempted to remove unknown peer"); + return; + }; + match peer_state { + PeerState::Pending { intents, .. } => { + join_all( + intents + .into_iter() + .map(|intent| intent.send_abort(error.clone())), + ) + .await; + } + PeerState::Active { .. } => { + unreachable!("we don't accept connections for active peers") + } + }; + } + + async fn on_established(&mut self, conn: WillowConn) -> anyhow::Result<()> { + let peer = conn.peer; + let peer_state = self + .peers + .remove(&peer) + .ok_or_else(|| anyhow!("unreachable: on_established called for unknown peer"))?; + + let PeerState::Pending { intents, .. 
} = peer_state else { + anyhow::bail!("unreachable: on_established called for peer in wrong state") + }; + + let session_handle = self.actor.init_session(conn, intents).await?; + + let SessionHandle { + cancel_token: _, + update_tx, + event_rx, + } = session_handle; + self.events_rx.insert(peer, ReceiverStream::new(event_rx)); + self.peers.insert(peer, PeerState::Active { update_tx }); + Ok(()) + } + + pub async fn submit_intent(&mut self, peer: NodeId, intent: Intent) -> Result<()> { + match self.peers.get_mut(&peer) { + None => { + let intents = vec![intent]; + let me = self.endpoint.node_id(); + let endpoint = self.endpoint.clone(); + let abort_handle = self.tasks.spawn( + async move { + let conn = endpoint.connect_by_node_id(&peer, ALPN).await?; + let conn = WillowConn::alfie(conn, me).await?; + Ok(conn) + } + .map(move |res| (peer, res)), + ); + let peer_state = PeerState::Pending { + intents, + abort_handle, + our_role: Role::Alfie, + }; + self.peers.insert(peer, peer_state); + } + Some(state) => match state { + PeerState::Pending { intents, .. } => { + intents.push(intent); + } + PeerState::Active { update_tx, .. } => { + update_tx.send(SessionUpdate::SubmitIntent(intent)).await?; + } + }, + }; + Ok(()) + } + + pub fn handle_event(&mut self, peer: NodeId, event: SessionEvent) { + tracing::info!(?event, "event"); + match event { + SessionEvent::Established => {} + SessionEvent::Complete { .. } => { + self.peers.remove(&peer); + } + } + } +} + +#[derive(Debug)] +enum PeerState { + Pending { + our_role: Role, + intents: Vec, + abort_handle: AbortHandle, + }, + Active { + update_tx: mpsc::Sender, + // cancel_token: CancellationToken, + }, +} + +#[derive(Debug, thiserror::Error)] +#[error("receiver dropped")] +pub struct ReceiverDropped; diff --git a/iroh-willow/src/lib.rs b/iroh-willow/src/lib.rs index fcf82156e1..ee0f197c82 100644 --- a/iroh-willow/src/lib.rs +++ b/iroh-willow/src/lib.rs @@ -1,8 +1,7 @@ //! 
Implementation of willow -#![allow(missing_docs, unused_imports)] +#![allow(missing_docs)] -pub mod actor; pub mod auth; pub mod engine; pub mod form; diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 3f1c5f25a4..87c536f332 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -2,15 +2,15 @@ use anyhow::ensure; use futures_concurrency::future::TryJoin; use futures_util::future::TryFutureExt; use iroh_base::{hash::Hash, key::NodeId}; -use iroh_net::endpoint::{Connection, RecvStream, SendStream}; +use iroh_net::endpoint::{get_remote_node_id, Connection, RecvStream, SendStream}; use tokio::{ io::{AsyncReadExt, AsyncWriteExt}, - task::JoinSet, + task::{JoinHandle, JoinSet}, }; use tracing::{debug, error_span, field::Empty, instrument, trace, warn, Instrument, Span}; use crate::{ - actor::{self, ActorHandle}, + engine::ActorHandle, proto::sync::{ AccessChallenge, Channel, InitialTransmission, LogicalChannel, Message, CHALLENGE_HASH_LENGTH, MAX_PAYLOAD_SIZE_POWER, @@ -20,8 +20,8 @@ use crate::{ ChannelReceivers, ChannelSenders, Channels, LogicalChannelReceivers, LogicalChannelSenders, }, - intents::IntentData, - Role, SessionInit, + intents::Intent, + Role, SessionHandle, SessionInit, }, util::channel::{ inbound_channel, outbound_channel, Guarantees, Reader, Receiver, Sender, Writer, @@ -31,6 +31,40 @@ use crate::{ pub const CHANNEL_CAP: usize = 1024 * 64; pub const ALPN: &[u8] = b"iroh-willow/0"; +#[derive(derive_more::Debug)] +pub struct WillowConn { + pub(crate) our_role: Role, + pub(crate) peer: NodeId, + #[debug("InitialTransmission")] + pub(crate) initial_transmission: InitialTransmission, + #[debug("Channels")] + pub(crate) channels: Channels, + pub(crate) join_handle: JoinHandle>, +} + +impl WillowConn { + pub async fn alfie(conn: Connection, me: NodeId) -> anyhow::Result { + Self::connect(conn, me, Role::Alfie).await + } + + pub async fn betty(conn: Connection, me: NodeId) -> anyhow::Result { + Self::connect(conn, me, 
Role::Betty).await + } + + async fn connect(conn: Connection, me: NodeId, our_role: Role) -> anyhow::Result { + let peer = get_remote_node_id(&conn)?; + let (initial_transmission, channels, mut join_set) = setup(conn, me, our_role).await?; + let join_handle = tokio::task::spawn(async move { join_all(&mut join_set).await }); + Ok(Self { + peer, + initial_transmission, + channels, + join_handle, + our_role, + }) + } +} + #[instrument(skip_all, name = "willow_net", fields(me=%me.fmt_short(), peer=Empty))] pub async fn setup( conn: Connection, @@ -85,46 +119,11 @@ pub async fn run( actor: ActorHandle, conn: Connection, our_role: Role, - intents: Vec, - // init: SessionInit, + intents: Vec, ) -> anyhow::Result { - let peer = iroh_net::endpoint::get_remote_node_id(&conn)?; - let (initial_transmission, channels, tasks) = setup(conn, me, our_role).await?; - let handle = actor - .init_session(peer, our_role, initial_transmission, channels, intents) - .await?; - - Ok(SessionHandle { handle, tasks }) -} -// -#[derive(Debug)] -pub struct SessionHandle { - handle: actor::SessionHandle, - tasks: JoinSet>, -} - -impl SessionHandle { - /// Close the session gracefully. - /// - /// After calling this, no further protocol messages will be sent from this node. - /// Previously queued messages will still be sent out. The session will only be closed - /// once the other peer closes their senders as well. - pub fn close(&self) { - debug!("trigger user close"); - self.handle.close() - } - - /// Wait for the session to finish. - /// - /// Returns an error if the session failed to complete. 
- pub async fn join(&mut self) -> anyhow::Result<()> { - let session_res = self.handle.on_finish().await; - let net_tasks_res = join_all(&mut self.tasks).await; - match session_res { - Err(err) => Err(err.into()), - Ok(()) => net_tasks_res, - } - } + let conn = WillowConn::connect(conn, me, our_role).await?; + let handle = actor.init_session(conn, intents).await?; + Ok(handle) } #[derive(Debug, thiserror::Error)] @@ -327,8 +326,8 @@ mod tests { use tracing::info; use crate::{ - actor::ActorHandle, auth::{CapSelector, DelegateTo}, + engine::ActorHandle, form::{AuthForm, EntryForm, PayloadForm, SubspaceForm, TimestampForm}, net::run, proto::{ @@ -337,7 +336,10 @@ mod tests { meadowcap::AccessMode, willow::{Entry, InvalidPath, Path}, }, - session::{intents::IntentHandle, Interests, Role, SessionInit, SessionMode}, + session::{ + intents::{Intent, IntentHandle}, + Interests, Role, SessionInit, SessionMode, + }, }; const ALPN: &[u8] = b"iroh-willow/0"; @@ -404,8 +406,8 @@ mod tests { let init_alfie = SessionInit::new(Interests::All, SessionMode::ReconcileOnce); let init_betty = SessionInit::new(Interests::All, SessionMode::ReconcileOnce); - let (mut intent_alfie, intent_alfie_data) = IntentHandle::new(init_alfie); - let (mut intent_betty, intent_betty_data) = IntentHandle::new(init_betty); + let (intent_alfie, mut intent_handle_alfie) = Intent::new(init_alfie); + let (intent_betty, mut intent_handle_betty) = Intent::new(init_betty); info!("init took {:?}", start.elapsed()); @@ -423,26 +425,30 @@ mod tests { handle_alfie.clone(), conn_alfie, Role::Alfie, - vec![intent_alfie_data] + vec![intent_alfie] ), run( node_id_betty, handle_betty.clone(), conn_betty, Role::Betty, - vec![intent_betty_data] + vec![intent_betty] ) ); let mut session_alfie = session_alfie?; let mut session_betty = session_betty?; - let (res_alfie, res_betty) = tokio::join!(intent_alfie.complete(), intent_betty.complete()); + let (res_alfie, res_betty) = tokio::join!( + intent_handle_alfie.complete(), + 
intent_handle_betty.complete() + ); info!("alfie intent res {:?}", res_alfie); info!("betty intent res {:?}", res_betty); assert!(res_alfie.is_ok()); assert!(res_betty.is_ok()); - let (res_alfie, res_betty) = tokio::join!(session_alfie.join(), session_betty.join()); + let (res_alfie, res_betty) = + tokio::join!(session_alfie.complete(), session_betty.complete()); info!("alfie session res {:?}", res_alfie); info!("betty session res {:?}", res_betty); assert!(res_alfie.is_ok()); @@ -555,8 +561,8 @@ mod tests { let init_alfie = SessionInit::new(Interests::All, SessionMode::Live); let init_betty = SessionInit::new(Interests::All, SessionMode::Live); - let (mut intent_alfie, intent_alfie_data) = IntentHandle::new(init_alfie); - let (mut intent_betty, intent_betty_data) = IntentHandle::new(init_betty); + let (intent_alfie, mut intent_handle_alfie) = Intent::new(init_alfie); + let (intent_betty, mut intent_handle_betty) = Intent::new(init_betty); let (session_alfie, session_betty) = tokio::join!( run( @@ -564,14 +570,14 @@ mod tests { handle_alfie.clone(), conn_alfie, Role::Alfie, - vec![intent_alfie_data] + vec![intent_alfie] ), run( node_id_betty, handle_betty.clone(), conn_betty, Role::Betty, - vec![intent_betty_data] + vec![intent_betty] ) ); let mut session_alfie = session_alfie?; @@ -583,14 +589,18 @@ mod tests { tokio::time::sleep(Duration::from_secs(1)).await; session_alfie.close(); - let (res_alfie, res_betty) = tokio::join!(intent_alfie.complete(), intent_betty.complete()); + let (res_alfie, res_betty) = tokio::join!( + intent_handle_alfie.complete(), + intent_handle_betty.complete() + ); info!(time=?start.elapsed(), "reconciliation finished"); info!("alfie intent res {:?}", res_alfie); info!("betty intent res {:?}", res_betty); assert!(res_alfie.is_ok()); assert!(res_betty.is_ok()); - let (res_alfie, res_betty) = tokio::join!(session_alfie.join(), session_betty.join()); + let (res_alfie, res_betty) = + tokio::join!(session_alfie.complete(), 
session_betty.complete()); info!("alfie session res {:?}", res_alfie); info!("betty session res {:?}", res_betty); diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 66e3086b03..f5db3a4998 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -1,16 +1,17 @@ use std::{ collections::{hash_map, BTreeMap, BTreeSet, HashMap, HashSet}, - sync::mpsc, + sync::Arc, }; -use tokio::sync::oneshot; +use tokio::sync::{mpsc, oneshot}; +use tokio_util::sync::CancellationToken; use crate::{ auth::CapSelector, proto::{grouping::AreaOfInterest, sync::ReadAuthorisation}, session::{ - events::EventKind, - intents::{IntentChannels, IntentData}, + error::ChannelReceiverDropped, + intents::{EventKind, Intent}, }, }; @@ -18,8 +19,7 @@ mod aoi_finder; mod capabilities; pub mod channels; mod data; -mod error; -pub mod events; +pub mod error; pub mod intents; mod pai_finder; mod payload; @@ -126,21 +126,8 @@ impl From for Interests { #[derive(Debug)] pub enum SessionUpdate { - SubmitIntent(IntentData), -} - -// impl Interest { -// pub fn merge(&self, other: &Interests) -> Self { -// match (self, other) { -// (Self::All, _) => Self::All, -// (_, Self::All) => Self::All, -// (Self::Some(a), Self::Some(b)) => { -// -// } -// -// } -// } -// } + SubmitIntent(Intent), +} #[derive(Debug, Default, Clone)] pub enum AreaOfInterestSelector { @@ -174,3 +161,54 @@ pub enum Scope { /// Resources bound by the other peer. 
Theirs, } + +#[derive(Debug, Clone)] +pub struct EventSender(pub mpsc::Sender); + +impl EventSender { + pub async fn send(&self, event: SessionEvent) -> Result<(), ChannelReceiverDropped> { + self.0.send(event).await.map_err(|_| ChannelReceiverDropped) + } +} + +#[derive(Debug)] +pub enum SessionEvent { + Established, + Complete { result: Result<(), Arc> }, +} + +#[derive(Debug)] +pub struct SessionHandle { + pub cancel_token: CancellationToken, + pub update_tx: mpsc::Sender, + pub event_rx: mpsc::Receiver, +} + +impl SessionHandle { + /// Wait for the session to finish. + /// + /// Returns an error if the session failed to complete. + pub async fn complete(&mut self) -> Result<(), Arc> { + while let Some(event) = self.event_rx.recv().await { + if let SessionEvent::Complete { result } = event { + return result; + } + } + Ok(()) + } + + pub async fn send_update(&self, update: SessionUpdate) -> anyhow::Result<()> { + self.update_tx.send(update).await?; + Ok(()) + } + + /// Finish the session gracefully. + /// + /// After calling this, no further protocol messages will be sent from this node. + /// Previously queued messages will still be sent out. The session will only be closed + /// once the other peer closes their senders as well. 
+ pub fn close(&self) { + tracing::debug!("close session (session handle close called)"); + self.cancel_token.cancel(); + } +} diff --git a/iroh-willow/src/session/events.rs b/iroh-willow/src/session/events.rs deleted file mode 100644 index f978e59f15..0000000000 --- a/iroh-willow/src/session/events.rs +++ /dev/null @@ -1,771 +0,0 @@ -use std::{ - collections::{hash_map, HashMap, HashSet}, - sync::Arc, -}; - -use anyhow::{anyhow, Context, Result}; -use futures_buffered::join_all; -use futures_concurrency::future::Join; -use futures_lite::StreamExt; -use futures_util::FutureExt; -use iroh_net::{ - dialer::Dialer, endpoint::Connection, util::SharedAbortingJoinHandle, Endpoint, NodeId, -}; -use tokio::{ - io::Interest, - sync::{mpsc, oneshot}, - task::{AbortHandle, JoinHandle, JoinSet}, -}; -use tokio_stream::{wrappers::ReceiverStream, StreamMap, StreamNotifyClose}; -use tokio_util::sync::CancellationToken; -use tracing::{error_span, Instrument}; - -use crate::{ - actor::{Actor, ActorHandle, SessionHandle}, - auth::{Auth, InterestMap}, - net::{setup, ALPN}, - proto::{ - grouping::{Area, AreaOfInterest}, - keys::NamespaceId, - sync::{ReadAuthorisation, ReadCapability}, - }, - session::{ - error::ChannelReceiverDropped, - intents::{IntentChannels, IntentData, IntentHandle, IntentInfo}, - Error, Interests, Role, SessionId, SessionInit, SessionMode, SessionUpdate, - }, - store::traits::Storage, -}; - -const COMMAND_CHANNEL_CAP: usize = 128; - -#[derive(Debug, Clone)] -pub struct EventSender(pub mpsc::Sender); - -impl EventSender { - pub async fn send(&self, event: SessionEvent) -> Result<(), ChannelReceiverDropped> { - self.0.send(event).await.map_err(|_| ChannelReceiverDropped) - } -} - -#[derive(Debug)] -pub enum SessionEvent { - Revealed, - Complete { result: Result<(), Arc> }, -} - -#[derive(Debug, Clone, Eq, PartialEq)] -pub enum EventKind { - CapabilityIntersection { - namespace: NamespaceId, - area: Area, - }, - InterestIntersection { - namespace: NamespaceId, - 
area: AreaOfInterest, - }, - Reconciled { - namespace: NamespaceId, - area: AreaOfInterest, - }, - ReconciledAll, - Abort { - error: Arc, - }, -} - -impl EventKind { - pub fn namespace(&self) -> Option { - match self { - EventKind::CapabilityIntersection { namespace, .. } => Some(*namespace), - EventKind::InterestIntersection { namespace, .. } => Some(*namespace), - EventKind::Reconciled { namespace, .. } => Some(*namespace), - _ => None, - } - } -} - -#[derive(Debug)] -pub enum Command { - SubmitIntent { peer: NodeId, intent: IntentData }, - HandleConnection { conn: Connection }, -} - -#[derive(Debug, Clone)] -pub struct ManagedHandle { - actor: ActorHandle, - command_tx: mpsc::Sender, - _task_handle: SharedAbortingJoinHandle>, -} - -impl ManagedHandle { - pub fn spawn( - endpoint: Endpoint, - create_store: impl 'static + Send + FnOnce() -> S, - ) -> Self { - let me = endpoint.node_id(); - let actor = ActorHandle::spawn(create_store, me); - let (command_tx, command_rx) = mpsc::channel(COMMAND_CHANNEL_CAP); - let peer_manager = PeerManager { - session_event_rx: Default::default(), - betty_intent_rx: Default::default(), - command_rx, - establish_tasks: Default::default(), - net_tasks: Default::default(), - actor: actor.clone(), - peers: Default::default(), - endpoint: endpoint.clone(), - dialer: Dialer::new(endpoint), - }; - let task_handle = tokio::task::spawn( - async move { peer_manager.run().await.map_err(|err| format!("{err:?}")) } - .instrument(error_span!("peer_manager", me = me.fmt_short())), - ); - ManagedHandle { - actor, - command_tx, - _task_handle: task_handle.into(), - } - } - - pub async fn handle_connection(&self, conn: Connection) -> Result<()> { - self.command_tx - .send(Command::HandleConnection { conn }) - .await?; - Ok(()) - } - - pub async fn sync_with_peer(&self, peer: NodeId, init: SessionInit) -> Result { - // TODO: expose cap - let (handle, intent) = IntentHandle::new(init); - self.command_tx - .send(Command::SubmitIntent { peer, intent }) 
- .await?; - Ok(handle) - } -} - -impl std::ops::Deref for ManagedHandle { - type Target = ActorHandle; - - fn deref(&self) -> &Self::Target { - &self.actor - } -} - -type NetTasks = JoinSet>; - -type EstablishRes = (NodeId, Result<(NetTasks, SessionHandle)>); - -#[derive(derive_more::Debug)] -pub struct PeerManager { - session_event_rx: StreamMap>, - betty_intent_rx: StreamMap>, - command_rx: mpsc::Receiver, - establish_tasks: JoinSet, - net_tasks: JoinSet<(NodeId, Result<()>)>, - actor: ActorHandle, - peers: HashMap, - endpoint: Endpoint, - dialer: Dialer, -} - -impl PeerManager { - pub async fn run(mut self) -> Result<(), Error> { - loop { - tokio::select! { - Some((session_id, event)) = self.session_event_rx.next(), if !self.session_event_rx.is_empty() => { - self.received_event(session_id, event).await; - } - Some((_session_id, _event)) = self.betty_intent_rx.next(), if !self.betty_intent_rx.is_empty() => { - // TODO: Do we want to emit these somewhere? - // self.received_event(session_id, event).await; - } - Some(command) = self.command_rx.recv() => { - self.received_command(command).await; - } - Some(res) = self.establish_tasks.join_next(), if !self.establish_tasks.is_empty() => { - match res { - Err(err) if err.is_cancelled() => continue, - Err(err) => Err(err).context("establish task paniced")?, - Ok((peer, Ok((tasks, handle)))) => self.on_established(peer, handle, tasks)?, - Ok((peer, Err(err))) => self.remove_peer(peer, Err(Arc::new(Error::Net(err)))).await, - } - } - Some(res) = self.net_tasks.join_next(), if !self.net_tasks.is_empty() => { - match res { - Err(err) if err.is_cancelled() => continue, - Err(err) => Err(err).context("net task paniced")?, - Ok((_peer, Ok(())))=> continue, - Ok((peer, Err(err))) => { - // TODO: Forward to session? 
- tracing::warn!(?peer, ?err, "net task failed"); - } - } - }, - Some((peer, conn)) = self.dialer.next() => { - match conn { - Ok(conn) => self.handle_connection(conn, Role::Alfie).await, - Err(err) => self.on_dial_fail(peer, err).await, - } - - } - else => break, - } - } - Ok(()) - } - - pub async fn received_command(&mut self, command: Command) { - tracing::info!(?command, "command"); - match command { - Command::SubmitIntent { peer, intent } => { - if let Err(err) = self.submit_intent(peer, intent).await { - tracing::warn!("failed to submit intent: {err:?}"); - } - } - Command::HandleConnection { conn } => { - self.handle_connection(conn, Role::Betty).await; - } - } - } - - async fn remove_peer(&mut self, peer: NodeId, result: Result<(), Arc>) { - let Some(peer_state) = self.peers.remove(&peer) else { - tracing::warn!(?peer, "attempted to remove unknown peer"); - return; - }; - let intents = match peer_state { - PeerState::Connecting { intents, .. } => { - self.dialer.abort_dial(&peer); - Some(intents) - } - PeerState::Establishing { intents, .. } => Some(intents), - PeerState::Active { cancel_token, .. 
} => { - cancel_token.cancel(); - None - } - PeerState::Placeholder => unreachable!(), - }; - if let Some(intents) = intents { - if let Err(error) = result { - join_all( - intents - .into_iter() - .map(|intent| intent.send_abort(error.clone())), - ) - .await; - } - } - self.session_event_rx.remove(&peer); - self.betty_intent_rx.remove(&peer); - } - - async fn on_dial_fail(&mut self, peer: NodeId, err: anyhow::Error) { - let result = Err(Arc::new(Error::Net(err))); - self.remove_peer(peer, result).await; - } - - fn on_established( - &mut self, - peer: NodeId, - session_handle: SessionHandle, - mut net_tasks: NetTasks, - ) -> anyhow::Result<()> { - let peer_state = self - .peers - .get_mut(&peer) - .ok_or_else(|| anyhow!("unreachable: on_established called for unknown peer"))?; - let current_state = std::mem::replace(peer_state, PeerState::Placeholder); - let PeerState::Establishing { - // our_role, - intents: _, - betty_catchall_intent, - } = current_state - else { - anyhow::bail!("unreachable: on_established called for peer in wrong state") - }; - let SessionHandle { - // session_id, - cancel_token, - update_tx, - event_rx, - } = session_handle; - self.net_tasks.spawn( - async move { crate::net::join_all(&mut net_tasks).await }.map(move |r| (peer, r)), - ); - self.session_event_rx - .insert(peer, ReceiverStream::new(event_rx)); - // TODO: submit intents that were submitted while establishing - // for intent in intents { - // update_tx.send(SessionUpdate::SubmitIntent(intent)).await?; - // } - if let Some(handle) = betty_catchall_intent { - self.betty_intent_rx.insert(peer, handle.split().1); - } - *peer_state = PeerState::Active { - // session_id, - cancel_token, - update_tx, - // our_role, - }; - Ok(()) - } - - pub async fn submit_intent(&mut self, peer: NodeId, intent: IntentData) -> Result<()> { - match self.peers.get_mut(&peer) { - None => { - self.dialer.queue_dial(peer, ALPN); - let intents = vec![intent]; - let peer_state = PeerState::Connecting { intents }; 
- self.peers.insert(peer, peer_state); - } - Some(state) => match state { - PeerState::Connecting { intents } => { - intents.push(intent); - } - PeerState::Establishing { intents, .. } => { - intents.push(intent); - } - PeerState::Active { update_tx, .. } => { - update_tx.send(SessionUpdate::SubmitIntent(intent)).await?; - } - PeerState::Placeholder => unreachable!(), - }, - }; - Ok(()) - } - - pub async fn received_event(&mut self, peer: NodeId, event: SessionEvent) { - tracing::info!(?event, "event"); - match event { - SessionEvent::Revealed => {} - SessionEvent::Complete { result } => { - self.remove_peer(peer, result).await; - } - } - } - async fn handle_connection(&mut self, conn: Connection, our_role: Role) { - let peer = match iroh_net::endpoint::get_remote_node_id(&conn) { - Ok(node_id) => node_id, - Err(err) => { - tracing::warn!(?err, "skip connection: failed to get node id"); - return; - } - }; - if let Err(err) = self.handle_connection_inner(peer, conn, our_role) { - tracing::warn!(?peer, ?err, "failed to establish connection"); - let result = Err(Arc::new(Error::Net(err))); - self.remove_peer(peer, result).await; - } - } - - fn handle_connection_inner( - &mut self, - peer: NodeId, - conn: Connection, - our_role: Role, - ) -> Result<()> { - let peer_state = self.peers.get_mut(&peer); - let (intents, betty_catchall_intent) = match our_role { - Role::Alfie => { - let peer_state = peer_state - .ok_or_else(|| anyhow!("got connection for peer without any intents"))?; - let peer_state = std::mem::replace(peer_state, PeerState::Placeholder); - match peer_state { - PeerState::Placeholder => unreachable!(), - PeerState::Active { .. } => { - tracing::warn!("got connection for already active peer"); - return Ok(()); - } - PeerState::Establishing { .. 
} => { - tracing::warn!("got connection for already establishing peer"); - return Ok(()); - } - PeerState::Connecting { intents } => (intents, None), - } - } - Role::Betty => { - let mut intents = if let Some(peer_state) = peer_state { - let peer_state = std::mem::replace(peer_state, PeerState::Placeholder); - match peer_state { - PeerState::Placeholder => unreachable!(), - PeerState::Active { .. } => { - tracing::warn!("got connection for already active peer"); - return Ok(()); - } - PeerState::Establishing { .. } => { - tracing::warn!("got connection for already establishing peer"); - return Ok(()); - } - PeerState::Connecting { intents, .. } => { - // TODO: Decide which conn to use. - intents - } - } - } else { - Default::default() - }; - let all_init = SessionInit::new(Interests::All, SessionMode::Live); - let (handle, data) = IntentHandle::new(all_init); - intents.push(data); - (intents, Some(handle)) - } - }; - - let me = self.endpoint.node_id(); - let actor = self.actor.clone(); - let establish_fut = async move { - let (initial_transmission, channels, tasks) = setup(conn, me, our_role).await?; - let session_handle = actor - .init_session(peer, our_role, initial_transmission, channels, intents) - .await?; - Ok::<_, anyhow::Error>((tasks, session_handle)) - }; - let establish_fut = establish_fut.map(move |res| (peer, res)); - let _task_handle = self.establish_tasks.spawn(establish_fut); - let peer_state = PeerState::Establishing { - // our_role, - intents: Vec::new(), - betty_catchall_intent, - }; - self.peers.insert(peer, peer_state); - Ok(()) - } -} - -#[derive(Debug)] -enum PeerState { - Connecting { - intents: Vec, - }, - Establishing { - // our_role: Role, - intents: Vec, - betty_catchall_intent: Option, - }, - Active { - // session_id: SessionId, - // our_role: Role, - update_tx: mpsc::Sender, - cancel_token: CancellationToken, - }, - Placeholder, -} - -#[derive(Debug, thiserror::Error)] -#[error("receiver dropped")] -pub struct ReceiverDropped; - 
-#[cfg(test)] -mod tests { - use bytes::Bytes; - use iroh_net::{Endpoint, NodeAddr, NodeId}; - use rand::SeedableRng; - use rand_chacha::ChaCha12Rng; - use std::collections::HashMap; - - use super::{EventKind, ManagedHandle, ALPN}; - use crate::{ - actor::ActorHandle, - auth::{CapSelector, DelegateTo}, - form::{AuthForm, EntryForm, PayloadForm, SubspaceForm, TimestampForm}, - // net::run, - proto::{ - grouping::{Area, AreaOfInterest, ThreeDRange}, - keys::{NamespaceId, NamespaceKind, UserId}, - meadowcap::AccessMode, - willow::{Entry, InvalidPath, Path}, - }, - session::{Interests, Role, SessionInit, SessionMode}, - }; - - fn create_rng(seed: &str) -> ChaCha12Rng { - let seed = iroh_base::hash::Hash::new(seed); - rand_chacha::ChaCha12Rng::from_seed(*(seed.as_bytes())) - } - - #[tokio::test(flavor = "multi_thread")] - async fn peer_manager_two_intents() -> anyhow::Result<()> { - iroh_test::logging::setup_multithreaded(); - let mut rng = create_rng("peer_manager_two_intents"); - let ( - shutdown, - namespace, - (alfie, _alfie_node_id, _alfie_user), - (betty, betty_node_id, betty_user), - ) = create_and_setup_two(&mut rng).await?; - - insert(&betty, namespace, betty_user, &[b"foo", b"1"], "foo 1").await?; - insert(&betty, namespace, betty_user, &[b"bar", b"2"], "bar 2").await?; - insert(&betty, namespace, betty_user, &[b"bar", b"3"], "bar 3").await?; - - let task_foo = tokio::task::spawn({ - let alfie = alfie.clone(); - async move { - let path = Path::new(&[b"foo"]).unwrap(); - - let interests = Interests::select().area(namespace, [Area::path(path.clone())]); - let init = SessionInit::new(interests, SessionMode::ReconcileOnce); - let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); - - assert_eq!( - intent.next().await.unwrap(), - EventKind::CapabilityIntersection { - namespace, - area: Area::full(), - } - ); - - assert_eq!( - intent.next().await.unwrap(), - EventKind::InterestIntersection { - namespace, - area: Area::path(path.clone()).into() - 
} - ); - - assert_eq!( - intent.next().await.unwrap(), - EventKind::Reconciled { - namespace, - area: Area::path(path.clone()).into() - } - ); - - assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); - - // assert_eq!( - // intent.next().await.unwrap(), - // EventKind::Closed { result: Ok(()) } - // ); - - assert!(intent.next().await.is_none()); - } - }); - - let task_bar = tokio::task::spawn({ - let alfie = alfie.clone(); - async move { - let path = Path::new(&[b"bar"]).unwrap(); - - let interests = Interests::select().area(namespace, [Area::path(path.clone())]); - let init = SessionInit::new(interests, SessionMode::ReconcileOnce); - - let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); - - assert_eq!( - intent.next().await.unwrap(), - EventKind::CapabilityIntersection { - namespace, - area: Area::full(), - } - ); - - assert_eq!( - intent.next().await.unwrap(), - EventKind::InterestIntersection { - namespace, - area: Area::path(path.clone()).into() - } - ); - - assert_eq!( - intent.next().await.unwrap(), - EventKind::Reconciled { - namespace, - area: Area::path(path.clone()).into() - } - ); - - assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); - - // assert_eq!( - // intent.next().await.unwrap(), - // EventKind::Closed { result: Ok(()) } - // ); - - assert!(intent.next().await.is_none()); - } - }); - - task_foo.await.unwrap(); - task_bar.await.unwrap(); - shutdown(); - Ok(()) - } - - #[tokio::test(flavor = "multi_thread")] - async fn peer_manager_update_intent() -> anyhow::Result<()> { - iroh_test::logging::setup_multithreaded(); - let mut rng = create_rng("peer_manager_update_intent"); - let ( - shutdown, - namespace, - (alfie, _alfie_node_id, _alfie_user), - (betty, betty_node_id, betty_user), - ) = create_and_setup_two(&mut rng).await?; - - insert(&betty, namespace, betty_user, &[b"foo"], "foo 1").await?; - insert(&betty, namespace, betty_user, &[b"bar"], "bar 1").await?; - - let path = 
Path::new(&[b"foo"]).unwrap(); - let interests = Interests::select().area(namespace, [Area::path(path.clone())]); - let init = SessionInit::new(interests, SessionMode::Live); - let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); - - println!("start"); - assert_eq!( - intent.next().await.unwrap(), - EventKind::CapabilityIntersection { - namespace, - area: Area::full(), - } - ); - println!("first in!"); - assert_eq!( - intent.next().await.unwrap(), - EventKind::InterestIntersection { - namespace, - area: Area::path(path.clone()).into() - } - ); - assert_eq!( - intent.next().await.unwrap(), - EventKind::Reconciled { - namespace, - area: Area::path(path.clone()).into() - } - ); - assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); - - let path = Path::new(&[b"bar"]).unwrap(); - let interests = Interests::select().area(namespace, [Area::path(path.clone())]); - intent.add_interests(interests).await?; - - assert_eq!( - intent.next().await.unwrap(), - EventKind::InterestIntersection { - namespace, - area: Area::path(path.clone()).into() - } - ); - assert_eq!( - intent.next().await.unwrap(), - EventKind::Reconciled { - namespace, - area: Area::path(path.clone()).into() - } - ); - - assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); - - intent.close().await; - - assert!(intent.next().await.is_none(),); - - shutdown(); - Ok(()) - } - - pub async fn create_and_setup_two( - rng: &mut rand_chacha::ChaCha12Rng, - ) -> anyhow::Result<( - impl Fn(), - NamespaceId, - (ManagedHandle, NodeId, UserId), - (ManagedHandle, NodeId, UserId), - )> { - let (alfie, alfie_ep, alfie_addr, alfie_task) = create(rng).await?; - let (betty, betty_ep, betty_addr, betty_task) = create(rng).await?; - - let betty_node_id = betty_addr.node_id; - let alfie_node_id = alfie_addr.node_id; - alfie_ep.add_node_addr(betty_addr)?; - betty_ep.add_node_addr(alfie_addr)?; - - let (namespace_id, alfie_user, betty_user) = setup_and_delegate(&alfie, 
&betty).await?; - - let shutdown = move || { - betty_task.abort(); - alfie_task.abort(); - }; - Ok(( - shutdown, - namespace_id, - (alfie, alfie_node_id, alfie_user), - (betty, betty_node_id, betty_user), - )) - } - - pub async fn create( - rng: &mut rand_chacha::ChaCha12Rng, - ) -> anyhow::Result<( - ManagedHandle, - Endpoint, - iroh_net::NodeAddr, - tokio::task::JoinHandle>, - )> { - let endpoint = Endpoint::builder() - .secret_key(iroh_net::key::SecretKey::generate_with_rng(rng)) - .alpns(vec![ALPN.to_vec()]) - .bind(0) - .await?; - let node_addr = endpoint.node_addr().await?; - let payloads = iroh_blobs::store::mem::Store::default(); - let create_store = move || crate::store::memory::Store::new(payloads); - let handle = ManagedHandle::spawn(endpoint.clone(), create_store); - let accept_task = tokio::task::spawn({ - let handle = handle.clone(); - let endpoint = endpoint.clone(); - async move { - while let Some(mut conn) = endpoint.accept().await { - let alpn = conn.alpn().await?; - if alpn != ALPN { - continue; - } - let conn = conn.await?; - handle.handle_connection(conn).await?; - } - Ok::<_, anyhow::Error>(()) - } - }); - Ok((handle, endpoint, node_addr, accept_task)) - } - - async fn setup_and_delegate( - alfie: &ManagedHandle, - betty: &ManagedHandle, - ) -> anyhow::Result<(NamespaceId, UserId, UserId)> { - let user_alfie = alfie.create_user().await?; - let user_betty = betty.create_user().await?; - - let namespace_id = alfie - .create_namespace(NamespaceKind::Owned, user_alfie) - .await?; - - let cap_for_betty = alfie - .delegate_caps( - CapSelector::widest(namespace_id), - AccessMode::Write, - DelegateTo::new(user_betty, None), - ) - .await?; - - betty.import_caps(cap_for_betty).await?; - Ok((namespace_id, user_alfie, user_betty)) - } - - async fn insert( - handle: &ManagedHandle, - namespace_id: NamespaceId, - user: UserId, - path: &[&[u8]], - bytes: impl Into, - ) -> anyhow::Result<()> { - let path = Path::new(path)?; - let entry = 
EntryForm::new_bytes(namespace_id, path, bytes); - handle.insert(entry, user).await?; - Ok(()) - } -} diff --git a/iroh-willow/src/session/intents.rs b/iroh-willow/src/session/intents.rs index 957a0a79ae..158fe14f48 100644 --- a/iroh-willow/src/session/intents.rs +++ b/iroh-willow/src/session/intents.rs @@ -21,7 +21,6 @@ use tokio_util::sync::{CancellationToken, PollSender}; use tracing::{debug, error_span, Instrument}; use crate::{ - actor::{Actor, ActorHandle, SessionHandle}, auth::{Auth, InterestMap}, net::{setup, ALPN}, proto::{ @@ -30,9 +29,8 @@ use crate::{ sync::{ReadAuthorisation, ReadCapability}, }, session::{ - error::ChannelReceiverDropped, - events::{EventKind, ReceiverDropped}, - Error, Interests, Role, SessionId, SessionInit, SessionMode, SessionUpdate, + error::ChannelReceiverDropped, Error, Interests, Role, SessionHandle, SessionId, + SessionInit, SessionMode, SessionUpdate, }, store::traits::Storage, util::gen_stream::GenStream, @@ -48,37 +46,169 @@ pub type IntentId = u64; type Sender = mpsc::Sender; type Receiver = mpsc::Receiver; +#[derive(Debug, Clone, Eq, PartialEq)] +pub enum EventKind { + CapabilityIntersection { + namespace: NamespaceId, + area: Area, + }, + InterestIntersection { + namespace: NamespaceId, + area: AreaOfInterest, + }, + Reconciled { + namespace: NamespaceId, + area: AreaOfInterest, + }, + ReconciledAll, + Abort { + error: Arc, + }, +} + +impl EventKind { + pub fn namespace(&self) -> Option { + match self { + EventKind::CapabilityIntersection { namespace, .. } => Some(*namespace), + EventKind::InterestIntersection { namespace, .. } => Some(*namespace), + EventKind::Reconciled { namespace, .. 
} => Some(*namespace), + _ => None, + } + } +} + #[derive(Debug)] -pub struct IntentData { - pub init: SessionInit, - pub channels: IntentChannels, +pub enum IntentUpdate { + AddInterests(Interests), + Close, } -impl IntentData { - pub(super) async fn send_abort(self, error: Arc) { - self.channels - .event_tx - .send(EventKind::Abort { error }) - .await - .ok(); +/// A synchronisation intent. +#[derive(Debug)] +pub struct Intent { + pub(super) init: SessionInit, + channels: Option, +} + +impl Intent { + /// Create a new intent with associated handle. + /// + /// The returned [`Intent`] must be passed into a session. + /// The returned [`IntentHandle`] can issue updates to the intent, and receives events for the + /// intent. The [`IntentHandle`] must be received from in a loop, otherwise the session will + /// block. + pub fn new(init: SessionInit) -> (Self, IntentHandle) { + Self::new_with_cap(init, INTENT_EVENT_CAP, INTENT_UPDATE_CAP) + } + + /// Create a new detached intent. + /// + /// A detached intent submits interests into a session, but does not allow to issue updates or + /// receive events. + pub fn new_detached(init: SessionInit) -> Self { + Self { + init, + channels: None, + } + } + + fn new_with_cap( + init: SessionInit, + event_cap: usize, + update_cap: usize, + ) -> (Self, IntentHandle) { + let (event_tx, event_rx) = mpsc::channel(event_cap); + let (update_tx, update_rx) = mpsc::channel(update_cap); + let handle = IntentHandle { + event_rx, + update_tx, + }; + let channels = IntentChannels { + event_tx, + update_rx, + }; + let intent = Intent { + init, + channels: Some(channels), + }; + (intent, handle) + } + + /// Abort the intent. + /// + /// Will send a final [`EventKind::Abort`] if the intent is not detached. 
+ pub async fn send_abort(self, error: Arc) { + if let Some(channels) = self.channels { + channels + .event_tx + .send(EventKind::Abort { error }) + .await + .ok(); + } } } +/// Handle to a [`Intent`] #[derive(Debug)] -pub enum Input { +pub struct IntentHandle { + event_rx: Receiver, + update_tx: Sender, +} + +impl IntentHandle { + pub fn split(self) -> (PollSender, ReceiverStream) { + ( + PollSender::new(self.update_tx), + ReceiverStream::new(self.event_rx), + ) + } + + pub async fn next(&mut self) -> Option { + self.event_rx.recv().await + } + + pub async fn complete(&mut self) -> Result<(), Arc> { + while let Some(event) = self.event_rx.recv().await { + if let EventKind::Abort { error } = event { + return Err(error); + } + } + Ok(()) + } + + pub async fn add_interests(&self, interests: impl Into) -> Result<()> { + self.update_tx + .send(IntentUpdate::AddInterests(interests.into())) + .await?; + Ok(()) + } + + pub async fn close(&self) { + self.update_tx.send(IntentUpdate::Close).await.ok(); + } +} + +#[derive(Debug)] +struct IntentChannels { + event_tx: Sender, + update_rx: Receiver, +} + +#[derive(Debug)] +pub(super) enum Input { EmitEvent(EventKind), - SubmitIntent(IntentData), + SubmitIntent(Intent), } #[derive(Debug)] -pub enum Output { +pub(super) enum Output { SubmitInterests(InterestMap), AllIntentsDropped, } #[derive(derive_more::Debug)] -pub struct IntentDispatcher { - pending_intents: VecDeque, +pub(super) struct IntentDispatcher { + pending_intents: VecDeque, intents: HashMap, auth: Auth, #[debug("StreamMap")] @@ -88,7 +218,7 @@ pub struct IntentDispatcher { } impl IntentDispatcher { - pub fn new(auth: Auth, initial_intents: impl IntoIterator) -> Self { + pub(super) fn new(auth: Auth, initial_intents: impl IntoIterator) -> Self { Self { pending_intents: initial_intents.into_iter().collect(), intents: Default::default(), @@ -99,16 +229,21 @@ impl IntentDispatcher { } } - pub async fn abort_all(&self, error: Arc) { + pub(super) async fn abort_all(&self, 
error: Arc) { let _ = futures_buffered::join_all( - self.pending_intents - .iter() - .map(|intent| &intent.channels.event_tx) - .chain(self.intents.values().map(|intent| &intent.event_tx)) - .map(|event_tx| { - let error = error.clone(); - async move { event_tx.send(EventKind::Abort { error }).await } - }), + Iterator::chain( + self.pending_intents + .iter() + .flat_map(|intent| intent.channels.as_ref()) + .map(|ch| &ch.event_tx), + self.intents + .values() + .flat_map(|intent| intent.event_tx.as_ref()), + ) + .map(|event_tx| { + let error = error.clone(); + async move { event_tx.send(EventKind::Abort { error }).await } + }), ) .await; } @@ -116,18 +251,14 @@ impl IntentDispatcher { /// Run the [`IntentDispatcher`]. /// /// The returned stream is a generator, so it must be polled repeatedly to progress. - pub fn run_gen( + pub(super) fn run_gen( &mut self, inbox: impl Stream + 'static, ) -> GenStream> + '_> { GenStream::new(|co| self.run(co, inbox)) } - pub async fn run( - &mut self, - co: Co, - inbox: impl Stream, - ) -> Result<(), Error> { + async fn run(&mut self, co: Co, inbox: impl Stream) -> Result<(), Error> { tokio::pin!(inbox); while let Some(intent) = self.pending_intents.pop_front() { @@ -171,17 +302,21 @@ impl IntentDispatcher { Ok(()) } - async fn submit_intent(&mut self, co: &Co, intent: IntentData) -> Result<(), Error> { + async fn submit_intent(&mut self, co: &Co, intent: Intent) -> Result<(), Error> { let interests = self.auth.resolve_interests(intent.init.interests)?; let intent_id = { let intent_id = self.next_intent_id; self.next_intent_id += 1; intent_id }; - let IntentChannels { - event_tx, - update_rx, - } = intent.channels; + let (event_tx, update_rx) = match intent.channels { + None => (None, None), + Some(IntentChannels { + event_tx, + update_rx, + }) => (Some(event_tx), Some(update_rx)), + }; + let mut info = IntentInfo { interests: flatten_interests(&interests), mode: intent.init.mode, @@ -196,10 +331,12 @@ impl IntentDispatcher { if 
!info.is_complete() { self.intents.insert(intent_id, info); - self.intent_update_rx.insert( - intent_id, - StreamNotifyClose::new(ReceiverStream::new(update_rx)), - ); + if let Some(update_rx) = update_rx { + self.intent_update_rx.insert( + intent_id, + StreamNotifyClose::new(ReceiverStream::new(update_rx)), + ); + } co.yield_(Output::SubmitInterests(interests)).await; } @@ -220,7 +357,7 @@ impl IntentDispatcher { let send_res = futures_buffered::join_all(send_futs).await; for (id, res) in send_res.into_iter() { match res { - Err(ReceiverDropped) => { + Err(ChannelReceiverDropped) => { if !self.intent_update_rx.contains_key(&id) { self.cancel_intent(co, id).await; } @@ -234,7 +371,7 @@ impl IntentDispatcher { } } - pub async fn update_intent( + async fn update_intent( &mut self, co: &Co, intent_id: u64, @@ -257,7 +394,7 @@ impl IntentDispatcher { Ok(()) } - pub async fn cancel_intent(&mut self, co: &Co, intent_id: u64) { + async fn cancel_intent(&mut self, co: &Co, intent_id: u64) { debug!(?intent_id, "cancel intent"); self.intent_update_rx.remove(&intent_id); self.intents.remove(&intent_id); @@ -267,82 +404,11 @@ impl IntentDispatcher { } } -#[derive(Debug)] -pub enum IntentUpdate { - AddInterests(Interests), - Close, -} - -#[derive(Debug)] -pub struct IntentHandle { - event_rx: Receiver, - update_tx: Sender, -} - -#[derive(Debug)] -pub struct IntentChannels { - event_tx: Sender, - update_rx: Receiver, -} - -impl IntentHandle { - pub fn new(init: SessionInit) -> (Self, IntentData) { - let (handle, channels) = Self::with_cap(INTENT_EVENT_CAP, INTENT_UPDATE_CAP); - let data = IntentData { init, channels }; - (handle, data) - } - - pub fn with_cap(event_cap: usize, update_cap: usize) -> (Self, IntentChannels) { - let (event_tx, event_rx) = mpsc::channel(event_cap); - let (update_tx, update_rx) = mpsc::channel(update_cap); - ( - IntentHandle { - event_rx, - update_tx, - }, - IntentChannels { - event_tx, - update_rx, - }, - ) - } - pub fn split(self) -> (PollSender, 
ReceiverStream) { - ( - PollSender::new(self.update_tx), - ReceiverStream::new(self.event_rx), - ) - } - - pub async fn next(&mut self) -> Option { - self.event_rx.recv().await - } - - pub async fn complete(&mut self) -> Result<(), Arc> { - while let Some(event) = self.event_rx.recv().await { - if let EventKind::Abort { error } = event { - return Err(error); - } - } - Ok(()) - } - - pub async fn add_interests(&self, interests: impl Into) -> Result<()> { - self.update_tx - .send(IntentUpdate::AddInterests(interests.into())) - .await?; - Ok(()) - } - - pub async fn close(&self) { - self.update_tx.send(IntentUpdate::Close).await.ok(); - } -} - #[derive(Debug)] pub(super) struct IntentInfo { interests: NamespaceInterests, mode: SessionMode, - event_tx: Sender, + event_tx: Option>, } impl IntentInfo { @@ -360,7 +426,10 @@ impl IntentInfo { } fn events_closed(&self) -> bool { - self.event_tx.is_closed() + match &self.event_tx { + None => false, + Some(event_tx) => event_tx.is_closed(), + } } async fn on_reconciled(&mut self, namespace: NamespaceId, area: &AreaOfInterest) -> Result<()> { @@ -405,7 +474,7 @@ impl IntentInfo { pub(super) async fn handle_event( &mut self, event: &EventKind, - ) -> Result { + ) -> Result { let matches = match event { EventKind::CapabilityIntersection { namespace, .. 
} => { self.interests.contains_key(namespace) @@ -429,8 +498,15 @@ impl IntentInfo { Ok(self.is_complete()) } - async fn send(&self, event: EventKind) -> Result<(), ReceiverDropped> { - self.event_tx.send(event).await.map_err(|_| ReceiverDropped) + async fn send(&self, event: EventKind) -> Result<(), ChannelReceiverDropped> { + if let Some(event_tx) = &self.event_tx { + event_tx + .send(event) + .await + .map_err(|_| ChannelReceiverDropped) + } else { + Ok(()) + } } } diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index 9e0abdde9e..19b3384500 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -39,7 +39,7 @@ use crate::{ session::{ aoi_finder::AoiIntersection, channels::{ChannelSenders, MessageReceiver}, - events::{EventKind, EventSender}, + intents::EventKind, payload::{send_payload_chunked, CurrentPayload}, static_tokens::StaticTokens, Error, Role, SessionId, SessionMode, diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index e097c0a6a2..32504e55ba 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -15,6 +15,7 @@ use tracing::{debug, error_span, trace, warn, Instrument, Span}; use crate::{ auth::InterestMap, + net::WillowConn, proto::sync::{ ControlIssueGuarantee, InitialTransmission, LogicalChannel, Message, SetupBindAreaOfInterest, @@ -24,12 +25,11 @@ use crate::{ capabilities::Capabilities, channels::{ChannelSenders, LogicalChannelReceivers}, data, - events::{EventKind, EventSender, SessionEvent}, - intents::{self, IntentData}, + intents::{self, EventKind, Intent}, pai_finder::{self as pai, PaiFinder, PaiIntersection}, reconciler, static_tokens::StaticTokens, - Channels, Error, Role, SessionId, SessionInit, SessionUpdate, + Channels, Error, EventSender, Role, SessionEvent, SessionId, SessionInit, SessionUpdate, }, store::{ traits::{SecretStorage, Storage}, @@ -58,27 +58,20 @@ const INITIAL_GUARANTEES: u64 = 
u64::MAX; pub async fn run_session( store: Store, - channels: Channels, + conn: WillowConn, + initial_intents: Vec, cancel_token: CancellationToken, session_id: SessionId, - our_role: Role, - // init: SessionInit, - initial_intents: Vec, - initial_transmission: InitialTransmission, event_sender: EventSender, update_receiver: impl Stream + Unpin + 'static, ) -> Result<(), Arc> { - // TODO: update mode to live on intent changes - let mode = initial_intents - .iter() - .fold(SessionMode::ReconcileOnce, |cur, intent| { - match intent.init.mode { - SessionMode::ReconcileOnce => cur, - SessionMode::Live => SessionMode::Live, - } - }); - - debug!(role = ?our_role, ?mode, "start session"); + let WillowConn { + peer: _, + initial_transmission, + our_role, + channels, + join_handle, + } = conn; let Channels { send: channel_sender, recv, @@ -96,6 +89,18 @@ pub async fn run_session( }, } = recv; + // TODO: update mode to live on intent changes + let mode = initial_intents + .iter() + .fold(SessionMode::ReconcileOnce, |cur, intent| { + match intent.init.mode { + SessionMode::ReconcileOnce => cur, + SessionMode::Live => SessionMode::Live, + } + }); + + debug!(role = ?our_role, ?mode, "start session"); + // Make all our receivers close once the cancel_token is triggered. 
let control_recv = Cancelable::new(control_recv, cancel_token.clone()); let reconciliation_recv = Cancelable::new(reconciliation_recv, cancel_token.clone()); @@ -131,6 +136,19 @@ pub async fn run_session( (None, None) }; + let net_fut = with_span(error_span!("net"), async { + // TODO: awaiting the net task handle hangs + drop(join_handle); + // let res = join_handle.await; + // debug!(?res, "net tasks finished"); + // match res { + // Ok(Ok(())) => Ok(()), + // Ok(Err(err)) => Err(Error::Net(err)), + // Err(err) => Err(Error::Net(err.into())), + // } + Ok(()) + }); + let mut intents = intents::IntentDispatcher::new(store.auth().clone(), initial_intents); let intents_fut = with_span(error_span!("intents"), async { use intents::Output; @@ -150,7 +168,6 @@ pub async fn run_session( } } } - debug!("done"); Ok(()) }); @@ -351,6 +368,7 @@ pub async fn run_session( }); let result = ( + net_fut, intents_fut, control_loop, data_loop, @@ -424,7 +442,7 @@ async fn control_loop( match message { Message::CommitmentReveal(msg) => { caps.received_commitment_reveal(our_role, msg.nonce)?; - event_sender.send(SessionEvent::Revealed).await?; + event_sender.send(SessionEvent::Established).await?; } Message::ControlIssueGuarantee(msg) => { let ControlIssueGuarantee { amount, channel } = msg; From 0966fb94bacbe6ab000cd4543235f1e7a94760a3 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 18 Jul 2024 02:00:52 +0200 Subject: [PATCH 089/198] chore: cargo fix --- iroh-willow/src/auth.rs | 2 +- iroh-willow/src/engine.rs | 11 ++++---- iroh-willow/src/engine/actor.rs | 12 +++------ iroh-willow/src/engine/peer_manager.rs | 36 +++++++------------------ iroh-willow/src/net.rs | 7 ++--- iroh-willow/src/proto/grouping.rs | 2 +- iroh-willow/src/session.rs | 9 +++---- iroh-willow/src/session/aoi_finder.rs | 10 +++---- iroh-willow/src/session/capabilities.rs | 4 +-- iroh-willow/src/session/intents.rs | 27 ++++++------------- iroh-willow/src/session/pai_finder.rs | 9 ++----- 
iroh-willow/src/session/reconciler.rs | 4 +-- iroh-willow/src/session/run.rs | 30 +++++++-------------- iroh-willow/src/store/entry.rs | 1 - 14 files changed, 50 insertions(+), 114 deletions(-) diff --git a/iroh-willow/src/auth.rs b/iroh-willow/src/auth.rs index 4c414256cf..ec33c8cdca 100644 --- a/iroh-willow/src/auth.rs +++ b/iroh-willow/src/auth.rs @@ -1,5 +1,5 @@ use std::{ - collections::{BTreeSet, HashMap, HashSet}, + collections::{HashMap, HashSet}, sync::{Arc, RwLock}, }; diff --git a/iroh-willow/src/engine.rs b/iroh-willow/src/engine.rs index da04319d36..8e8faff1bf 100644 --- a/iroh-willow/src/engine.rs +++ b/iroh-willow/src/engine.rs @@ -73,23 +73,22 @@ impl std::ops::Deref for Engine { #[cfg(test)] mod tests { use bytes::Bytes; - use iroh_net::{Endpoint, NodeAddr, NodeId}; + use iroh_net::{Endpoint, NodeId}; use rand::SeedableRng; use rand_chacha::ChaCha12Rng; - use std::collections::HashMap; use crate::{ auth::{CapSelector, DelegateTo}, engine::Engine, - form::{AuthForm, EntryForm, PayloadForm, SubspaceForm, TimestampForm}, + form::EntryForm, net::ALPN, proto::{ - grouping::{Area, AreaOfInterest, ThreeDRange}, + grouping::Area, keys::{NamespaceId, NamespaceKind, UserId}, meadowcap::AccessMode, - willow::{Entry, InvalidPath, Path}, + willow::Path, }, - session::{intents::EventKind, Interests, Role, SessionInit, SessionMode}, + session::{intents::EventKind, Interests, SessionInit, SessionMode}, }; fn create_rng(seed: &str) -> ChaCha12Rng { diff --git a/iroh-willow/src/engine/actor.rs b/iroh-willow/src/engine/actor.rs index 291aa5052a..297274af86 100644 --- a/iroh-willow/src/engine/actor.rs +++ b/iroh-willow/src/engine/actor.rs @@ -1,8 +1,7 @@ -use std::{collections::HashMap, sync::Arc, thread::JoinHandle}; +use std::{sync::Arc, thread::JoinHandle}; use anyhow::{Context, Result}; -use futures_lite::{future::Boxed as BoxFuture, stream::Stream, StreamExt}; -use futures_util::future::{self, FutureExt}; +use futures_lite::{stream::Stream, StreamExt}; use 
iroh_base::key::NodeId; use tokio::{ sync::{mpsc, oneshot}, @@ -19,20 +18,17 @@ use crate::{ grouping::ThreeDRange, keys::{NamespaceId, NamespaceKind, UserId, UserSecretKey}, meadowcap::{self, AccessMode}, - sync::InitialTransmission, willow::{AuthorisedEntry, Entry}, }, session::{ - intents::{EventKind, Intent}, - run_session, Channels, Error, EventSender, Interests, Role, SessionEvent, SessionHandle, - SessionId, SessionInit, SessionUpdate, + intents::Intent, run_session, Error, EventSender, Interests, SessionHandle, SessionId, }, store::{ entry::EntryOrigin, traits::{EntryReader, SecretStorage, Storage}, Store, }, - util::task::{JoinMap, TaskKey}, + util::task::JoinMap, }; pub const INBOX_CAP: usize = 1024; diff --git a/iroh-willow/src/engine/peer_manager.rs b/iroh-willow/src/engine/peer_manager.rs index ceac81e07d..9560b8cbd5 100644 --- a/iroh-willow/src/engine/peer_manager.rs +++ b/iroh-willow/src/engine/peer_manager.rs @@ -1,44 +1,28 @@ -use std::{ - collections::{hash_map, HashMap, HashSet}, - sync::Arc, -}; +use std::{collections::HashMap, sync::Arc}; use anyhow::{anyhow, Context, Result}; use futures_buffered::join_all; -use futures_concurrency::future::future_group; -use futures_concurrency::future::Join; + use futures_lite::StreamExt; use futures_util::FutureExt; use iroh_net::{ - dialer::Dialer, endpoint::{get_remote_node_id, Connection}, - util::SharedAbortingJoinHandle, Endpoint, NodeId, }; use tokio::{ - io::Interest, - sync::{mpsc, oneshot}, - task::{AbortHandle, JoinHandle, JoinSet}, + sync::mpsc, + task::{AbortHandle, JoinSet}, }; -use tokio_stream::{wrappers::ReceiverStream, StreamMap, StreamNotifyClose}; -use tokio_util::sync::CancellationToken; -use tracing::{debug, error_span, Instrument}; +use tokio_stream::{wrappers::ReceiverStream, StreamMap}; + +use tracing::debug; use crate::{ - auth::{Auth, InterestMap}, - net::{setup, WillowConn, ALPN}, - proto::{ - grouping::{Area, AreaOfInterest}, - keys::NamespaceId, - sync::{ReadAuthorisation, 
ReadCapability}, - }, + net::{WillowConn, ALPN}, session::{ - error::ChannelReceiverDropped, - intents::{EventKind, Intent, IntentHandle}, - Error, Interests, Role, SessionEvent, SessionHandle, SessionId, SessionInit, SessionMode, - SessionUpdate, + intents::Intent, Error, Interests, Role, SessionEvent, SessionHandle, SessionInit, + SessionMode, SessionUpdate, }, - store::traits::Storage, }; use super::actor::ActorHandle; diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 87c536f332..0db6c4ffc6 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -21,7 +21,7 @@ use crate::{ LogicalChannelSenders, }, intents::Intent, - Role, SessionHandle, SessionInit, + Role, SessionHandle, }, util::channel::{ inbound_channel, outbound_channel, Guarantees, Reader, Receiver, Sender, Writer, @@ -336,10 +336,7 @@ mod tests { meadowcap::AccessMode, willow::{Entry, InvalidPath, Path}, }, - session::{ - intents::{Intent, IntentHandle}, - Interests, Role, SessionInit, SessionMode, - }, + session::{intents::Intent, Interests, Role, SessionInit, SessionMode}, }; const ALPN: &[u8] = b"iroh-willow/0"; diff --git a/iroh-willow/src/proto/grouping.rs b/iroh-willow/src/proto/grouping.rs index 447bcb4a49..775a02770f 100644 --- a/iroh-willow/src/proto/grouping.rs +++ b/iroh-willow/src/proto/grouping.rs @@ -672,7 +672,7 @@ impl<'a> Encoder for AreaInArea<'a> { #[cfg(test)] mod tests { - use std::collections::{BTreeSet, HashSet}; + use std::collections::HashSet; use crate::proto::{grouping::Area, willow::Path}; diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index f5db3a4998..1c6b44948a 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -1,18 +1,15 @@ use std::{ - collections::{hash_map, BTreeMap, BTreeSet, HashMap, HashSet}, + collections::{hash_map, BTreeSet, HashMap, HashSet}, sync::Arc, }; -use tokio::sync::{mpsc, oneshot}; +use tokio::sync::mpsc; use tokio_util::sync::CancellationToken; use crate::{ auth::CapSelector, 
proto::{grouping::AreaOfInterest, sync::ReadAuthorisation}, - session::{ - error::ChannelReceiverDropped, - intents::{EventKind, Intent}, - }, + session::{error::ChannelReceiverDropped, intents::Intent}, }; mod aoi_finder; diff --git a/iroh-willow/src/session/aoi_finder.rs b/iroh-willow/src/session/aoi_finder.rs index 326b041090..39536aeb6f 100644 --- a/iroh-willow/src/session/aoi_finder.rs +++ b/iroh-willow/src/session/aoi_finder.rs @@ -1,8 +1,7 @@ -use std::{cell::RefCell, collections::hash_map, future::Future, rc::Rc}; +use std::collections::hash_map; use futures_lite::{Stream, StreamExt}; use genawaiter::rc::Co; -use tokio::sync::mpsc; use crate::{ auth::InterestMap, @@ -15,11 +14,8 @@ use crate::{ }, }, session::{ - capabilities::Capabilities, - channels::ChannelSenders, - pai_finder::{self, PaiIntersection}, - resource::ResourceMap, - Error, Scope, + capabilities::Capabilities, pai_finder::PaiIntersection, resource::ResourceMap, Error, + Scope, }, util::gen_stream::GenStream, }; diff --git a/iroh-willow/src/session/capabilities.rs b/iroh-willow/src/session/capabilities.rs index ea3508cb35..f4d89f6fdb 100644 --- a/iroh-willow/src/session/capabilities.rs +++ b/iroh-willow/src/session/capabilities.rs @@ -5,8 +5,6 @@ use std::{ task::{ready, Poll, Waker}, }; -use tokio::sync::Notify; - use crate::{ proto::{ challenge::ChallengeState, @@ -17,7 +15,7 @@ use crate::{ SubspaceCapability, }, }, - session::{channels::ChannelSenders, resource::ResourceMap, Error, Role}, + session::{resource::ResourceMap, Error, Role}, store::traits::SecretStorage, }; diff --git a/iroh-willow/src/session/intents.rs b/iroh-willow/src/session/intents.rs index 158fe14f48..140784659e 100644 --- a/iroh-willow/src/session/intents.rs +++ b/iroh-willow/src/session/intents.rs @@ -1,37 +1,26 @@ use std::{ - collections::{hash_map, HashMap, HashSet, VecDeque}, + collections::{HashMap, HashSet, VecDeque}, future::Future, sync::Arc, }; -use anyhow::{anyhow, Context, Result}; +use anyhow::Result; use 
futures_lite::{Stream, StreamExt}; -use futures_util::{FutureExt, Sink}; +use futures_util::FutureExt; use genawaiter::rc::Co; -use iroh_net::{ - dialer::Dialer, endpoint::Connection, util::SharedAbortingJoinHandle, Endpoint, NodeId, -}; -use tokio::{ - io::Interest, - sync::{mpsc, oneshot}, - task::{AbortHandle, JoinHandle, JoinSet}, -}; + +use tokio::sync::mpsc; use tokio_stream::{wrappers::ReceiverStream, StreamMap, StreamNotifyClose}; -use tokio_util::sync::{CancellationToken, PollSender}; -use tracing::{debug, error_span, Instrument}; +use tokio_util::sync::PollSender; +use tracing::debug; use crate::{ auth::{Auth, InterestMap}, - net::{setup, ALPN}, proto::{ grouping::{Area, AreaOfInterest}, keys::NamespaceId, - sync::{ReadAuthorisation, ReadCapability}, - }, - session::{ - error::ChannelReceiverDropped, Error, Interests, Role, SessionHandle, SessionId, - SessionInit, SessionMode, SessionUpdate, }, + session::{error::ChannelReceiverDropped, Error, Interests, SessionInit, SessionMode}, store::traits::Storage, util::gen_stream::GenStream, }; diff --git a/iroh-willow/src/session/pai_finder.rs b/iroh-willow/src/session/pai_finder.rs index 5277d6fb07..8f956e5d74 100644 --- a/iroh-willow/src/session/pai_finder.rs +++ b/iroh-willow/src/session/pai_finder.rs @@ -8,16 +8,11 @@ //! Licensed under LGPL and ported into this MIT/Apache codebase with explicit permission //! from the original author (gwil). 
-use std::{ - collections::{HashMap, HashSet}, - future::Future, - pin::Pin, - task::{Context, Poll}, -}; +use std::collections::{HashMap, HashSet}; use anyhow::Result; use futures_lite::{Stream, StreamExt}; -use genawaiter::{rc::Gen, GeneratorState}; + use tracing::{debug, trace}; use crate::{ diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index 19b3384500..e26b403c05 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -1,7 +1,6 @@ use std::{ collections::{HashMap, HashSet}, num::NonZeroU64, - ops::ControlFlow, }; use bytes::Bytes; @@ -39,10 +38,9 @@ use crate::{ session::{ aoi_finder::AoiIntersection, channels::{ChannelSenders, MessageReceiver}, - intents::EventKind, payload::{send_payload_chunked, CurrentPayload}, static_tokens::StaticTokens, - Error, Role, SessionId, SessionMode, + Error, Role, SessionId, }, store::{ entry::{EntryChannel, EntryOrigin}, diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 32504e55ba..10b72c6ca7 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -1,41 +1,29 @@ -use std::{cell::RefCell, collections::hash_map, future::Future, rc::Rc, sync::Arc}; +use std::{future::Future, sync::Arc}; -use futures_concurrency::{ - future::{Join, TryJoin}, - stream::StreamExt as _, -}; +use futures_concurrency::{future::TryJoin, stream::StreamExt as _}; use futures_lite::{Stream, StreamExt as _}; -use futures_util::{Sink, SinkExt}; -use genawaiter::GeneratorState; use strum::IntoEnumIterator; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; -use tokio_util::sync::{CancellationToken, PollSender}; -use tracing::{debug, error_span, trace, warn, Instrument, Span}; +use tokio_util::sync::CancellationToken; +use tracing::{debug, error_span, Instrument, Span}; use crate::{ - auth::InterestMap, net::WillowConn, - proto::sync::{ - ControlIssueGuarantee, InitialTransmission, LogicalChannel, 
Message, - SetupBindAreaOfInterest, - }, + proto::sync::{ControlIssueGuarantee, LogicalChannel, Message, SetupBindAreaOfInterest}, session::{ aoi_finder::{self, IntersectionFinder}, capabilities::Capabilities, channels::{ChannelSenders, LogicalChannelReceivers}, data, intents::{self, EventKind, Intent}, - pai_finder::{self as pai, PaiFinder, PaiIntersection}, + pai_finder::{self as pai, PaiFinder}, reconciler, static_tokens::StaticTokens, - Channels, Error, EventSender, Role, SessionEvent, SessionId, SessionInit, SessionUpdate, - }, - store::{ - traits::{SecretStorage, Storage}, - Store, + Channels, Error, EventSender, Role, SessionEvent, SessionId, SessionUpdate, }, - util::{channel::Receiver, stream::Cancelable, task::SharedJoinMap}, + store::{traits::Storage, Store}, + util::{channel::Receiver, stream::Cancelable}, }; use super::{ diff --git a/iroh-willow/src/store/entry.rs b/iroh-willow/src/store/entry.rs index 88fa1b97ab..ca1aecddfa 100644 --- a/iroh-willow/src/store/entry.rs +++ b/iroh-willow/src/store/entry.rs @@ -1,4 +1,3 @@ -use iroh_net::NodeId; use std::{ collections::HashMap, sync::{Arc, Mutex}, From 22d148d9c77e1d124d931f7832761c66f31ce1e9 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 18 Jul 2024 10:12:04 +0200 Subject: [PATCH 090/198] cleanup and fixes --- iroh-willow/src/engine.rs | 3 +- iroh-willow/src/engine/actor.rs | 37 +++++------- iroh-willow/src/engine/peer_manager.rs | 77 +++++++++++++------------ iroh-willow/src/session.rs | 15 ++++- iroh-willow/src/session/capabilities.rs | 22 +++---- iroh-willow/src/session/intents.rs | 40 +++++++++++-- iroh-willow/src/session/run.rs | 23 +------- 7 files changed, 115 insertions(+), 102 deletions(-) diff --git a/iroh-willow/src/engine.rs b/iroh-willow/src/engine.rs index 8e8faff1bf..cf607c18be 100644 --- a/iroh-willow/src/engine.rs +++ b/iroh-willow/src/engine.rs @@ -4,6 +4,7 @@ use tokio::sync::mpsc; use tracing::{error_span, Instrument}; use crate::{ + 
engine::peer_manager::PeerManager, session::{ intents::{Intent, IntentHandle}, SessionInit, @@ -15,7 +16,6 @@ mod actor; mod peer_manager; pub use self::actor::ActorHandle; -pub use self::peer_manager::PeerManager; const PEER_MANAGER_INBOX_CAP: usize = 128; @@ -73,6 +73,7 @@ impl std::ops::Deref for Engine { #[cfg(test)] mod tests { use bytes::Bytes; + use futures_lite::StreamExt; use iroh_net::{Endpoint, NodeId}; use rand::SeedableRng; use rand_chacha::ChaCha12Rng; diff --git a/iroh-willow/src/engine/actor.rs b/iroh-willow/src/engine/actor.rs index 297274af86..0609c2707d 100644 --- a/iroh-willow/src/engine/actor.rs +++ b/iroh-willow/src/engine/actor.rs @@ -1,7 +1,7 @@ use std::{sync::Arc, thread::JoinHandle}; -use anyhow::{Context, Result}; -use futures_lite::{stream::Stream, StreamExt}; +use anyhow::Result; +use futures_lite::stream::Stream; use iroh_base::key::NodeId; use tokio::{ sync::{mpsc, oneshot}, @@ -20,15 +20,12 @@ use crate::{ meadowcap::{self, AccessMode}, willow::{AuthorisedEntry, Entry}, }, - session::{ - intents::Intent, run_session, Error, EventSender, Interests, SessionHandle, SessionId, - }, + session::{intents::Intent, run_session, Error, EventSender, Interests, SessionHandle}, store::{ entry::EntryOrigin, traits::{EntryReader, SecretStorage, Storage}, Store, }, - util::task::JoinMap, }; pub const INBOX_CAP: usize = 1024; @@ -56,9 +53,7 @@ impl ActorHandle { .spawn(move || { let span = error_span!("willow-actor", me=%me.fmt_short()); let _guard = span.enter(); - - let store = (create_store)(); - let store = Store::new(store); + let store = Store::new((create_store)()); let actor = Actor::new(store, inbox_rx); if let Err(error) = actor.run() { error!(?error, "willow actor failed"); @@ -71,10 +66,12 @@ impl ActorHandle { join_handle, } } - pub async fn send(&self, action: Input) -> Result<()> { + + async fn send(&self, action: Input) -> Result<()> { self.inbox_tx.send_async(action).await?; Ok(()) } + pub async fn ingest_entry(&self, 
authorised_entry: AuthorisedEntry) -> Result<()> { let (reply, reply_rx) = oneshot::channel(); self.send(Input::IngestEntry { @@ -285,7 +282,6 @@ struct Actor { inbox_rx: flume::Receiver, store: Store, next_session_id: u64, - session_tasks: JoinMap>>, tasks: JoinSet<()>, } @@ -295,7 +291,6 @@ impl Actor { store, inbox_rx, next_session_id: 0, - session_tasks: Default::default(), tasks: Default::default(), } } @@ -314,10 +309,7 @@ impl Actor { msg = self.inbox_rx.recv_async() => match msg { Err(_) => break, Ok(Input::Shutdown { reply }) => { - tokio::join!( - self.tasks.shutdown(), - self.session_tasks.shutdown() - ); + self.tasks.shutdown().await; drop(self); if let Some(reply) = reply { reply.send(()).ok(); @@ -330,10 +322,6 @@ impl Actor { } } }, - Some((id, res)) = self.session_tasks.next(), if !self.session_tasks.is_empty() => { - let res = res.context("session task paniced")?; - debug!(?id, ?res, "session complete"); - } }; } Ok(()) @@ -374,12 +362,13 @@ impl Actor { ) .instrument(error_span!("session", peer = %peer.fmt_short())); - let _task_key = self.session_tasks.spawn_local(session_id, future); + self.tasks.spawn_local(async move { + if let Err(err) = future.await { + tracing::debug!(?peer, ?session_id, ?err, "session failed"); + } + }); - // let active_session = ActiveSession { task_key }; - // self.sessions.insert(session_id, active_session); let handle = SessionHandle { - // session_id, cancel_token, update_tx, event_rx, diff --git a/iroh-willow/src/engine/peer_manager.rs b/iroh-willow/src/engine/peer_manager.rs index 9560b8cbd5..ccff8b73fb 100644 --- a/iroh-willow/src/engine/peer_manager.rs +++ b/iroh-willow/src/engine/peer_manager.rs @@ -15,7 +15,7 @@ use tokio::{ }; use tokio_stream::{wrappers::ReceiverStream, StreamMap}; -use tracing::debug; +use tracing::{debug, trace}; use crate::{ net::{WillowConn, ALPN}, @@ -27,8 +27,10 @@ use crate::{ use super::actor::ActorHandle; +const ERROR_CODE_IGNORE_CONN: u8 = 1; + #[derive(derive_more::Debug)] -pub enum 
Input { +pub(super) enum Input { SubmitIntent { peer: NodeId, intent: Intent, @@ -40,7 +42,7 @@ pub enum Input { } #[derive(derive_more::Debug)] -pub struct PeerManager { +pub(super) struct PeerManager { actor: ActorHandle, endpoint: Endpoint, inbox: mpsc::Receiver, @@ -50,7 +52,7 @@ pub struct PeerManager { } impl PeerManager { - pub fn new( + pub(super) fn new( actor_handle: ActorHandle, endpoint: Endpoint, inbox: mpsc::Receiver, @@ -64,19 +66,20 @@ impl PeerManager { peers: Default::default(), } } - pub async fn run(mut self) -> Result<(), Error> { + + pub(super) async fn run(mut self) -> Result<(), Error> { loop { tokio::select! { Some(input) = self.inbox.recv() => { - debug!(?input, "tick: inbox"); + trace!(?input, "tick: inbox"); self.handle_input(input).await; } Some((session_id, event)) = self.events_rx.next(), if !self.events_rx.is_empty() => { - debug!(?session_id, ?event, "tick: event"); + trace!(?session_id, ?event, "tick: event"); self.handle_event(session_id, event); } Some(res) = self.tasks.join_next(), if !self.tasks.is_empty() => { - debug!("tick: task.join_next"); + trace!("tick: task joined"); match res { Err(err) if err.is_cancelled() => continue, Err(err) => Err(err).context("establish task paniced")?, @@ -90,16 +93,10 @@ impl PeerManager { Ok(()) } - pub async fn handle_input(&mut self, input: Input) { + async fn handle_input(&mut self, input: Input) { match input { - Input::SubmitIntent { peer, intent } => { - if let Err(err) = self.submit_intent(peer, intent).await { - tracing::warn!("failed to submit intent: {err:?}"); - } - } - Input::HandleConnection { conn } => { - self.handle_connection(conn); - } + Input::SubmitIntent { peer, intent } => self.submit_intent(peer, intent).await, + Input::HandleConnection { conn } => self.handle_connection(conn), } } @@ -107,7 +104,7 @@ impl PeerManager { let peer = match get_remote_node_id(&conn) { Ok(node_id) => node_id, Err(err) => { - tracing::debug!("ignore incoming connection (QUIC handshake failed: 
{err})"); + tracing::debug!("ignore incoming connection (failed to get remote node id: {err})"); return; } }; @@ -118,7 +115,7 @@ impl PeerManager { let abort_handle = self .tasks .spawn(WillowConn::betty(conn, me).map(move |res| (peer, res))); - let init = SessionInit::new(Interests::All, SessionMode::Live); + let init = SessionInit::continuous(Interests::All); let intent = Intent::new_detached(init); self.peers.insert( peer, @@ -138,15 +135,15 @@ impl PeerManager { tracing::debug!( "ignore incoming connection (already dialing and our dial wins)" ); - conn.close(0u8.into(), b"duplicate-our-dial-wins"); + conn.close(ERROR_CODE_IGNORE_CONN.into(), b"duplicate-our-dial-wins"); } else { - // abort our dial attempt + // Abort our dial attempt. abort_handle.abort(); - // set the new abort handle + // Set the new abort handle. *abort_handle = self .tasks .spawn(WillowConn::betty(conn, me).map(move |res| (peer, res))); - // add catchall interest + // Add a catchall interest. let init = SessionInit::new(Interests::All, SessionMode::Live); let intent = Intent::new_detached(init); intents.push(intent); @@ -157,18 +154,24 @@ impl PeerManager { .. }) => { tracing::debug!("ignore incoming connection (already accepting)"); - conn.close(0u8.into(), b"duplicate-already-accepting"); + conn.close( + ERROR_CODE_IGNORE_CONN.into(), + b"duplicate-already-accepting", + ); } Some(PeerState::Active { .. 
}) => { - tracing::debug!("got connection for already active peer"); - conn.close(0u8.into(), b"duplicate-already-accepting"); + tracing::debug!("ignore incoming connection (already connected)"); + conn.close( + ERROR_CODE_IGNORE_CONN.into(), + b"duplicate-already-accepting", + ); } } } async fn failed_to_connect(&mut self, peer: NodeId, error: Arc) { let Some(peer_state) = self.peers.remove(&peer) else { - tracing::warn!(?peer, "attempted to remove unknown peer"); + tracing::warn!(?peer, "connection failure for unknown peer"); return; }; match peer_state { @@ -181,7 +184,7 @@ impl PeerManager { .await; } PeerState::Active { .. } => { - unreachable!("we don't accept connections for active peers") + unreachable!("we never handle connections for active peers") } }; } @@ -209,7 +212,7 @@ impl PeerManager { Ok(()) } - pub async fn submit_intent(&mut self, peer: NodeId, intent: Intent) -> Result<()> { + async fn submit_intent(&mut self, peer: NodeId, intent: Intent) { match self.peers.get_mut(&peer) { None => { let intents = vec![intent]; @@ -235,19 +238,22 @@ impl PeerManager { intents.push(intent); } PeerState::Active { update_tx, .. } => { - update_tx.send(SessionUpdate::SubmitIntent(intent)).await?; + if let Err(intent) = update_tx.send(SessionUpdate::SubmitIntent(intent)).await { + let SessionUpdate::SubmitIntent(intent) = intent.0; + intent.send_abort(Arc::new(Error::ActorFailed)).await; + } } }, }; - Ok(()) } - pub fn handle_event(&mut self, peer: NodeId, event: SessionEvent) { + fn handle_event(&mut self, peer: NodeId, event: SessionEvent) { tracing::info!(?event, "event"); match event { SessionEvent::Established => {} SessionEvent::Complete { .. } => { - self.peers.remove(&peer); + let state = self.peers.remove(&peer); + debug_assert!(matches!(state, Some(PeerState::Active { .. 
}))); } } } @@ -262,10 +268,5 @@ enum PeerState { }, Active { update_tx: mpsc::Sender, - // cancel_token: CancellationToken, }, } - -#[derive(Debug, thiserror::Error)] -#[error("receiver dropped")] -pub struct ReceiverDropped; diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 1c6b44948a..f3a138a4bf 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -146,6 +146,14 @@ impl SessionInit { let interests = interests.into(); Self { interests, mode } } + + pub fn continuous(interests: impl Into) -> Self { + Self::new(interests, SessionMode::Live) + } + + pub fn reconcile_once(interests: impl Into) -> Self { + Self::new(interests, SessionMode::ReconcileOnce) + } } /// The bind scope for resources. @@ -194,8 +202,11 @@ impl SessionHandle { Ok(()) } - pub async fn send_update(&self, update: SessionUpdate) -> anyhow::Result<()> { - self.update_tx.send(update).await?; + /// Submit a new synchronisation intent. + pub async fn submit_intent(&self, intent: Intent) -> anyhow::Result<()> { + self.update_tx + .send(SessionUpdate::SubmitIntent(intent)) + .await?; Ok(()) } diff --git a/iroh-willow/src/session/capabilities.rs b/iroh-willow/src/session/capabilities.rs index f4d89f6fdb..d99534a847 100644 --- a/iroh-willow/src/session/capabilities.rs +++ b/iroh-willow/src/session/capabilities.rs @@ -44,17 +44,17 @@ impl Capabilities { }))) } - pub fn revealed(&self) -> impl Future + '_ { - std::future::poll_fn(|cx| { - let mut inner = self.0.borrow_mut(); - if inner.challenge.is_revealed() { - Poll::Ready(()) - } else { - inner.on_reveal_wakers.push(cx.waker().to_owned()); - Poll::Pending - } - }) - } + // pub fn revealed(&self) -> impl Future + '_ { + // std::future::poll_fn(|cx| { + // let mut inner = self.0.borrow_mut(); + // if inner.challenge.is_revealed() { + // Poll::Ready(()) + // } else { + // inner.on_reveal_wakers.push(cx.waker().to_owned()); + // Poll::Pending + // } + // }) + // } pub fn is_revealed(&self) -> bool { 
self.0.borrow().challenge.is_revealed() diff --git a/iroh-willow/src/session/intents.rs b/iroh-willow/src/session/intents.rs index 140784659e..8853742292 100644 --- a/iroh-willow/src/session/intents.rs +++ b/iroh-willow/src/session/intents.rs @@ -1,7 +1,9 @@ use std::{ collections::{HashMap, HashSet, VecDeque}, future::Future, + pin::Pin, sync::Arc, + task::{Context, Poll}, }; use anyhow::Result; @@ -73,6 +75,11 @@ pub enum IntentUpdate { } /// A synchronisation intent. +/// +/// An intent contains a list of interests to sync, and optionally sends events from the session to +/// a [`IntentHandle`]. The [`IntentHandle`] can also submit updates to the list of interests. +/// +/// Alternatively, an intent can be *detached*, which means that no events or updates are sent. #[derive(Debug)] pub struct Intent { pub(super) init: SessionInit, @@ -137,7 +144,12 @@ impl Intent { } } -/// Handle to a [`Intent`] +/// Handle to a [`Intent`]. +/// +/// The [`IntentHandle`] is a [`Stream`] of [`EventKind`]. It *must* be progressed in a loop, +/// otherwise the session will be blocked from progressing. +/// +/// The [`IntentHandle`] can also submit new interests into the session. #[derive(Debug)] pub struct IntentHandle { event_rx: Receiver, @@ -145,6 +157,7 @@ pub struct IntentHandle { } impl IntentHandle { + /// Split the [`IntentHandle`] into a update sink and event stream. pub fn split(self) -> (PollSender, ReceiverStream) { ( PollSender::new(self.update_tx), @@ -152,10 +165,14 @@ impl IntentHandle { ) } - pub async fn next(&mut self) -> Option { - self.event_rx.recv().await - } - + /// Wait for the intent to be completed. + /// + /// This future completes either if the session terminated, or if all interests of the intent + /// are reconciled and the intent is not in live data mode. + /// + /// Note that successful completion of this future does not guarantee that all interests were + /// fulfilled. 
If you need to know that, use the [`IntentHandle`] as a stream and wait for the + /// [`EventKind::ReconciledAll`] event. pub async fn complete(&mut self) -> Result<(), Arc> { while let Some(event) = self.event_rx.recv().await { if let EventKind::Abort { error } = event { @@ -165,6 +182,10 @@ impl IntentHandle { Ok(()) } + /// Submit new synchronisation interests into the session. + /// + /// The [`IntentHandle`] will then receive events for these interests in addition to already + /// submitted interests. pub async fn add_interests(&self, interests: impl Into) -> Result<()> { self.update_tx .send(IntentUpdate::AddInterests(interests.into())) @@ -172,11 +193,20 @@ impl IntentHandle { Ok(()) } + /// Close the intent. pub async fn close(&self) { self.update_tx.send(IntentUpdate::Close).await.ok(); } } +impl Stream for IntentHandle { + type Item = EventKind; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.event_rx).poll_recv(cx) + } +} + #[derive(Debug)] struct IntentChannels { event_tx: Sender, diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 10b72c6ca7..aa7a4ed780 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -36,14 +36,6 @@ use super::{ const INITIAL_GUARANTEES: u64 = u64::MAX; -// struct Session { -// session_id: SessionId, -// our_role: Role, -// initial_transmission: InitialTransmission, -// event_sender: EventSender, -// cancel_token: CancellationToken, -// } - pub async fn run_session( store: Store, conn: WillowConn, @@ -77,7 +69,7 @@ pub async fn run_session( }, } = recv; - // TODO: update mode to live on intent changes + // TODO: make mode change on intent changes let mode = initial_intents .iter() .fold(SessionMode::ReconcileOnce, |cur, intent| { @@ -233,12 +225,6 @@ pub async fn run_session( Ok(()) }); - let pai_init = with_span(error_span!("pai-init"), async { - caps.revealed().await; - pai_inbox.send(pai::Input::Established).await?; - 
Ok(()) - }); - let pai_loop = with_span(error_span!("pai"), async { use pai::Output; let inbox = pai_inbox_rx.merge(intersection_recv.map(pai::Input::ReceivedMessage)); @@ -361,7 +347,6 @@ pub async fn run_session( control_loop, data_loop, update_loop, - pai_init, pai_loop, intersection_loop, reconciler_loop, @@ -430,6 +415,7 @@ async fn control_loop( match message { Message::CommitmentReveal(msg) => { caps.received_commitment_reveal(our_role, msg.nonce)?; + pai_inbox.send(pai::Input::Established).await?; event_sender.send(SessionEvent::Established).await?; } Message::ControlIssueGuarantee(msg) => { @@ -464,11 +450,6 @@ async fn control_loop( Ok(()) } -// fn channel(cap: usize) -> (Sender, ReceiverStream) { -// let (tx, rx) = mpsc::channel(cap); -// (Sender(tx), ReceiverStream::new(rx)) -// } - fn cancelable_channel( cap: usize, cancel_token: CancellationToken, From 2d9e12766caa983089acbf0131e4e7ad2dd9e87d Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 18 Jul 2024 12:08:47 +0200 Subject: [PATCH 091/198] address review comments --- Cargo.toml | 13 ++++----- iroh-willow/src/engine.rs | 2 +- iroh-willow/src/net.rs | 16 +++++------ iroh-willow/src/session/capabilities.rs | 2 +- iroh-willow/src/session/channels.rs | 36 ++++++++++++------------- iroh-willow/src/store/memory.rs | 2 +- 6 files changed, 34 insertions(+), 37 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 1efcf0d6d4..37706dd4cf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,14 +30,11 @@ opt-level = 3 panic = 'abort' incremental = false -[profile.dev.package.ed25519-dalek] -opt-level = 3 - -[profile.dev.package.curve25519-dalek] -opt-level = 3 - -[profile.dev.package.iroh-blake3] -opt-level = 3 +[profile.dev.package] +# optimize crypto dependencies in dev mode +ed25519-dalek = { opt-level = 3 } +curve25519-dalek = { opt-level = 3 } +iroh-blake3 = { opt-level = 3 } [workspace.lints.rust] missing_debug_implementations = "warn" diff --git a/iroh-willow/src/engine.rs 
b/iroh-willow/src/engine.rs index cf607c18be..f3740b7833 100644 --- a/iroh-willow/src/engine.rs +++ b/iroh-willow/src/engine.rs @@ -329,7 +329,7 @@ mod tests { let conn = conn.await?; handle.handle_connection(conn).await?; } - Ok::<_, anyhow::Error>(()) + anyhow::Result::Ok(()) } }); Ok((handle, endpoint, node_addr, accept_task)) diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 0db6c4ffc6..012fc1f7dd 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -148,7 +148,7 @@ async fn open_logical_channels( let (mut send, recv) = conn.open_bi().await?; send.write_u8(ch.id()).await?; trace!(?ch, "opened bi stream"); - Result::<_, anyhow::Error>::Ok((ch, Some((send, recv)))) + Ok::<_, anyhow::Error>((ch, Some((send, recv)))) } }) .try_join() @@ -165,7 +165,7 @@ async fn open_logical_channels( trace!("read channel id {channel_id}"); let channel = LogicalChannel::from_id(channel_id)?; trace!("accepted bi stream for logical channel {channel:?}"); - Result::<_, anyhow::Error>::Ok((channel, Some((send, recv)))) + anyhow::Result::Ok((channel, Some((send, recv)))) }) .try_join() .await @@ -200,12 +200,12 @@ async fn open_logical_channels( Ok(( LogicalChannelSenders { - intersection: pai.0, - reconciliation: rec.0, - static_tokens: stt.0, - aoi: aoi.0, - capability: cap.0, - data: dat.0, + intersection_send: pai.0, + reconciliation_send: rec.0, + static_tokens_send: stt.0, + aoi_send: aoi.0, + capability_send: cap.0, + data_send: dat.0, }, LogicalChannelReceivers { intersection_recv: pai.1.into(), diff --git a/iroh-willow/src/session/capabilities.rs b/iroh-willow/src/session/capabilities.rs index d99534a847..c1cdff26f9 100644 --- a/iroh-willow/src/session/capabilities.rs +++ b/iroh-willow/src/session/capabilities.rs @@ -1,6 +1,6 @@ use std::{ cell::RefCell, - future::{poll_fn, Future}, + future::poll_fn, rc::Rc, task::{ready, Poll, Waker}, }; diff --git a/iroh-willow/src/session/channels.rs b/iroh-willow/src/session/channels.rs index 
85531288bd..694fbe5ae7 100644 --- a/iroh-willow/src/session/channels.rs +++ b/iroh-willow/src/session/channels.rs @@ -87,31 +87,31 @@ impl LogicalChannelReceivers { #[derive(Debug, Clone)] pub struct LogicalChannelSenders { - pub intersection: Sender, - pub reconciliation: Sender, - pub static_tokens: Sender, - pub aoi: Sender, - pub capability: Sender, - pub data: Sender, + pub intersection_send: Sender, + pub reconciliation_send: Sender, + pub static_tokens_send: Sender, + pub aoi_send: Sender, + pub capability_send: Sender, + pub data_send: Sender, } impl LogicalChannelSenders { pub fn close(&self) { - self.intersection.close(); - self.reconciliation.close(); - self.static_tokens.close(); - self.aoi.close(); - self.capability.close(); - self.data.close(); + self.intersection_send.close(); + self.reconciliation_send.close(); + self.static_tokens_send.close(); + self.aoi_send.close(); + self.capability_send.close(); + self.data_send.close(); } pub fn get(&self, channel: LogicalChannel) -> &Sender { match channel { - LogicalChannel::Intersection => &self.intersection, - LogicalChannel::Reconciliation => &self.reconciliation, - LogicalChannel::StaticToken => &self.static_tokens, - LogicalChannel::Capability => &self.capability, - LogicalChannel::AreaOfInterest => &self.aoi, - LogicalChannel::Data => &self.data, + LogicalChannel::Intersection => &self.intersection_send, + LogicalChannel::Reconciliation => &self.reconciliation_send, + LogicalChannel::StaticToken => &self.static_tokens_send, + LogicalChannel::Capability => &self.capability_send, + LogicalChannel::AreaOfInterest => &self.aoi_send, + LogicalChannel::Data => &self.data_send, } } } diff --git a/iroh-willow/src/store/memory.rs b/iroh-willow/src/store/memory.rs index dc8db9e3ac..62a44ce4d5 100644 --- a/iroh-willow/src/store/memory.rs +++ b/iroh-willow/src/store/memory.rs @@ -181,7 +181,7 @@ impl traits::EntryReader for Rc> { .into_iter() .flatten() .filter(|entry| range.includes_entry(entry.entry())) - 
.map(|e| Result::<_, anyhow::Error>::Ok(e.clone())) + .map(|e| anyhow::Result::Ok(e.clone())) .collect::>() .into_iter() } From 94569e91a4f6cd3e142d34dc47433a38cd7a1276 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 18 Jul 2024 12:37:13 +0200 Subject: [PATCH 092/198] refactor: pass in nonces to allow custom rng --- iroh-willow/src/engine/peer_manager.rs | 11 ++- iroh-willow/src/net.rs | 93 +++++++++++++++++--------- iroh-willow/src/proto/challenge.rs | 23 ++++--- iroh-willow/src/proto/sync.rs | 55 +++++++++++++-- 4 files changed, 131 insertions(+), 51 deletions(-) diff --git a/iroh-willow/src/engine/peer_manager.rs b/iroh-willow/src/engine/peer_manager.rs index ccff8b73fb..52c2069396 100644 --- a/iroh-willow/src/engine/peer_manager.rs +++ b/iroh-willow/src/engine/peer_manager.rs @@ -19,6 +19,7 @@ use tracing::{debug, trace}; use crate::{ net::{WillowConn, ALPN}, + proto::sync::AccessChallenge, session::{ intents::Intent, Error, Interests, Role, SessionEvent, SessionHandle, SessionInit, SessionMode, SessionUpdate, @@ -112,9 +113,11 @@ impl PeerManager { match self.peers.get_mut(&peer) { None => { + // TODO: Allow to pass RNG. + let our_nonce = AccessChallenge::generate(); let abort_handle = self .tasks - .spawn(WillowConn::betty(conn, me).map(move |res| (peer, res))); + .spawn(WillowConn::betty(conn, me, our_nonce).map(move |res| (peer, res))); let init = SessionInit::continuous(Interests::All); let intent = Intent::new_detached(init); self.peers.insert( @@ -140,9 +143,10 @@ impl PeerManager { // Abort our dial attempt. abort_handle.abort(); // Set the new abort handle. + let our_nonce = AccessChallenge::generate(); *abort_handle = self .tasks - .spawn(WillowConn::betty(conn, me).map(move |res| (peer, res))); + .spawn(WillowConn::betty(conn, me, our_nonce).map(move |res| (peer, res))); // Add a catchall interest. 
let init = SessionInit::new(Interests::All, SessionMode::Live); let intent = Intent::new_detached(init); @@ -218,10 +222,11 @@ impl PeerManager { let intents = vec![intent]; let me = self.endpoint.node_id(); let endpoint = self.endpoint.clone(); + let our_nonce = AccessChallenge::generate(); let abort_handle = self.tasks.spawn( async move { let conn = endpoint.connect_by_node_id(&peer, ALPN).await?; - let conn = WillowConn::alfie(conn, me).await?; + let conn = WillowConn::alfie(conn, me, our_nonce).await?; Ok(conn) } .map(move |res| (peer, res)), diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 012fc1f7dd..c5677cff39 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -1,7 +1,7 @@ use anyhow::ensure; use futures_concurrency::future::TryJoin; use futures_util::future::TryFutureExt; -use iroh_base::{hash::Hash, key::NodeId}; +use iroh_base::key::NodeId; use iroh_net::endpoint::{get_remote_node_id, Connection, RecvStream, SendStream}; use tokio::{ io::{AsyncReadExt, AsyncWriteExt}, @@ -10,9 +10,8 @@ use tokio::{ use tracing::{debug, error_span, field::Empty, instrument, trace, warn, Instrument, Span}; use crate::{ - engine::ActorHandle, proto::sync::{ - AccessChallenge, Channel, InitialTransmission, LogicalChannel, Message, + AccessChallenge, ChallengeHash, Channel, InitialTransmission, LogicalChannel, Message, CHALLENGE_HASH_LENGTH, MAX_PAYLOAD_SIZE_POWER, }, session::{ @@ -20,8 +19,7 @@ use crate::{ ChannelReceivers, ChannelSenders, Channels, LogicalChannelReceivers, LogicalChannelSenders, }, - intents::Intent, - Role, SessionHandle, + Role, }, util::channel::{ inbound_channel, outbound_channel, Guarantees, Reader, Receiver, Sender, Writer, @@ -43,17 +41,31 @@ pub struct WillowConn { } impl WillowConn { - pub async fn alfie(conn: Connection, me: NodeId) -> anyhow::Result { - Self::connect(conn, me, Role::Alfie).await + pub async fn alfie( + conn: Connection, + me: NodeId, + our_nonce: AccessChallenge, + ) -> anyhow::Result { + 
Self::connect(conn, me, Role::Alfie, our_nonce).await } - pub async fn betty(conn: Connection, me: NodeId) -> anyhow::Result { - Self::connect(conn, me, Role::Betty).await + pub async fn betty( + conn: Connection, + me: NodeId, + our_nonce: AccessChallenge, + ) -> anyhow::Result { + Self::connect(conn, me, Role::Betty, our_nonce).await } - async fn connect(conn: Connection, me: NodeId, our_role: Role) -> anyhow::Result { + async fn connect( + conn: Connection, + me: NodeId, + our_role: Role, + our_nonce: AccessChallenge, + ) -> anyhow::Result { let peer = get_remote_node_id(&conn)?; - let (initial_transmission, channels, mut join_set) = setup(conn, me, our_role).await?; + let (initial_transmission, channels, mut join_set) = + setup(conn, me, our_role, our_nonce).await?; let join_handle = tokio::task::spawn(async move { join_all(&mut join_set).await }); Ok(Self { peer, @@ -66,10 +78,11 @@ impl WillowConn { } #[instrument(skip_all, name = "willow_net", fields(me=%me.fmt_short(), peer=Empty))] -pub async fn setup( +async fn setup( conn: Connection, me: NodeId, our_role: Role, + our_nonce: AccessChallenge, ) -> anyhow::Result<(InitialTransmission, Channels, JoinSet>)> { let peer = iroh_net::endpoint::get_remote_node_id(&conn)?; Span::current().record("peer", tracing::field::display(peer.fmt_short())); @@ -84,8 +97,12 @@ pub async fn setup( control_send_stream.set_priority(i32::MAX)?; debug!("control channel ready"); - let initial_transmission = - exchange_commitments(&mut control_send_stream, &mut control_recv_stream).await?; + let initial_transmission = exchange_commitments( + &mut control_send_stream, + &mut control_recv_stream, + our_nonce, + ) + .await?; debug!("exchanged commitments"); let (control_send, control_recv) = spawn_channel( @@ -113,19 +130,6 @@ pub async fn setup( Ok((initial_transmission, channels, tasks)) } -#[instrument(skip_all, name = "willow_net", fields(me=%me.fmt_short(), peer=Empty))] -pub async fn run( - me: NodeId, - actor: ActorHandle, - 
conn: Connection, - our_role: Role, - intents: Vec, -) -> anyhow::Result { - let conn = WillowConn::connect(conn, me, our_role).await?; - let handle = actor.init_session(conn, intents).await?; - Ok(handle) -} - #[derive(Debug, thiserror::Error)] #[error("missing channel: {0:?}")] struct MissingChannel(LogicalChannel); @@ -270,9 +274,9 @@ async fn send_loop(mut send_stream: SendStream, channel_reader: Reader) -> anyho async fn exchange_commitments( send_stream: &mut SendStream, recv_stream: &mut RecvStream, + our_nonce: AccessChallenge, ) -> anyhow::Result { - let our_nonce: AccessChallenge = rand::random(); - let challenge_hash = Hash::new(our_nonce); + let challenge_hash = our_nonce.hash(); send_stream.write_u8(MAX_PAYLOAD_SIZE_POWER).await?; send_stream.write_all(challenge_hash.as_bytes()).await?; @@ -286,7 +290,7 @@ async fn exchange_commitments( recv_stream.read_exact(&mut received_commitment).await?; Ok(InitialTransmission { our_nonce, - received_commitment, + received_commitment: ChallengeHash::from_bytes(received_commitment), their_max_payload_size, }) } @@ -320,7 +324,7 @@ mod tests { use futures_lite::StreamExt; use iroh_base::key::SecretKey; - use iroh_net::{Endpoint, NodeAddr, NodeId}; + use iroh_net::{endpoint::Connection, Endpoint, NodeAddr, NodeId}; use rand::SeedableRng; use rand_chacha::ChaCha12Rng; use tracing::info; @@ -329,14 +333,15 @@ mod tests { auth::{CapSelector, DelegateTo}, engine::ActorHandle, form::{AuthForm, EntryForm, PayloadForm, SubspaceForm, TimestampForm}, - net::run, + net::WillowConn, proto::{ grouping::ThreeDRange, keys::{NamespaceId, NamespaceKind, UserId}, meadowcap::AccessMode, + sync::AccessChallenge, willow::{Entry, InvalidPath, Path}, }, - session::{intents::Intent, Interests, Role, SessionInit, SessionMode}, + session::{intents::Intent, Interests, Role, SessionHandle, SessionInit, SessionMode}, }; const ALPN: &[u8] = b"iroh-willow/0"; @@ -346,6 +351,19 @@ mod tests { rand_chacha::ChaCha12Rng::from_seed(*(seed.as_bytes())) 
} + pub async fn run( + me: NodeId, + actor: ActorHandle, + conn: Connection, + our_role: Role, + our_nonce: AccessChallenge, + intents: Vec, + ) -> anyhow::Result { + let conn = WillowConn::connect(conn, me, our_role, our_nonce).await?; + let handle = actor.init_session(conn, intents).await?; + Ok(handle) + } + #[tokio::test(flavor = "multi_thread")] async fn net_smoke() -> anyhow::Result<()> { iroh_test::logging::setup_multithreaded(); @@ -416,12 +434,15 @@ mod tests { info!("connecting took {:?}", start.elapsed()); let start = Instant::now(); + let nonce_alfie = AccessChallenge::generate_with_rng(&mut rng); + let nonce_betty = AccessChallenge::generate_with_rng(&mut rng); let (session_alfie, session_betty) = tokio::join!( run( node_id_alfie, handle_alfie.clone(), conn_alfie, Role::Alfie, + nonce_alfie, vec![intent_alfie] ), run( @@ -429,6 +450,7 @@ mod tests { handle_betty.clone(), conn_betty, Role::Betty, + nonce_betty, vec![intent_betty] ) ); @@ -561,12 +583,16 @@ mod tests { let (intent_alfie, mut intent_handle_alfie) = Intent::new(init_alfie); let (intent_betty, mut intent_handle_betty) = Intent::new(init_betty); + let nonce_alfie = AccessChallenge::generate_with_rng(&mut rng); + let nonce_betty = AccessChallenge::generate_with_rng(&mut rng); + let (session_alfie, session_betty) = tokio::join!( run( node_id_alfie, handle_alfie.clone(), conn_alfie, Role::Alfie, + nonce_alfie, vec![intent_alfie] ), run( @@ -574,6 +600,7 @@ mod tests { handle_betty.clone(), conn_betty, Role::Betty, + nonce_betty, vec![intent_betty] ) ); diff --git a/iroh-willow/src/proto/challenge.rs b/iroh-willow/src/proto/challenge.rs index 3ff6f77afa..a0d469c867 100644 --- a/iroh-willow/src/proto/challenge.rs +++ b/iroh-willow/src/proto/challenge.rs @@ -1,6 +1,7 @@ -use iroh_base::hash::Hash; - -use crate::session::{Error, Role}; +use crate::{ + proto::sync::AccessChallengeBytes, + session::{Error, Role}, +}; use super::{ keys::{UserPublicKey, UserSecretKey, UserSignature}, @@ -14,8 +15,8 @@ 
pub enum ChallengeState { received_commitment: ChallengeHash, }, Revealed { - ours: AccessChallenge, - theirs: AccessChallenge, + ours: AccessChallengeBytes, + theirs: AccessChallengeBytes, }, } @@ -26,12 +27,14 @@ impl ChallengeState { our_nonce, received_commitment, } => { - if Hash::new(their_nonce).as_bytes() != received_commitment { + if their_nonce.hash() != *received_commitment { return Err(Error::BrokenCommittement); } let ours = match our_role { - Role::Alfie => bitwise_xor(*our_nonce, their_nonce), - Role::Betty => bitwise_xor_complement(*our_nonce, their_nonce), + Role::Alfie => bitwise_xor(our_nonce.to_bytes(), their_nonce.to_bytes()), + Role::Betty => { + bitwise_xor_complement(our_nonce.to_bytes(), their_nonce.to_bytes()) + } }; let theirs = bitwise_complement(ours); *self = Self::Revealed { ours, theirs }; @@ -62,14 +65,14 @@ impl ChallengeState { Ok(()) } - fn get_ours(&self) -> Result<&AccessChallenge, Error> { + fn get_ours(&self) -> Result<&AccessChallengeBytes, Error> { match self { Self::Revealed { ours, .. } => Ok(ours), _ => Err(Error::InvalidMessageInCurrentState), } } - fn get_theirs(&self) -> Result<&AccessChallenge, Error> { + fn get_theirs(&self) -> Result<&AccessChallengeBytes, Error> { match self { Self::Revealed { theirs, .. 
} => Ok(theirs), _ => Err(Error::InvalidMessageInCurrentState), diff --git a/iroh-willow/src/proto/sync.rs b/iroh-willow/src/proto/sync.rs index 043f60f15c..5e16396e92 100644 --- a/iroh-willow/src/proto/sync.rs +++ b/iroh-willow/src/proto/sync.rs @@ -1,7 +1,8 @@ use std::{fmt, io::Write, sync::Arc}; -use iroh_base::hash::Hash; - +use iroh_base::{base32::fmt_short, hash::Hash}; +use rand::Rng; +use rand_core::CryptoRngCore; use serde::{Deserialize, Serialize}; use strum::{EnumCount, VariantArray}; @@ -28,8 +29,52 @@ pub const MAX_PAYLOAD_SIZE: usize = 2usize.pow(MAX_PAYLOAD_SIZE_POWER as u32); pub const CHALLENGE_LENGTH: usize = 32; pub const CHALLENGE_HASH_LENGTH: usize = DIGEST_LENGTH; -pub type ChallengeHash = [u8; CHALLENGE_HASH_LENGTH]; -pub type AccessChallenge = [u8; CHALLENGE_LENGTH]; + +#[derive(derive_more::Debug, Copy, Clone, Eq, PartialEq, Serialize, Deserialize)] +pub struct ChallengeHash(#[debug("{}..", fmt_short(self.0))] [u8; CHALLENGE_HASH_LENGTH]); + +impl ChallengeHash { + pub fn as_bytes(&self) -> &[u8] { + &self.0 + } + + pub fn from_bytes(bytes: [u8; CHALLENGE_HASH_LENGTH]) -> Self { + Self(bytes) + } +} + +#[derive(derive_more::Debug, Copy, Clone, Serialize, Deserialize, Eq, PartialEq)] +pub struct AccessChallenge(#[debug("{}..", fmt_short(self.0))] AccessChallengeBytes); + +pub type AccessChallengeBytes = [u8; CHALLENGE_LENGTH]; + +impl Default for AccessChallenge { + fn default() -> Self { + Self::generate() + } +} + +impl AccessChallenge { + pub fn generate() -> Self { + Self(rand::random()) + } + + pub fn generate_with_rng(rng: &mut impl CryptoRngCore) -> Self { + Self(rng.gen()) + } + + pub fn as_bytes(&self) -> &[u8] { + &self.0 + } + + pub fn to_bytes(&self) -> [u8; 32] { + self.0 + } + + pub fn hash(&self) -> ChallengeHash { + ChallengeHash(*Hash::new(&self.0).as_bytes()) + } +} // In Meadowcap, for example, StaticToken is the type McCapability // and DynamicToken is the type UserSignature, @@ -312,7 +357,7 @@ impl IsHandle for 
IntersectionHandle { #[derive(Serialize, Deserialize, PartialEq, Eq, derive_more::Debug)] pub struct CommitmentReveal { /// The nonce of the sender, encoded as a big-endian unsigned integer. - #[debug("{}..", iroh_base::base32::fmt_short(self.nonce))] + #[debug("{}..", iroh_base::base32::fmt_short(self.nonce.0))] pub nonce: AccessChallenge, } From 870ab3f38fc82789e9957772edd5f8282f770310 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 18 Jul 2024 13:12:32 +0200 Subject: [PATCH 093/198] perf: use map not mutation for vecs --- iroh-willow/src/proto/pai.rs | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/iroh-willow/src/proto/pai.rs b/iroh-willow/src/proto/pai.rs index fe80c93617..361da84590 100644 --- a/iroh-willow/src/proto/pai.rs +++ b/iroh-willow/src/proto/pai.rs @@ -153,19 +153,24 @@ impl FragmentKit { pub fn into_fragment_set(self) -> FragmentSet { match self { FragmentKit::Complete(namespace_id, path) => { - let mut pairs = vec![]; - for prefix in path.all_prefixes() { - pairs.push((namespace_id, prefix)); - } + let pairs = path + .all_prefixes() + .into_iter() + .map(|prefix| (namespace_id, prefix)) + .collect(); FragmentSet::Complete(pairs) } FragmentKit::Selective(namespace_id, subspace_id, path) => { - let mut primary = vec![]; - let mut secondary = vec![]; - for prefix in path.all_prefixes() { - primary.push((namespace_id, subspace_id, prefix.clone())); - secondary.push((namespace_id, prefix.clone())); - } + let all_prefixes = path.all_prefixes(); + let primary = all_prefixes + .iter() + .cloned() + .map(|prefix| (namespace_id, subspace_id, prefix)) + .collect(); + let secondary = all_prefixes + .into_iter() + .map(|prefix| (namespace_id, prefix)) + .collect(); FragmentSet::Selective { primary, secondary } } } From cf30c380078519860fa8b6d271f7f7c3c7241158 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Fri, 19 Jul 2024 00:27:31 +0200 Subject: [PATCH 094/198] refactor: use 
completion type for intent results --- iroh-willow/src/proto/sync.rs | 2 +- iroh-willow/src/session/intents.rs | 32 ++++++++++++++++++++++++++---- iroh-willow/src/session/run.rs | 2 +- 3 files changed, 30 insertions(+), 6 deletions(-) diff --git a/iroh-willow/src/proto/sync.rs b/iroh-willow/src/proto/sync.rs index 5e16396e92..fa2e8a19af 100644 --- a/iroh-willow/src/proto/sync.rs +++ b/iroh-willow/src/proto/sync.rs @@ -72,7 +72,7 @@ impl AccessChallenge { } pub fn hash(&self) -> ChallengeHash { - ChallengeHash(*Hash::new(&self.0).as_bytes()) + ChallengeHash(*Hash::new(self.0).as_bytes()) } } diff --git a/iroh-willow/src/session/intents.rs b/iroh-willow/src/session/intents.rs index 8853742292..b63dce7eab 100644 --- a/iroh-willow/src/session/intents.rs +++ b/iroh-willow/src/session/intents.rs @@ -144,6 +144,16 @@ impl Intent { } } +#[derive(Debug)] +pub enum Completion { + /// All interests were reconciled. + Complete, + /// Some interests were reconciled. + Partial, + /// No interests were reconciled. + Nothing, +} + /// Handle to a [`Intent`]. /// /// The [`IntentHandle`] is a [`Stream`] of [`EventKind`]. It *must* be progressed in a loop, @@ -173,13 +183,27 @@ impl IntentHandle { /// Note that successful completion of this future does not guarantee that all interests were /// fulfilled. If you need to know that, use the [`IntentHandle`] as a stream and wait for the /// [`EventKind::ReconciledAll`] event. - pub async fn complete(&mut self) -> Result<(), Arc> { + pub async fn complete(&mut self) -> Result> { + let mut complete = false; + let mut partial = false; while let Some(event) = self.event_rx.recv().await { - if let EventKind::Abort { error } = event { - return Err(error); + match event { + EventKind::ReconciledAll => complete = true, + // TODO: track partial reconciliations + EventKind::Reconciled { .. 
} => partial = true, + EventKind::Abort { error } => return Err(error), + _ => {} } } - Ok(()) + let completion = if complete { + Completion::Complete + } else if partial { + Completion::Partial + } else { + Completion::Nothing + }; + + Ok(completion) } /// Submit new synchronisation interests into the session. diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index aa7a4ed780..5604936f15 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -480,7 +480,7 @@ async fn with_span( span: Span, fut: impl Future>, ) -> Result { - async move { + async { tracing::debug!("start"); let res = fut.await; tracing::debug!(?res, "done"); From 42be3036be72348f5018ba43a7701a80f7e56618 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Fri, 19 Jul 2024 09:24:07 +0200 Subject: [PATCH 095/198] cleanup --- iroh-willow/src/engine/peer_manager.rs | 45 ++++++++++++-------------- 1 file changed, 20 insertions(+), 25 deletions(-) diff --git a/iroh-willow/src/engine/peer_manager.rs b/iroh-willow/src/engine/peer_manager.rs index 52c2069396..2a9613cd32 100644 --- a/iroh-willow/src/engine/peer_manager.rs +++ b/iroh-willow/src/engine/peer_manager.rs @@ -85,7 +85,7 @@ impl PeerManager { Err(err) if err.is_cancelled() => continue, Err(err) => Err(err).context("establish task paniced")?, Ok((_peer, Ok(conn))) => self.on_established(conn).await?, - Ok((peer, Err(err))) => self.failed_to_connect(peer, Arc::new(Error::Net(err))).await, + Ok((peer, Err(err))) => self.on_establish_failed(peer, Arc::new(Error::Net(err))).await, } } else => break, @@ -173,7 +173,7 @@ impl PeerManager { } } - async fn failed_to_connect(&mut self, peer: NodeId, error: Arc) { + async fn on_establish_failed(&mut self, peer: NodeId, error: Arc) { let Some(peer_state) = self.peers.remove(&peer) else { tracing::warn!(?peer, "connection failure for unknown peer"); return; @@ -195,12 +195,12 @@ impl PeerManager { async fn on_established(&mut self, conn: 
WillowConn) -> anyhow::Result<()> { let peer = conn.peer; - let peer_state = self + let state = self .peers .remove(&peer) .ok_or_else(|| anyhow!("unreachable: on_established called for unknown peer"))?; - let PeerState::Pending { intents, .. } = peer_state else { + let PeerState::Pending { intents, .. } = state else { anyhow::bail!("unreachable: on_established called for peer in wrong state") }; @@ -219,36 +219,31 @@ impl PeerManager { async fn submit_intent(&mut self, peer: NodeId, intent: Intent) { match self.peers.get_mut(&peer) { None => { - let intents = vec![intent]; - let me = self.endpoint.node_id(); - let endpoint = self.endpoint.clone(); let our_nonce = AccessChallenge::generate(); - let abort_handle = self.tasks.spawn( + let abort_handle = self.tasks.spawn({ + let endpoint = self.endpoint.clone(); async move { let conn = endpoint.connect_by_node_id(&peer, ALPN).await?; - let conn = WillowConn::alfie(conn, me, our_nonce).await?; - Ok(conn) + WillowConn::alfie(conn, endpoint.node_id(), our_nonce).await } - .map(move |res| (peer, res)), - ); - let peer_state = PeerState::Pending { - intents, + .map(move |res| (peer, res)) + }); + let state = PeerState::Pending { + intents: vec![intent], abort_handle, our_role: Role::Alfie, }; - self.peers.insert(peer, peer_state); + self.peers.insert(peer, state); } - Some(state) => match state { - PeerState::Pending { intents, .. } => { - intents.push(intent); - } - PeerState::Active { update_tx, .. } => { - if let Err(intent) = update_tx.send(SessionUpdate::SubmitIntent(intent)).await { - let SessionUpdate::SubmitIntent(intent) = intent.0; - intent.send_abort(Arc::new(Error::ActorFailed)).await; - } + Some(PeerState::Pending { intents, .. }) => { + intents.push(intent); + } + Some(PeerState::Active { update_tx, .. 
}) => { + if let Err(message) = update_tx.send(SessionUpdate::SubmitIntent(intent)).await { + let SessionUpdate::SubmitIntent(intent) = message.0; + intent.send_abort(Arc::new(Error::ActorFailed)).await; } - }, + } }; } From 0fa5ad233a69a5564575add218b0098ebbcd45c0 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Mon, 22 Jul 2024 11:18:18 +0200 Subject: [PATCH 096/198] feat: add AcceptOpts to customize betty behavior --- iroh-willow/src/engine.rs | 16 +- iroh-willow/src/engine/peer_manager.rs | 240 +++++++++++++++++++------ 2 files changed, 196 insertions(+), 60 deletions(-) diff --git a/iroh-willow/src/engine.rs b/iroh-willow/src/engine.rs index f3740b7833..74a2a21e15 100644 --- a/iroh-willow/src/engine.rs +++ b/iroh-willow/src/engine.rs @@ -4,7 +4,6 @@ use tokio::sync::mpsc; use tracing::{error_span, Instrument}; use crate::{ - engine::peer_manager::PeerManager, session::{ intents::{Intent, IntentHandle}, SessionInit, @@ -15,7 +14,10 @@ use crate::{ mod actor; mod peer_manager; +use self::peer_manager::PeerManager; + pub use self::actor::ActorHandle; +pub use self::peer_manager::AcceptOpts; const PEER_MANAGER_INBOX_CAP: usize = 128; @@ -30,11 +32,12 @@ impl Engine { pub fn spawn( endpoint: Endpoint, create_store: impl 'static + Send + FnOnce() -> S, + accept_opts: AcceptOpts, ) -> Self { let me = endpoint.node_id(); let actor = ActorHandle::spawn(create_store, me); let (pm_inbox_tx, pm_inbox_rx) = mpsc::channel(PEER_MANAGER_INBOX_CAP); - let peer_manager = PeerManager::new(actor.clone(), endpoint, pm_inbox_rx); + let peer_manager = PeerManager::new(actor.clone(), endpoint, pm_inbox_rx, accept_opts); let peer_manager_handle = tokio::task::spawn( async move { peer_manager.run().await.map_err(|err| format!("{err:?}")) } .instrument(error_span!("peer_manager", me = me.fmt_short())), @@ -80,7 +83,7 @@ mod tests { use crate::{ auth::{CapSelector, DelegateTo}, - engine::Engine, + engine::{AcceptOpts, Engine}, form::EntryForm, net::ALPN, proto::{ @@ -278,8 
+281,8 @@ mod tests { (Engine, NodeId, UserId), (Engine, NodeId, UserId), )> { - let (alfie, alfie_ep, alfie_addr, alfie_task) = create(rng).await?; - let (betty, betty_ep, betty_addr, betty_task) = create(rng).await?; + let (alfie, alfie_ep, alfie_addr, alfie_task) = create(rng, Default::default()).await?; + let (betty, betty_ep, betty_addr, betty_task) = create(rng, Default::default()).await?; let betty_node_id = betty_addr.node_id; let alfie_node_id = alfie_addr.node_id; @@ -302,6 +305,7 @@ mod tests { pub async fn create( rng: &mut rand_chacha::ChaCha12Rng, + accept_opts: AcceptOpts, ) -> anyhow::Result<( Engine, Endpoint, @@ -316,7 +320,7 @@ mod tests { let node_addr = endpoint.node_addr().await?; let payloads = iroh_blobs::store::mem::Store::default(); let create_store = move || crate::store::memory::Store::new(payloads); - let handle = Engine::spawn(endpoint.clone(), create_store); + let handle = Engine::spawn(endpoint.clone(), create_store, accept_opts); let accept_task = tokio::task::spawn({ let handle = handle.clone(); let endpoint = endpoint.clone(); diff --git a/iroh-willow/src/engine/peer_manager.rs b/iroh-willow/src/engine/peer_manager.rs index 2a9613cd32..22991a1bee 100644 --- a/iroh-willow/src/engine/peer_manager.rs +++ b/iroh-willow/src/engine/peer_manager.rs @@ -1,12 +1,13 @@ -use std::{collections::HashMap, sync::Arc}; +use std::{collections::HashMap, future::Future, sync::Arc}; use anyhow::{anyhow, Context, Result}; use futures_buffered::join_all; -use futures_lite::StreamExt; +use futures_lite::{future::Boxed, StreamExt}; use futures_util::FutureExt; use iroh_net::{ - endpoint::{get_remote_node_id, Connection}, + endpoint::{get_remote_node_id, Connection, VarInt}, + util::AbortingJoinHandle, Endpoint, NodeId, }; use tokio::{ @@ -21,14 +22,66 @@ use crate::{ net::{WillowConn, ALPN}, proto::sync::AccessChallenge, session::{ - intents::Intent, Error, Interests, Role, SessionEvent, SessionHandle, SessionInit, - SessionMode, SessionUpdate, + 
intents::{EventKind, Intent}, + Error, Interests, Role, SessionEvent, SessionHandle, SessionInit, SessionUpdate, }, }; use super::actor::ActorHandle; -const ERROR_CODE_IGNORE_CONN: u8 = 1; +const ERROR_CODE_IGNORE_CONN: VarInt = VarInt::from_u32(1); + +/// Customize what to do with incoming connections. +/// +/// You can use [`AcceptOpts::default`] to instantiate with the default behavior: +/// * Accept all incoming connections, and submit interest in everything we have +/// * Do not track events for sessions created from incoming connections for which we did not +/// signal a specific interest ourselves as well +/// +/// Use [`Self::accept_custom`] to customize which sessions to accept, and which interests to +/// submit. +/// +/// Use [`Self::track_events`] to receive events for sessions we accepted. +#[derive(derive_more::Debug, Default)] +pub struct AcceptOpts { + #[debug("{:?}", accept_cb.as_ref().map(|_| "_"))] + accept_cb: Option, + track_events: Option>, +} + +impl AcceptOpts { + /// Registers a callback to determine the fate of incoming connections. + /// + /// The callback gets the connecting peer's [`NodeId`] as argument, and must return a future + /// that resolves to `Option<`[SessionInit]``>`. When returning `None`, the session will not be + /// accepted. When returning a `SessionInit`, the session will be accepted with these + /// interests. + pub fn accept_custom(mut self, cb: F) -> Self + where + F: Fn(NodeId) -> Fut + 'static + Send + Sync, + Fut: 'static + Send + Future>, + { + let cb = Box::new(move |peer: NodeId| { + let fut: Boxed> = Box::pin((cb)(peer)); + fut + }); + self.accept_cb = Some(cb); + self + } + + /// Registers an event channel for events from accepted connections. + /// + /// If called, the passed [`mpsc::Sender`] will receive all events emitted from session + /// intents for incoming connections. The corresponding [`mpsc::Receiver`] **must** then be + /// received from in a loop. 
The session will be blocked from proceeding if the receiver is not + /// able to process events fast enough. + /// + /// If not called, events from session intents for incoming connections will be dropped. + pub fn track_events(mut self, sender: mpsc::Sender<(NodeId, EventKind)>) -> Self { + self.track_events = Some(sender); + self + } +} #[derive(derive_more::Debug)] pub(super) enum Input { @@ -42,14 +95,17 @@ pub(super) enum Input { }, } +type AcceptCb = Box Boxed> + Send + Sync + 'static>; + #[derive(derive_more::Debug)] pub(super) struct PeerManager { actor: ActorHandle, endpoint: Endpoint, inbox: mpsc::Receiver, - events_rx: StreamMap>, + session_events_rx: StreamMap>, tasks: JoinSet<(NodeId, Result)>, peers: HashMap, + accept_handlers: AcceptHandlers, } impl PeerManager { @@ -57,14 +113,16 @@ impl PeerManager { actor_handle: ActorHandle, endpoint: Endpoint, inbox: mpsc::Receiver, + accept_opts: AcceptOpts, ) -> Self { PeerManager { endpoint: endpoint.clone(), actor: actor_handle, inbox, - events_rx: Default::default(), + session_events_rx: Default::default(), tasks: Default::default(), peers: Default::default(), + accept_handlers: AcceptHandlers::new(accept_opts), } } @@ -75,7 +133,7 @@ impl PeerManager { trace!(?input, "tick: inbox"); self.handle_input(input).await; } - Some((session_id, event)) = self.events_rx.next(), if !self.events_rx.is_empty() => { + Some((session_id, event)) = self.session_events_rx.next(), if !self.session_events_rx.is_empty() => { trace!(?session_id, ?event, "tick: event"); self.handle_event(session_id, event); } @@ -97,11 +155,11 @@ impl PeerManager { async fn handle_input(&mut self, input: Input) { match input { Input::SubmitIntent { peer, intent } => self.submit_intent(peer, intent).await, - Input::HandleConnection { conn } => self.handle_connection(conn), + Input::HandleConnection { conn } => self.handle_connection(conn).await, } } - fn handle_connection(&mut self, conn: Connection) { + async fn handle_connection(&mut self, 
conn: Connection) { let peer = match get_remote_node_id(&conn) { Ok(node_id) => node_id, Err(err) => { @@ -113,62 +171,48 @@ impl PeerManager { match self.peers.get_mut(&peer) { None => { - // TODO: Allow to pass RNG. - let our_nonce = AccessChallenge::generate(); - let abort_handle = self - .tasks - .spawn(WillowConn::betty(conn, me, our_nonce).map(move |res| (peer, res))); - let init = SessionInit::continuous(Interests::All); - let intent = Intent::new_detached(init); - self.peers.insert( - peer, - PeerState::Pending { - our_role: Role::Betty, - intents: vec![intent], - abort_handle, - }, - ); + if let Some(intent) = self.accept_handlers.accept(peer).await { + let abort_handle = self.tasks.spawn( + WillowConn::betty(conn, me, AccessChallenge::generate()) + .map(move |res| (peer, res)), + ); + self.peers.insert( + peer, + PeerState::Pending { + our_role: Role::Betty, + intents: vec![intent], + abort_handle, + }, + ); + } } Some(PeerState::Pending { - our_role: Role::Alfie, + our_role, abort_handle, intents, }) => { - if me > peer { + if *our_role == Role::Betty { + tracing::debug!("ignore incoming connection (already accepting)"); + conn.close(ERROR_CODE_IGNORE_CONN, b"duplicate-already-accepting"); + } else if me > peer { tracing::debug!( "ignore incoming connection (already dialing and our dial wins)" ); - conn.close(ERROR_CODE_IGNORE_CONN.into(), b"duplicate-our-dial-wins"); - } else { - // Abort our dial attempt. + conn.close(ERROR_CODE_IGNORE_CONN, b"duplicate-our-dial-wins"); + } else if let Some(intent) = self.accept_handlers.accept(peer).await { + // Abort our dial attempt and insert the new abort handle and intent. abort_handle.abort(); - // Set the new abort handle. - let our_nonce = AccessChallenge::generate(); - *abort_handle = self - .tasks - .spawn(WillowConn::betty(conn, me, our_nonce).map(move |res| (peer, res))); - // Add a catchall interest. 
- let init = SessionInit::new(Interests::All, SessionMode::Live); - let intent = Intent::new_detached(init); + *abort_handle = self.tasks.spawn( + WillowConn::betty(conn, me, AccessChallenge::generate()) + .map(move |res| (peer, res)), + ); + *our_role = Role::Betty; intents.push(intent); } } - Some(PeerState::Pending { - our_role: Role::Betty, - .. - }) => { - tracing::debug!("ignore incoming connection (already accepting)"); - conn.close( - ERROR_CODE_IGNORE_CONN.into(), - b"duplicate-already-accepting", - ); - } Some(PeerState::Active { .. }) => { tracing::debug!("ignore incoming connection (already connected)"); - conn.close( - ERROR_CODE_IGNORE_CONN.into(), - b"duplicate-already-accepting", - ); + conn.close(ERROR_CODE_IGNORE_CONN, b"duplicate-already-accepting"); } } } @@ -211,7 +255,8 @@ impl PeerManager { update_tx, event_rx, } = session_handle; - self.events_rx.insert(peer, ReceiverStream::new(event_rx)); + self.session_events_rx + .insert(peer, ReceiverStream::new(event_rx)); self.peers.insert(peer, PeerState::Active { update_tx }); Ok(()) } @@ -270,3 +315,90 @@ enum PeerState { update_tx: mpsc::Sender, }, } + +#[derive(derive_more::Debug)] +struct AcceptHandlers { + #[debug("{:?}", accept_cb.as_ref().map(|_| "_"))] + accept_cb: Option, + event_forwarder: Option, +} + +impl AcceptHandlers { + pub fn new(opts: AcceptOpts) -> Self { + Self { + accept_cb: opts.accept_cb, + event_forwarder: opts.track_events.map(EventForwarder::new), + } + } + + pub async fn accept(&self, peer: NodeId) -> Option { + let init = match &self.accept_cb { + None => Some(SessionInit::continuous(Interests::All)), + Some(cb) => cb(peer).await, + }; + let init = init?; + + let intent = match &self.event_forwarder { + None => Intent::new_detached(init), + Some(forwarder) => { + let (intent, handle) = Intent::new(init); + let (_update_tx, event_rx) = handle.split(); + forwarder.add_intent(peer, event_rx).await; + intent + } + }; + + Some(intent) + } +} + +#[derive(Debug)] +struct 
EventForwarder { + _join_handle: AbortingJoinHandle<()>, + stream_sender: mpsc::Sender<(NodeId, ReceiverStream)>, +} + +#[derive(Debug)] +struct EventForwarderActor { + stream_receiver: mpsc::Receiver<(NodeId, ReceiverStream)>, + streams: StreamMap>, + event_sender: mpsc::Sender<(NodeId, EventKind)>, +} + +impl EventForwarder { + fn new(event_sender: mpsc::Sender<(NodeId, EventKind)>) -> EventForwarder { + let (stream_sender, stream_receiver) = mpsc::channel(16); + let forwarder = EventForwarderActor { + stream_receiver, + streams: Default::default(), + event_sender, + }; + let join_handle = tokio::task::spawn(forwarder.run()); + EventForwarder { + _join_handle: join_handle.into(), + stream_sender, + } + } + + pub async fn add_intent(&self, peer: NodeId, event_stream: ReceiverStream) { + self.stream_sender.send((peer, event_stream)).await.ok(); + } +} + +impl EventForwarderActor { + async fn run(mut self) { + loop { + tokio::select! { + Some((peer, receiver)) = self.stream_receiver.recv() => { + self.streams.insert(peer, receiver); + }, + Some((peer, event)) = self.streams.next() => { + if let Err(_receiver_dropped) = self.event_sender.send((peer, event)).await { + break; + } + }, + else => break, + } + } + } +} From ef26a4aaadc1168dd08eb371fafcab210a4eabc8 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Wed, 24 Jul 2024 11:09:16 +0200 Subject: [PATCH 097/198] tests: improve engine tests --- iroh-willow/src/engine.rs | 194 +++++++++++++++++++++----------------- 1 file changed, 110 insertions(+), 84 deletions(-) diff --git a/iroh-willow/src/engine.rs b/iroh-willow/src/engine.rs index 74a2a21e15..1e078e858f 100644 --- a/iroh-willow/src/engine.rs +++ b/iroh-willow/src/engine.rs @@ -75,11 +75,17 @@ impl std::ops::Deref for Engine { #[cfg(test)] mod tests { + use std::sync::{Arc, Mutex}; + + use anyhow::Result; use bytes::Bytes; + use futures_concurrency::future::TryJoin; use futures_lite::StreamExt; use iroh_net::{Endpoint, NodeId}; use 
rand::SeedableRng; use rand_chacha::ChaCha12Rng; + use rand_core::CryptoRngCore; + use tokio::task::JoinHandle; use crate::{ auth::{CapSelector, DelegateTo}, @@ -97,25 +103,23 @@ mod tests { fn create_rng(seed: &str) -> ChaCha12Rng { let seed = iroh_base::hash::Hash::new(seed); - rand_chacha::ChaCha12Rng::from_seed(*(seed.as_bytes())) + ChaCha12Rng::from_seed(*(seed.as_bytes())) } #[tokio::test(flavor = "multi_thread")] - async fn peer_manager_two_intents() -> anyhow::Result<()> { + async fn peer_manager_two_intents() -> Result<()> { iroh_test::logging::setup_multithreaded(); let mut rng = create_rng("peer_manager_two_intents"); - let ( - shutdown, - namespace, - (alfie, _alfie_node_id, _alfie_user), - (betty, betty_node_id, betty_user), - ) = create_and_setup_two(&mut rng).await?; + + let [alfie, betty] = spawn_two(&mut rng).await?; + let (namespace, _alfie_user, betty_user) = setup_and_delegate(&alfie, &betty).await?; + let betty_node_id = betty.node_id(); insert(&betty, namespace, betty_user, &[b"foo", b"1"], "foo 1").await?; insert(&betty, namespace, betty_user, &[b"bar", b"2"], "bar 2").await?; insert(&betty, namespace, betty_user, &[b"bar", b"3"], "bar 3").await?; - let task_foo = tokio::task::spawn({ + let task_foo_path = tokio::task::spawn({ let alfie = alfie.clone(); async move { let path = Path::new(&[b"foo"]).unwrap(); @@ -154,7 +158,7 @@ mod tests { } }); - let task_bar = tokio::task::spawn({ + let task_bar_path = tokio::task::spawn({ let alfie = alfie.clone(); async move { let path = Path::new(&[b"bar"]).unwrap(); @@ -194,22 +198,22 @@ mod tests { } }); - task_foo.await.unwrap(); - task_bar.await.unwrap(); - shutdown(); + task_foo_path.await.unwrap(); + task_bar_path.await.unwrap(); + + [alfie, betty].map(Peer::shutdown).try_join().await?; + Ok(()) } #[tokio::test(flavor = "multi_thread")] - async fn peer_manager_update_intent() -> anyhow::Result<()> { + async fn peer_manager_update_intent() -> Result<()> { iroh_test::logging::setup_multithreaded(); 
let mut rng = create_rng("peer_manager_update_intent"); - let ( - shutdown, - namespace, - (alfie, _alfie_node_id, _alfie_user), - (betty, betty_node_id, betty_user), - ) = create_and_setup_two(&mut rng).await?; + + let [alfie, betty] = spawn_two(&mut rng).await?; + let (namespace, _alfie_user, betty_user) = setup_and_delegate(&alfie, &betty).await?; + let betty_node_id = betty.node_id(); insert(&betty, namespace, betty_user, &[b"foo"], "foo 1").await?; insert(&betty, namespace, betty_user, &[b"bar"], "bar 1").await?; @@ -219,7 +223,6 @@ mod tests { let init = SessionInit::new(interests, SessionMode::Live); let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); - println!("start"); assert_eq!( intent.next().await.unwrap(), EventKind::CapabilityIntersection { @@ -227,7 +230,6 @@ mod tests { area: Area::full(), } ); - println!("first in!"); assert_eq!( intent.next().await.unwrap(), EventKind::InterestIntersection { @@ -269,80 +271,104 @@ mod tests { assert!(intent.next().await.is_none(),); - shutdown(); + [alfie, betty].map(Peer::shutdown).try_join().await?; Ok(()) } - pub async fn create_and_setup_two( - rng: &mut rand_chacha::ChaCha12Rng, - ) -> anyhow::Result<( - impl Fn(), - NamespaceId, - (Engine, NodeId, UserId), - (Engine, NodeId, UserId), - )> { - let (alfie, alfie_ep, alfie_addr, alfie_task) = create(rng, Default::default()).await?; - let (betty, betty_ep, betty_addr, betty_task) = create(rng, Default::default()).await?; - - let betty_node_id = betty_addr.node_id; - let alfie_node_id = alfie_addr.node_id; - alfie_ep.add_node_addr(betty_addr)?; - betty_ep.add_node_addr(alfie_addr)?; - - let (namespace_id, alfie_user, betty_user) = setup_and_delegate(&alfie, &betty).await?; - - let shutdown = move || { - betty_task.abort(); - alfie_task.abort(); - }; - Ok(( - shutdown, - namespace_id, - (alfie, alfie_node_id, alfie_user), - (betty, betty_node_id, betty_user), - )) + #[derive(Debug, Clone)] + struct Peer { + endpoint: Endpoint, + engine: 
Engine, + accept_task: Arc>>>>, } - pub async fn create( - rng: &mut rand_chacha::ChaCha12Rng, - accept_opts: AcceptOpts, - ) -> anyhow::Result<( - Engine, - Endpoint, - iroh_net::NodeAddr, - tokio::task::JoinHandle>, - )> { - let endpoint = Endpoint::builder() - .secret_key(iroh_net::key::SecretKey::generate_with_rng(rng)) - .alpns(vec![ALPN.to_vec()]) - .bind(0) - .await?; - let node_addr = endpoint.node_addr().await?; - let payloads = iroh_blobs::store::mem::Store::default(); - let create_store = move || crate::store::memory::Store::new(payloads); - let handle = Engine::spawn(endpoint.clone(), create_store, accept_opts); - let accept_task = tokio::task::spawn({ - let handle = handle.clone(); - let endpoint = endpoint.clone(); - async move { - while let Some(mut conn) = endpoint.accept().await { - let alpn = conn.alpn().await?; - if alpn != ALPN { - continue; + impl Peer { + pub async fn spawn( + secret_key: iroh_net::key::SecretKey, + accept_opts: AcceptOpts, + ) -> Result { + let endpoint = Endpoint::builder() + .secret_key(secret_key) + .alpns(vec![ALPN.to_vec()]) + .bind(0) + .await?; + let payloads = iroh_blobs::store::mem::Store::default(); + let create_store = move || crate::store::memory::Store::new(payloads); + let engine = Engine::spawn(endpoint.clone(), create_store, accept_opts); + let accept_task = tokio::task::spawn({ + let engine = engine.clone(); + let endpoint = endpoint.clone(); + async move { + while let Some(mut conn) = endpoint.accept().await { + let alpn = conn.alpn().await?; + if alpn != ALPN { + continue; + } + let conn = conn.await?; + engine.handle_connection(conn).await?; } - let conn = conn.await?; - handle.handle_connection(conn).await?; + Result::Ok(()) + } + }); + Ok(Self { + endpoint, + engine, + accept_task: Arc::new(Mutex::new(Some(accept_task))), + }) + } + + pub async fn shutdown(self) -> Result<()> { + let accept_task = self.accept_task.lock().unwrap().take(); + if let Some(accept_task) = accept_task { + accept_task.abort(); + 
match accept_task.await { + Err(err) if err.is_cancelled() => {} + Ok(Ok(())) => {} + Err(err) => Err(err)?, + Ok(Err(err)) => Err(err)?, } - anyhow::Result::Ok(()) } - }); - Ok((handle, endpoint, node_addr, accept_task)) + self.engine.shutdown().await?; + self.endpoint.close(0u8.into(), b"").await?; + Ok(()) + } + + pub fn node_id(&self) -> NodeId { + self.endpoint.node_id() + } + } + + impl std::ops::Deref for Peer { + type Target = Engine; + fn deref(&self) -> &Self::Target { + &self.engine + } + } + + async fn spawn_two(rng: &mut impl CryptoRngCore) -> Result<[Peer; 2]> { + let peers = [ + iroh_net::key::SecretKey::generate_with_rng(rng), + iroh_net::key::SecretKey::generate_with_rng(rng), + ] + .map(|secret_key| Peer::spawn(secret_key, Default::default())) + .try_join() + .await?; + + peers[0] + .endpoint + .add_node_addr(peers[1].endpoint.node_addr().await?)?; + + peers[1] + .endpoint + .add_node_addr(peers[0].endpoint.node_addr().await?)?; + + Ok(peers) } async fn setup_and_delegate( alfie: &Engine, betty: &Engine, - ) -> anyhow::Result<(NamespaceId, UserId, UserId)> { + ) -> Result<(NamespaceId, UserId, UserId)> { let user_alfie = alfie.create_user().await?; let user_betty = betty.create_user().await?; @@ -368,7 +394,7 @@ mod tests { user: UserId, path: &[&[u8]], bytes: impl Into, - ) -> anyhow::Result<()> { + ) -> Result<()> { let path = Path::new(path)?; let entry = EntryForm::new_bytes(namespace_id, path, bytes); handle.insert(entry, user).await?; From 9ad08c9c38be75e7c3e63a62bfaebf59117f697b Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Wed, 24 Jul 2024 16:06:57 +0200 Subject: [PATCH 098/198] refactor: move capability storage to trait --- iroh-willow/src/auth.rs | 172 ++++++++--------------------- iroh-willow/src/engine/actor.rs | 2 +- iroh-willow/src/proto/meadowcap.rs | 6 - iroh-willow/src/store.rs | 6 +- iroh-willow/src/store/memory.rs | 134 +++++++++++++++++++++- iroh-willow/src/store/traits.rs | 35 +++++- 6 files changed, 210 
insertions(+), 145 deletions(-) diff --git a/iroh-willow/src/auth.rs b/iroh-willow/src/auth.rs index ec33c8cdca..c458c34f22 100644 --- a/iroh-willow/src/auth.rs +++ b/iroh-willow/src/auth.rs @@ -1,7 +1,4 @@ -use std::{ - collections::{HashMap, HashSet}, - sync::{Arc, RwLock}, -}; +use std::collections::{HashMap, HashSet}; use anyhow::Result; use serde::{Deserialize, Serialize}; @@ -16,7 +13,7 @@ use crate::{ willow::{Entry, WriteCapability}, }, session::{AreaOfInterestSelector, Interests}, - store::traits::{SecretStorage, SecretStoreError, Storage}, + store::traits::{CapsStorage, SecretStorage, SecretStoreError, Storage}, }; pub type InterestMap = HashMap>; @@ -39,8 +36,8 @@ impl DelegateTo { #[derive(Debug, Clone, Hash, Eq, PartialEq)] pub struct CapSelector { pub namespace_id: NamespaceId, - pub user: UserSelector, - pub area: AreaSelector, + pub receiver: ReceiverSelector, + pub granted_area: AreaSelector, } impl From for CapSelector { @@ -50,43 +47,47 @@ impl From for CapSelector { } impl CapSelector { - pub fn matches(&self, cap: &McCapability) -> bool { + pub fn is_covered_by(&self, cap: &McCapability) -> bool { self.namespace_id == cap.granted_namespace().id() - && self.user.includes(&cap.receiver().id()) - && self.area.matches(&cap.granted_area()) + && self.receiver.includes(&cap.receiver().id()) + && self.granted_area.is_covered_by(&cap.granted_area()) } - pub fn new(namespace_id: NamespaceId, user: UserSelector, area: AreaSelector) -> Self { + pub fn new( + namespace_id: NamespaceId, + receiver: ReceiverSelector, + granted_area: AreaSelector, + ) -> Self { Self { namespace_id, - user, - area, + receiver, + granted_area, } } pub fn with_user(namespace_id: NamespaceId, user_id: UserId) -> Self { Self::new( namespace_id, - UserSelector::Exact(user_id), + ReceiverSelector::Exact(user_id), AreaSelector::Widest, ) } pub fn widest(namespace_id: NamespaceId) -> Self { - Self::new(namespace_id, UserSelector::Any, AreaSelector::Widest) + Self::new(namespace_id, 
ReceiverSelector::Any, AreaSelector::Widest) } } #[derive( Debug, Default, Clone, Copy, Eq, PartialEq, derive_more::From, Serialize, Deserialize, Hash, )] -pub enum UserSelector { +pub enum ReceiverSelector { #[default] Any, Exact(UserId), } -impl UserSelector { +impl ReceiverSelector { fn includes(&self, user: &UserId) -> bool { match self { Self::Any => true, @@ -99,27 +100,27 @@ impl UserSelector { pub enum AreaSelector { #[default] Widest, - Area(Area), - Point(Point), + ContainsArea(Area), + ContainsPoint(Point), } impl AreaSelector { - pub fn matches(&self, other: &Area) -> bool { + pub fn is_covered_by(&self, other: &Area) -> bool { match self { AreaSelector::Widest => true, - AreaSelector::Area(area) => other.includes_area(area), - AreaSelector::Point(point) => other.includes_point(point), + AreaSelector::ContainsArea(area) => other.includes_area(area), + AreaSelector::ContainsPoint(point) => other.includes_point(point), } } } impl CapSelector { - pub fn for_entry(entry: &Entry, user_id: UserSelector) -> Self { - let granted_area = AreaSelector::Point(Point::from_entry(entry)); + pub fn for_entry(entry: &Entry, user_id: ReceiverSelector) -> Self { + let granted_area = AreaSelector::ContainsPoint(Point::from_entry(entry)); Self { namespace_id: entry.namespace_id, - user: user_id, - area: granted_area, + receiver: user_id, + granted_area, } } } @@ -159,23 +160,18 @@ pub struct CapabilityHash(iroh_base::hash::Hash); #[derive(Debug, Clone)] pub struct Auth { secrets: S::Secrets, - // TODO: Move to store and trait S::Caps - caps: Arc>, + caps: S::Caps, } + impl Auth { - pub fn new(secrets: S::Secrets) -> Self { - Self { - secrets, - // TODO: persist - caps: Default::default(), - } + pub fn new(secrets: S::Secrets, caps: S::Caps) -> Self { + Self { secrets, caps } } pub fn get_write_cap( &self, selector: &CapSelector, ) -> Result, AuthError> { - let cap = self.caps.read().unwrap().get_write_cap(selector); - // debug!(?selector, ?cap, "get write cap"); + let cap = 
self.caps.get_write_cap(selector)?; Ok(cap) } @@ -183,29 +179,18 @@ impl Auth { &self, selector: &CapSelector, ) -> Result, AuthError> { - let cap = self.caps.read().unwrap().get_read_cap(selector); - // debug!(?selector, ?cap, "get read cap"); + let cap = self.caps.get_read_cap(selector)?; Ok(cap) } - pub fn list_read_caps(&self) -> impl Iterator { - // TODO: Less clones? - self.caps - .read() - .unwrap() - .read_caps - .values() - .flatten() - .cloned() - .collect::>() - .into_iter() + pub fn list_read_caps(&self) -> Result + '_> { + self.caps.list_read_caps(None) } pub fn import_caps( &self, caps: impl IntoIterator, ) -> Result<(), AuthError> { - let mut store = self.caps.write().unwrap(); for cap in caps.into_iter() { cap.validate()?; // Only allow importing caps we can use. @@ -214,24 +199,27 @@ impl Auth { if !self.secrets.has_user(&user_id) { return Err(AuthError::MissingUserSecret(user_id)); } - store.insert_cap(cap); + self.caps.insert(cap)?; } Ok(()) } - pub fn insert_caps_unchecked(&self, caps: impl IntoIterator) { - let mut store = self.caps.write().unwrap(); + pub fn insert_caps_unchecked( + &self, + caps: impl IntoIterator, + ) -> Result<(), AuthError> { for cap in caps.into_iter() { debug!(?cap, "insert cap"); - store.insert_cap(cap); + self.caps.insert(cap)?; } + Ok(()) } pub fn resolve_interests(&self, interests: Interests) -> Result { match interests { Interests::All => { let out = self - .list_read_caps() + .list_read_caps()? 
.map(|auth| { let area = auth.read_cap().granted_area(); let aoi = AreaOfInterest::new(area); @@ -280,7 +268,7 @@ impl Auth { let read_cap = self.create_read_cap(namespace_key, user_key)?; let write_cap = self.create_write_cap(namespace_key, user_key)?; let pack = [read_cap, write_cap]; - self.insert_caps_unchecked(pack.clone()); + self.insert_caps_unchecked(pack.clone())?; Ok(pack) } @@ -349,7 +337,7 @@ impl Auth { out.push(write_cap); } if store { - self.insert_caps_unchecked(out.clone()); + self.insert_caps_unchecked(out.clone())?; } Ok(out) } @@ -393,76 +381,6 @@ impl Auth { } } -// TODO: Add trait and move impl to store::memory -#[derive(Debug, Default)] -pub struct CapStore { - write_caps: HashMap>, - read_caps: HashMap>, -} - -impl CapStore { - fn get_write_cap(&self, selector: &CapSelector) -> Option { - let candidates = self - .write_caps - .get(&selector.namespace_id) - .into_iter() - .flatten() - .filter(|cap| selector.matches(cap)); - - // Select the best candidate, by sorting for - // * first: widest area - // * then: smallest number of delegations - let best = candidates.reduce( - |prev, next| { - if next.is_wider_than(prev) { - next - } else { - prev - } - }, - ); - best.cloned() - } - - fn get_read_cap(&self, selector: &CapSelector) -> Option { - let candidates = self - .read_caps - .get(&selector.namespace_id) - .into_iter() - .flatten() - .filter(|auth| selector.matches(auth.read_cap())); - - // Select the best candidate, by sorting for - // * smallest number of delegations - // * widest area - let best = candidates.reduce(|prev, next| { - if next.read_cap().is_wider_than(prev.read_cap()) { - next - } else { - prev - } - }); - best.cloned() - } - - fn insert_cap(&mut self, cap: CapabilityPack) { - match cap { - CapabilityPack::Read(cap) => { - self.read_caps - .entry(cap.read_cap().granted_namespace().id()) - .or_default() - .push(cap); - } - CapabilityPack::Write(cap) => { - self.write_caps - .entry(cap.granted_namespace().id()) - .or_default() - 
.push(cap); - } - } - } -} - #[derive(thiserror::Error, Debug)] pub enum AuthError { #[error("invalid user id: {}", .0.fmt_short())] diff --git a/iroh-willow/src/engine/actor.rs b/iroh-willow/src/engine/actor.rs index 0609c2707d..a1a9c29567 100644 --- a/iroh-willow/src/engine/actor.rs +++ b/iroh-willow/src/engine/actor.rs @@ -357,7 +357,7 @@ impl Actor { intents, cancel_token.clone(), session_id, - EventSender(event_tx.clone()), + EventSender(event_tx), update_rx, ) .instrument(error_span!("session", peer = %peer.fmt_short())); diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index 761d52c05f..ade5aec558 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -228,12 +228,6 @@ impl McCapability { (self.granted_area().includes_area(&other.granted_area())) || (self.granted_area() == other.granted_area() && self.delegations().len() < other.delegations().len()) - - // match self.delegations().len().cmp(&other.delegations().len()) { - // Ordering::Less => true, - // Ordering::Greater => false, - // Ordering::Equal => self.granted_area().includes_area(&other.granted_area()), - // } } pub fn delegate( diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index 94d50f7e51..474cfb7ea8 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -2,7 +2,7 @@ use anyhow::{anyhow, Result}; use rand_core::CryptoRngCore; use crate::{ - auth::{Auth, AuthError, CapSelector, UserSelector}, + auth::{Auth, AuthError, CapSelector, ReceiverSelector}, form::{AuthForm, EntryOrForm}, proto::{ keys::{NamespaceId, NamespaceKind, NamespaceSecretKey, UserId}, @@ -35,7 +35,7 @@ impl Store { entries: WatchableEntryStore::new(storage.entries().clone()), secrets: storage.secrets().clone(), payloads: storage.payloads().clone(), - auth: Auth::new(storage.secrets().clone()), + auth: Auth::new(storage.secrets().clone(), storage.caps().clone()), } } @@ -64,7 +64,7 @@ impl Store { let capability = match auth { 
AuthForm::Exact(cap) => cap, AuthForm::Any(user_id) => { - let selector = CapSelector::for_entry(&entry, UserSelector::Exact(user_id)); + let selector = CapSelector::for_entry(&entry, ReceiverSelector::Exact(user_id)); self.auth() .get_write_cap(&selector)? .ok_or_else(|| anyhow!("no write capability available"))? diff --git a/iroh-willow/src/store/memory.rs b/iroh-willow/src/store/memory.rs index 62a44ce4d5..14134bebc2 100644 --- a/iroh-willow/src/store/memory.rs +++ b/iroh-willow/src/store/memory.rs @@ -5,12 +5,13 @@ use std::rc::Rc; use anyhow::Result; use crate::{ + auth::{CapSelector, CapabilityPack}, proto::{ grouping::{Range, RangeEnd, ThreeDRange}, keys::{NamespaceSecretKey, UserId, UserSecretKey}, meadowcap, - sync::Fingerprint, - willow::{AuthorisedEntry, Entry, NamespaceId}, + sync::{Fingerprint, ReadAuthorisation}, + willow::{AuthorisedEntry, Entry, NamespaceId, WriteCapability}, }, store::traits::{self, RangeSplit, SplitAction, SplitOpts}, }; @@ -20,6 +21,7 @@ pub struct Store { secrets: Rc>, entries: Rc>, payloads: iroh_blobs::store::mem::Store, + caps: Rc>, } impl Store { @@ -28,6 +30,7 @@ impl Store { payloads, secrets: Default::default(), entries: Default::default(), + caps: Default::default(), } } } @@ -36,6 +39,7 @@ impl traits::Storage for Store { type Entries = Rc>; type Secrets = Rc>; type Payloads = iroh_blobs::store::mem::Store; + type Caps = Rc>; fn entries(&self) -> &Self::Entries { &self.entries @@ -48,6 +52,10 @@ impl traits::Storage for Store { fn payloads(&self) -> &Self::Payloads { &self.payloads } + + fn caps(&self) -> &Self::Caps { + &self.caps + } } #[derive(Debug, Default)] @@ -231,3 +239,125 @@ impl traits::EntryStorage for Rc> { Ok(true) } } + +#[derive(Debug, Default)] +pub struct CapsStore { + write_caps: HashMap>, + read_caps: HashMap>, +} + +impl CapsStore { + fn get_write_cap(&self, selector: &CapSelector) -> Result> { + let candidates = self + .write_caps + .get(&selector.namespace_id) + .into_iter() + .flatten() + 
.filter(|cap| selector.is_covered_by(cap)); + + // Select the best candidate, by sorting for + // * first: widest area + // * then: smallest number of delegations + let best = candidates.reduce( + |prev, next| { + if next.is_wider_than(prev) { + next + } else { + prev + } + }, + ); + Ok(best.cloned()) + } + + fn get_read_cap(&self, selector: &CapSelector) -> Result> { + let candidates = self + .read_caps + .get(&selector.namespace_id) + .into_iter() + .flatten() + .filter(|auth| selector.is_covered_by(auth.read_cap())); + + // Select the best candidate, by sorting for + // * widest area + let best = candidates.reduce(|prev, next| { + if next.read_cap().is_wider_than(prev.read_cap()) { + next + } else { + prev + } + }); + + Ok(best.cloned()) + } + + fn list_write_caps( + &self, + namespace: Option, + ) -> Result + 'static> { + let caps = if let Some(namespace) = namespace { + self.write_caps.get(&namespace).cloned().unwrap_or_default() + } else { + self.write_caps.values().flatten().cloned().collect() + }; + Ok(caps.into_iter()) + } + + fn list_read_caps( + &self, + namespace: Option, + ) -> Result + 'static> { + let caps = if let Some(namespace) = namespace { + self.read_caps.get(&namespace).cloned().unwrap_or_default() + } else { + self.read_caps.values().flatten().cloned().collect() + }; + Ok(caps.into_iter()) + } + + fn insert(&mut self, cap: CapabilityPack) { + match cap { + CapabilityPack::Read(cap) => { + self.read_caps + .entry(cap.read_cap().granted_namespace().id()) + .or_default() + .push(cap); + } + CapabilityPack::Write(cap) => { + self.write_caps + .entry(cap.granted_namespace().id()) + .or_default() + .push(cap); + } + } + } +} + +impl traits::CapsStorage for Rc> { + fn insert(&self, cap: CapabilityPack) -> Result<()> { + self.borrow_mut().insert(cap); + Ok(()) + } + + fn list_read_caps( + &self, + namespace: Option, + ) -> Result> { + self.borrow().list_read_caps(namespace) + } + + fn list_write_caps( + &self, + namespace: Option, + ) -> Result> { + 
self.borrow().list_write_caps(namespace) + } + + fn get_write_cap(&self, selector: &CapSelector) -> Result> { + self.borrow().get_write_cap(selector) + } + + fn get_read_cap(&self, selector: &CapSelector) -> Result> { + self.borrow().get_read_cap(selector) + } +} diff --git a/iroh-willow/src/store/traits.rs b/iroh-willow/src/store/traits.rs index 8a70d9c71a..97bbf0e165 100644 --- a/iroh-willow/src/store/traits.rs +++ b/iroh-willow/src/store/traits.rs @@ -2,21 +2,26 @@ use std::fmt::Debug; use anyhow::Result; -use crate::proto::{ - grouping::ThreeDRange, - keys::{NamespaceSecretKey, NamespaceSignature, UserId, UserSecretKey, UserSignature}, - meadowcap, - sync::Fingerprint, - willow::{AuthorisedEntry, Entry, NamespaceId}, +use crate::{ + auth::{CapSelector, CapabilityPack}, + proto::{ + grouping::ThreeDRange, + keys::{NamespaceSecretKey, NamespaceSignature, UserId, UserSecretKey, UserSignature}, + meadowcap, + sync::{Fingerprint, ReadAuthorisation}, + willow::{AuthorisedEntry, Entry, NamespaceId, WriteCapability}, + }, }; pub trait Storage: Debug + Clone + 'static { type Entries: EntryStorage; type Secrets: SecretStorage; type Payloads: iroh_blobs::store::Store; + type Caps: CapsStorage; fn entries(&self) -> &Self::Entries; fn secrets(&self) -> &Self::Secrets; fn payloads(&self) -> &Self::Payloads; + fn caps(&self) -> &Self::Caps; } pub trait SecretStorage: Debug + Clone + 'static { @@ -139,3 +144,21 @@ impl Default for SplitOpts { } } } + +pub trait CapsStorage: Debug + Clone { + fn insert(&self, cap: CapabilityPack) -> Result<()>; + + fn list_read_caps( + &self, + namespace: Option, + ) -> Result + '_>; + + fn list_write_caps( + &self, + namespace: Option, + ) -> Result + '_>; + + fn get_write_cap(&self, selector: &CapSelector) -> Result>; + + fn get_read_cap(&self, selector: &CapSelector) -> Result>; +} From 24f3defefb173f3081af6e3314b539775e4c090c Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 1 Aug 2024 12:24:08 +0200 Subject: [PATCH 
099/198] docs/refactor: improve docs and cleanup some types --- iroh-willow/src/auth.rs | 92 +++++++++----- iroh-willow/src/engine.rs | 26 ++-- iroh-willow/src/form.rs | 43 +++++-- iroh-willow/src/net.rs | 10 +- iroh-willow/src/proto/meadowcap.rs | 18 +-- iroh-willow/src/proto/sync.rs | 2 +- iroh-willow/src/session.rs | 6 +- iroh-willow/src/util.rs | 1 - iroh-willow/src/util/task.rs | 193 ----------------------------- 9 files changed, 127 insertions(+), 264 deletions(-) delete mode 100644 iroh-willow/src/util/task.rs diff --git a/iroh-willow/src/auth.rs b/iroh-willow/src/auth.rs index c458c34f22..a7045a7a4c 100644 --- a/iroh-willow/src/auth.rs +++ b/iroh-willow/src/auth.rs @@ -21,11 +21,11 @@ pub type InterestMap = HashMap>; #[derive(Debug, Clone)] pub struct DelegateTo { pub user: UserId, - pub restrict_area: Option, + pub restrict_area: RestrictArea, } impl DelegateTo { - pub fn new(user: UserId, restrict_area: Option) -> Self { + pub fn new(user: UserId, restrict_area: RestrictArea) -> Self { Self { user, restrict_area, @@ -33,10 +33,29 @@ impl DelegateTo { } } +#[derive(Debug, Clone)] +pub enum RestrictArea { + None, + Restrict(Area) +} + +impl RestrictArea { + pub fn with_default(self, default: Area) -> Area { + match self { + RestrictArea::None => default.clone(), + RestrictArea::Restrict(area) => area + } + } +} + +/// Selector for a capability. #[derive(Debug, Clone, Hash, Eq, PartialEq)] pub struct CapSelector { + /// The namespace to which the capability must grant access. pub namespace_id: NamespaceId, + /// Select the user who may use the capability. pub receiver: ReceiverSelector, + /// Select the area to which the capability grants access. pub granted_area: AreaSelector, } @@ -47,12 +66,14 @@ impl From for CapSelector { } impl CapSelector { + /// Checks if the provided capability is matched by this [`CapSelector`]. 
pub fn is_covered_by(&self, cap: &McCapability) -> bool { self.namespace_id == cap.granted_namespace().id() && self.receiver.includes(&cap.receiver().id()) && self.granted_area.is_covered_by(&cap.granted_area()) } + /// Creates a new [`CapSelector`]. pub fn new( namespace_id: NamespaceId, receiver: ReceiverSelector, @@ -65,6 +86,8 @@ impl CapSelector { } } + /// Creates a [`CapSelector`] which selects the widest capability for the provided namespace + /// and user. pub fn with_user(namespace_id: NamespaceId, user_id: UserId) -> Self { Self::new( namespace_id, @@ -73,22 +96,41 @@ impl CapSelector { ) } - pub fn widest(namespace_id: NamespaceId) -> Self { - Self::new(namespace_id, ReceiverSelector::Any, AreaSelector::Widest) + /// Creates a [`CapSelector`] which selects the widest capability for the provided namespace. + /// + /// Will use any user available in our secret store and select the capability which grants the + /// widest area. + // TODO: Document exact selection process if there are capabilities with distinct areas. + pub fn widest(namespace: NamespaceId) -> Self { + Self::new(namespace, ReceiverSelector::Any, AreaSelector::Widest) + } + + /// Select a capability which authorises writing the provided `entry` on behalf of the provided + /// `user_id`. + pub fn for_entry(entry: &Entry, user_id: ReceiverSelector) -> Self { + let granted_area = AreaSelector::ContainsPoint(Point::from_entry(entry)); + Self { + namespace_id: entry.namespace_id, + receiver: user_id, + granted_area, + } } } +/// Select the receiver for a capability. #[derive( Debug, Default, Clone, Copy, Eq, PartialEq, derive_more::From, Serialize, Deserialize, Hash, )] pub enum ReceiverSelector { + /// The receiver may be any user for which we have a secret key stored. #[default] Any, + /// The receiver must be the provided user. 
Exact(UserId), } impl ReceiverSelector { - fn includes(&self, user: &UserId) -> bool { + pub fn includes(&self, user: &UserId) -> bool { match self { Self::Any => true, Self::Exact(u) => u == user, @@ -96,15 +138,20 @@ impl ReceiverSelector { } } +/// Selector for the area to which a capability must grant access. #[derive(Debug, Clone, Default, Hash, Eq, PartialEq)] pub enum AreaSelector { + /// Use the capability which covers the biggest area. #[default] Widest, + /// Use any capability that covers the provided area. ContainsArea(Area), + /// Use any capability that covers the provided point. ContainsPoint(Point), } impl AreaSelector { + /// Checks whether the provided [`Area`] is matched by this [`AreaSelector`]. pub fn is_covered_by(&self, other: &Area) -> bool { match self { AreaSelector::Widest => true, @@ -114,20 +161,12 @@ impl AreaSelector { } } -impl CapSelector { - pub fn for_entry(entry: &Entry, user_id: ReceiverSelector) -> Self { - let granted_area = AreaSelector::ContainsPoint(Point::from_entry(entry)); - Self { - namespace_id: entry.namespace_id, - receiver: user_id, - granted_area, - } - } -} - +/// A serializable capability. #[derive(Debug, Serialize, Deserialize, Clone)] pub enum CapabilityPack { + /// A read authorisation. Read(ReadAuthorisation), + /// A write authorisation. 
Write(WriteCapability), } @@ -154,9 +193,6 @@ impl CapabilityPack { } } -#[derive(Debug, Eq, PartialEq, Clone, Copy, Hash)] -pub struct CapabilityHash(iroh_base::hash::Hash); - #[derive(Debug, Clone)] pub struct Auth { secrets: S::Secrets, @@ -284,10 +320,10 @@ impl Auth { .secrets .get_namespace(&namespace_id) .ok_or(AuthError::MissingNamespaceSecret(namespace_id))?; - McCapability::new_owned(&namespace_secret, user_key, AccessMode::Read) + McCapability::new_owned(&namespace_secret, user_key, AccessMode::ReadOnly) } NamespaceKind::Communal => { - McCapability::new_communal(namespace_key, user_key, AccessMode::Read) + McCapability::new_communal(namespace_key, user_key, AccessMode::ReadOnly) } }; // TODO: Subspace capability. @@ -307,10 +343,10 @@ impl Auth { .secrets .get_namespace(&namespace_id) .ok_or(AuthError::MissingNamespaceSecret(namespace_id))?; - McCapability::new_owned(&namespace_secret, user_key, AccessMode::Write) + McCapability::new_owned(&namespace_secret, user_key, AccessMode::ReadWrite) } NamespaceKind::Communal => { - McCapability::new_communal(namespace_key, user_key, AccessMode::Write) + McCapability::new_communal(namespace_key, user_key, AccessMode::ReadWrite) } }; let pack = CapabilityPack::Write(cap); @@ -332,7 +368,7 @@ impl Auth { let restrict_area = to.restrict_area; let read_cap = self.delegate_read_cap(&from, user_key, restrict_area.clone())?; out.push(read_cap); - if access_mode == AccessMode::Write { + if access_mode == AccessMode::ReadWrite { let write_cap = self.delegate_write_cap(&from, user_key, restrict_area)?; out.push(write_cap); } @@ -346,7 +382,7 @@ impl Auth { &self, from: &CapSelector, to: UserPublicKey, - restrict_area: Option, + restrict_area: RestrictArea, ) -> Result { let auth = self.get_read_cap(from)?.ok_or(AuthError::NoCapability)?; let read_cap = auth.read_cap(); @@ -356,7 +392,7 @@ impl Auth { .secrets .get_user(&user_id) .ok_or(AuthError::MissingUserSecret(user_id))?; - let area = 
restrict_area.unwrap_or(read_cap.granted_area()); + let area = restrict_area.with_default(read_cap.granted_area()); let new_read_cap = read_cap.delegate(&user_secret, to, area)?; // TODO: Subspace capability let new_subspace_cap = None; @@ -368,14 +404,14 @@ impl Auth { &self, from: &CapSelector, to: UserPublicKey, - restrict_area: Option, + restrict_area: RestrictArea, ) -> Result { let cap = self.get_write_cap(from)?.ok_or(AuthError::NoCapability)?; let user_secret = self .secrets .get_user(&cap.receiver().id()) .ok_or(AuthError::MissingUserSecret(cap.receiver().id()))?; - let area = restrict_area.unwrap_or(cap.granted_area()); + let area = restrict_area.with_default(cap.granted_area()); let new_cap = cap.delegate(&user_secret, to, area)?; Ok(CapabilityPack::Write(new_cap)) } diff --git a/iroh-willow/src/engine.rs b/iroh-willow/src/engine.rs index 1e078e858f..bdcc0e160e 100644 --- a/iroh-willow/src/engine.rs +++ b/iroh-willow/src/engine.rs @@ -25,7 +25,7 @@ const PEER_MANAGER_INBOX_CAP: usize = 128; pub struct Engine { actor_handle: ActorHandle, peer_manager_inbox: mpsc::Sender, - _peer_manager_handle: SharedAbortingJoinHandle>, + _peer_manager_task: SharedAbortingJoinHandle>, } impl Engine { @@ -35,17 +35,17 @@ impl Engine { accept_opts: AcceptOpts, ) -> Self { let me = endpoint.node_id(); - let actor = ActorHandle::spawn(create_store, me); + let actor_handle = ActorHandle::spawn(create_store, me); let (pm_inbox_tx, pm_inbox_rx) = mpsc::channel(PEER_MANAGER_INBOX_CAP); - let peer_manager = PeerManager::new(actor.clone(), endpoint, pm_inbox_rx, accept_opts); - let peer_manager_handle = tokio::task::spawn( + let peer_manager = PeerManager::new(actor_handle.clone(), endpoint, pm_inbox_rx, accept_opts); + let peer_manager_task = tokio::task::spawn( async move { peer_manager.run().await.map_err(|err| format!("{err:?}")) } .instrument(error_span!("peer_manager", me = me.fmt_short())), ); Engine { - actor_handle: actor, + actor_handle, peer_manager_inbox: pm_inbox_tx, 
- _peer_manager_handle: peer_manager_handle.into(), + _peer_manager_task: peer_manager_task.into(), } } @@ -88,7 +88,7 @@ mod tests { use tokio::task::JoinHandle; use crate::{ - auth::{CapSelector, DelegateTo}, + auth::{CapSelector, DelegateTo, RestrictArea}, engine::{AcceptOpts, Engine}, form::EntryForm, net::ALPN, @@ -124,7 +124,7 @@ mod tests { async move { let path = Path::new(&[b"foo"]).unwrap(); - let interests = Interests::select().area(namespace, [Area::path(path.clone())]); + let interests = Interests::builder().add_area(namespace, [Area::path(path.clone())]); let init = SessionInit::new(interests, SessionMode::ReconcileOnce); let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); @@ -163,7 +163,7 @@ mod tests { async move { let path = Path::new(&[b"bar"]).unwrap(); - let interests = Interests::select().area(namespace, [Area::path(path.clone())]); + let interests = Interests::builder().add_area(namespace, [Area::path(path.clone())]); let init = SessionInit::new(interests, SessionMode::ReconcileOnce); let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); @@ -219,7 +219,7 @@ mod tests { insert(&betty, namespace, betty_user, &[b"bar"], "bar 1").await?; let path = Path::new(&[b"foo"]).unwrap(); - let interests = Interests::select().area(namespace, [Area::path(path.clone())]); + let interests = Interests::builder().add_area(namespace, [Area::path(path.clone())]); let init = SessionInit::new(interests, SessionMode::Live); let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); @@ -247,7 +247,7 @@ mod tests { assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); let path = Path::new(&[b"bar"]).unwrap(); - let interests = Interests::select().area(namespace, [Area::path(path.clone())]); + let interests = Interests::builder().add_area(namespace, [Area::path(path.clone())]); intent.add_interests(interests).await?; assert_eq!( @@ -379,8 +379,8 @@ mod tests { let cap_for_betty = alfie 
.delegate_caps( CapSelector::widest(namespace_id), - AccessMode::Write, - DelegateTo::new(user_betty, None), + AccessMode::ReadWrite, + DelegateTo::new(user_betty, RestrictArea::None), ) .await?; diff --git a/iroh-willow/src/form.rs b/iroh-willow/src/form.rs index 6b4db56f10..c650b84a20 100644 --- a/iroh-willow/src/form.rs +++ b/iroh-willow/src/form.rs @@ -1,4 +1,5 @@ -//! Types for forms for entries +//! Structs that allow constructing entries and other structs where some fields may be +//! automatically filled. use std::{io, path::PathBuf}; @@ -25,12 +26,18 @@ use crate::{ /// Sources where payload data can come from. #[derive(derive_more::Debug)] pub enum PayloadForm { - Hash(HashForm), + /// Set the payload hash directly. The blob must exist in the node's blob store, this will fail + /// otherwise. + Hash(Hash), + /// Import data from the provided bytes and set as payload. #[debug("Bytes({})", _0.len())] Bytes(Bytes), + /// Import data from a file on the node's local file system and set as payload. File(PathBuf, ImportMode), #[debug("Stream")] + /// Import data from a [`Stream`] of bytes and set as payload. Stream(Box> + Send + Sync + Unpin>), + /// Import data from a [`AsyncRead`] and set as payload. #[debug("Reader")] Reader(Box), } @@ -41,8 +48,7 @@ impl PayloadForm { store: &S, ) -> anyhow::Result<(Hash, u64)> { let (hash, len) = match self { - PayloadForm::Hash(HashForm::Exact(digest, len)) => (digest, len), - PayloadForm::Hash(HashForm::Find(digest)) => { + PayloadForm::Hash(digest) => { let entry = store.get(&digest).await?; let entry = entry.ok_or_else(|| anyhow::anyhow!("hash not foundA"))?; (digest, entry.size().value()) @@ -78,12 +84,14 @@ impl PayloadForm { } } +/// Either a [`Entry`] or a [`EntryForm`]. #[derive(Debug)] pub enum EntryOrForm { Entry(Entry), Form(EntryForm), } +/// Creates an entry while setting some fields automatically. 
#[derive(Debug)] pub struct EntryForm { pub namespace_id: NamespaceId, @@ -94,6 +102,8 @@ pub struct EntryForm { } impl EntryForm { + /// Creates a new [`EntryForm`] where the subspace is set to the user authenticating the entry, + /// the timestamp is the current system time, and the payload is set to the provided [`Bytes`]. pub fn new_bytes(namespace_id: NamespaceId, path: Path, payload: impl Into) -> Self { EntryForm { namespace_id, @@ -103,6 +113,11 @@ impl EntryForm { payload: PayloadForm::Bytes(payload.into()), } } + + /// Convert the form into an [`Entry`] by filling the fields with data from the environment and + /// the provided [`Store`]. + /// + /// `user_id` must be set to the user who is authenticating the entry. pub async fn into_entry( self, store: &Store, @@ -129,20 +144,19 @@ impl EntryForm { } } -#[derive(Debug, Clone, Serialize, Deserialize)] -pub enum HashForm { - Find(Hash), - Exact(Hash, u64), -} - +/// Select which capability to use for authenticating a new entry. #[derive(Debug, Clone, Serialize, Deserialize, derive_more::From)] pub enum AuthForm { + /// Use any available capability which covers the entry and whose receiver is the provided + /// user. Any(UserId), - // TODO: WriteCapabilityHash + /// Use the provided [`WriteCapability`]. Exact(WriteCapability), } impl AuthForm { + /// Get the user id of the user who is the receiver of the capability selected by this + /// [`AuthForm`]. pub fn user_id(&self) -> UserId { match self { AuthForm::Any(user) => *user, @@ -151,14 +165,21 @@ impl AuthForm { } } +/// Set the subspace either to a provided [`SubspaceId`], or use the user authenticating the entry +/// as subspace. #[derive(Debug, Clone, Serialize, Deserialize)] pub enum SubspaceForm { + /// Set the subspace to the [`UserId`] of the user authenticating the entry. User, + /// Set the subspace to the provided [`SubspaceId`]. Exact(SubspaceId), } +/// Set the timestamp either to the provided [`Timestamp`] or to the current system time. 
#[derive(Debug, Clone, Serialize, Deserialize)] pub enum TimestampForm { + /// Set the timestamp to the current system time. Now, + /// Set the timestamp to the provided value. Exact(Timestamp), } diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index c5677cff39..1a20223506 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -330,7 +330,7 @@ mod tests { use tracing::info; use crate::{ - auth::{CapSelector, DelegateTo}, + auth::{CapSelector, DelegateTo, RestrictArea}, engine::ActorHandle, form::{AuthForm, EntryForm, PayloadForm, SubspaceForm, TimestampForm}, net::WillowConn, @@ -390,8 +390,8 @@ mod tests { let cap_for_betty = handle_alfie .delegate_caps( CapSelector::widest(namespace_id), - AccessMode::Write, - DelegateTo::new(user_betty, None), + AccessMode::ReadWrite, + DelegateTo::new(user_betty, RestrictArea::None), ) .await?; @@ -507,8 +507,8 @@ mod tests { let cap_for_betty = handle_alfie .delegate_caps( CapSelector::widest(namespace_id), - AccessMode::Write, - DelegateTo::new(user_betty, None), + AccessMode::ReadWrite, + DelegateTo::new(user_betty, RestrictArea::None), ) .await?; diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index ade5aec558..83e882e78a 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -27,7 +27,7 @@ pub fn is_authorised_write(entry: &Entry, token: &MeadowcapAuthorisationToken) - let (capability, signature) = token.as_parts(); capability.is_valid() - && capability.access_mode() == AccessMode::Write + && capability.access_mode() == AccessMode::ReadWrite && capability.granted_area().includes_entry(entry) && capability .receiver() @@ -52,7 +52,7 @@ pub fn attach_authorisation( capability: McCapability, secret_key: &UserSecretKey, ) -> Result { - if capability.access_mode() != AccessMode::Write + if capability.access_mode() != AccessMode::ReadWrite || capability.granted_namespace().id() != entry.namespace_id || 
!capability.granted_area().includes_entry(&entry) || capability.receiver() != &secret_key.public_key() @@ -276,8 +276,8 @@ impl Encoder for McSubspaceCapability { #[derive(Debug, Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Hash, Ord, PartialOrd)] pub enum AccessMode { - Read, - Write, + ReadOnly, + ReadWrite, } /// A capability that authorizes reads or writes in communal namespaces. @@ -398,8 +398,8 @@ impl CommunalCapability { 1 + NamespacePublicKey::LENGTH + area_in_area.encoded_len() + UserPublicKey::LENGTH; let mut out = std::io::Cursor::new(vec![0u8; len]); let init = match self.access_mode { - AccessMode::Read => 0x00, - AccessMode::Write => 0x01, + AccessMode::ReadOnly => 0x00, + AccessMode::ReadWrite => 0x01, }; out.write_all(&[init])?; out.write_all(&self.namespace_key.to_bytes())?; @@ -507,8 +507,8 @@ impl OwnedCapability { // or the byte 0x03 (if access_mode is write), // followed by the user_key (encoded via encode_user_pk). signable[0] = match access_mode { - AccessMode::Read => 0x02, - AccessMode::Write => 0x03, + AccessMode::ReadOnly => 0x02, + AccessMode::ReadWrite => 0x03, }; signable[1..].copy_from_slice(user_key.as_bytes()); signable @@ -714,7 +714,7 @@ mod tests { let betty_secret = UserSecretKey::generate(&mut rng); let alfie_public = alfie_secret.public_key(); let betty_public = betty_secret.public_key(); - let cap = McCapability::new_owned(&namespace_secret, alfie_public, AccessMode::Write); + let cap = McCapability::new_owned(&namespace_secret, alfie_public, AccessMode::ReadWrite); cap.validate().expect("cap to be valid"); let cap_betty = cap .delegate(&alfie_secret, betty_public, Area::full()) diff --git a/iroh-willow/src/proto/sync.rs b/iroh-willow/src/proto/sync.rs index fa2e8a19af..add058876d 100644 --- a/iroh-willow/src/proto/sync.rs +++ b/iroh-willow/src/proto/sync.rs @@ -126,7 +126,7 @@ impl ReadAuthorisation { } pub fn new_owned(namespace_secret: &NamespaceSecretKey, user_key: UserPublicKey) -> Self { - let read_cap = 
ReadCapability::new_owned(namespace_secret, user_key, AccessMode::Read); + let read_cap = ReadCapability::new_owned(namespace_secret, user_key, AccessMode::ReadOnly); let subspace_cap = Arc::new(meadowcap::McSubspaceCapability::new( namespace_secret, user_key, diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index f3a138a4bf..b274722efa 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -75,7 +75,7 @@ pub enum Interests { } impl Interests { - pub fn select() -> SelectBuilder { + pub fn builder() -> SelectBuilder { SelectBuilder::default() } } @@ -84,13 +84,13 @@ impl Interests { pub struct SelectBuilder(HashMap); impl SelectBuilder { - pub fn add_full(mut self, cap: impl Into) -> Self { + pub fn add_full_cap(mut self, cap: impl Into) -> Self { let cap = cap.into(); self.0.insert(cap, AreaOfInterestSelector::Widest); self } - pub fn area( + pub fn add_area( mut self, cap: impl Into, aois: impl IntoIterator>, diff --git a/iroh-willow/src/util.rs b/iroh-willow/src/util.rs index 45fa3500da..4966aa74fe 100644 --- a/iroh-willow/src/util.rs +++ b/iroh-willow/src/util.rs @@ -5,5 +5,4 @@ pub mod codec; pub mod gen_stream; pub mod queue; pub mod stream; -pub mod task; pub mod time; diff --git a/iroh-willow/src/util/task.rs b/iroh-willow/src/util/task.rs deleted file mode 100644 index aef803574e..0000000000 --- a/iroh-willow/src/util/task.rs +++ /dev/null @@ -1,193 +0,0 @@ -//! Utilities for working with tokio tasks. 
- -use std::{ - cell::RefCell, - collections::HashMap, - future::{poll_fn, Future}, - pin::Pin, - rc::Rc, - task::{Context, Poll}, -}; - -use futures_concurrency::future::{future_group, FutureGroup}; -use futures_lite::{Stream, StreamExt}; -use tokio::task::AbortHandle; -use tokio::task::JoinError; -use tracing::{Instrument, Span}; - -#[derive(derive_more::Debug, Clone, Copy, Hash, Eq, PartialEq)] -#[debug("{:?}", _0)] -pub struct TaskKey(future_group::Key); - -/// A collection of tasks spawned on a Tokio runtime, associated with hash map keys. -/// -/// Similar to [`tokio::task::JoinSet`] but can also contain local tasks, and each task is -/// identified by a key which is returned upon completion of the task. -/// -/// Uses [`tokio::task::spawn`] and [`tokio::task::spawn_local`] in combination with [`future_group`] for keeping the join handles around. -// -// TODO: Replace with [`tokio::task::JoinMap`] once it doesn't need tokio unstable anymore. -#[derive(Debug)] -pub struct JoinMap { - tasks: future_group::Keyed>, - abort_handles: HashMap, - keys: HashMap, -} - -impl Default for JoinMap { - fn default() -> Self { - Self { - tasks: FutureGroup::new().keyed(), - keys: Default::default(), - abort_handles: Default::default(), - } - } -} - -impl JoinMap { - /// Create a new [`TaskMap`]. - pub fn new() -> Self { - Self::default() - } - - /// Spawn a new task on the currently executing [`tokio::task::LocalSet`]. - pub fn spawn_local + 'static>(&mut self, key: K, future: F) -> TaskKey { - let handle = tokio::task::spawn_local(future); - let abort_handle = handle.abort_handle(); - let k = TaskKey(self.tasks.insert(handle)); - self.keys.insert(k, key); - self.abort_handles.insert(k, abort_handle); - k - } - - /// Poll for one of the tasks in the map to complete. 
- pub fn poll_join_next( - &mut self, - cx: &mut Context<'_>, - ) -> Poll)>> { - let Some((key, item)) = std::task::ready!(Pin::new(&mut self.tasks).poll_next(cx)) else { - return Poll::Ready(None); - }; - let key = self.keys.remove(&TaskKey(key)).expect("key to exist"); - Poll::Ready(Some((key, item))) - } - - /// Remove a task from the map. - pub fn remove(&mut self, task_key: &TaskKey) -> bool { - self.keys.remove(task_key); - self.tasks.remove(task_key.0) - } - - /// Returns `true` if the task map is currently empty. - pub fn is_empty(&self) -> bool { - self.tasks.is_empty() - } - - /// Returns the number of tasks currently in the map. - pub fn len(&self) -> usize { - self.tasks.len() - } - - pub fn iter(&self) -> impl Iterator { - self.keys.iter().map(|(a, b)| (b, a)) - } - - pub fn abort_all(&mut self) { - for (_, handle) in self.abort_handles.drain() { - handle.abort(); - } - } - - pub async fn shutdown(&mut self) { - self.abort_all(); - while self.next().await.is_some() {} - } -} - -impl JoinMap { - /// Spawn a new, non-local task on the current tokio runtime. - pub fn spawn + 'static + Send>(&mut self, future: F) -> TaskKey { - let handle = tokio::task::spawn(future); - let key = self.tasks.insert(handle); - TaskKey(key) - } -} - -impl Stream for JoinMap { - type Item = (K, Result); - - /// Poll for one of the tasks to complete. - /// - /// See [`Self::poll_join_next`] for details. 
- fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Self::poll_join_next(self.get_mut(), cx) - } -} - -#[derive(Debug)] -pub struct SharedJoinMap(Rc>>); - -impl Clone for SharedJoinMap { - fn clone(&self) -> Self { - Self(Rc::clone(&self.0)) - } -} - -impl Default for SharedJoinMap { - fn default() -> Self { - Self(Default::default()) - } -} - -impl SharedJoinMap -where - K: Unpin, - T: 'static, -{ - pub async fn join_next(&self) -> Option<(K, Result)> { - poll_fn(|cx| { - let mut tasks = self.0.borrow_mut(); - let res = std::task::ready!(Pin::new(&mut tasks).poll_join_next(cx)); - Poll::Ready(res) - }) - .await - } - - pub fn abort_all(&self) { - self.0.borrow_mut().abort_all(); - } - - pub async fn shutdown(&self) { - self.abort_all(); - while self.join_next().await.is_some() {} - } -} - -impl SharedJoinMap { - pub fn spawn(&self, span: Span, fut: Fut) - where - Fut: std::future::Future + 'static, - { - let fut = fut.instrument(span.clone()); - self.0.borrow_mut().spawn_local(span, fut); - } - - pub fn remaining_tasks(&self) -> String { - let tasks = self.0.borrow(); - let mut out = vec![]; - for (span, _k) in tasks.iter() { - let name = span.metadata().unwrap().name(); - out.push(name.to_string()); - } - out.join(",") - } - - pub fn log_remaining_tasks(&self) { - let tasks = self.0.borrow(); - let names = tasks - .iter() - .map(|t| t.0.metadata().unwrap().name()) - .collect::>(); - tracing::debug!(tasks=?names, "active_tasks"); - } -} From b650f649b3700abd7ffcd6ed931cdc2b7f5a37df Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 1 Aug 2024 12:28:35 +0200 Subject: [PATCH 100/198] refactor: move integration tests --- iroh-willow/src/engine.rs | 329 ----------------------------------- iroh-willow/tests/basic.rs | 341 +++++++++++++++++++++++++++++++++++++ 2 files changed, 341 insertions(+), 329 deletions(-) create mode 100644 iroh-willow/tests/basic.rs diff --git a/iroh-willow/src/engine.rs b/iroh-willow/src/engine.rs index 
bdcc0e160e..477833a494 100644 --- a/iroh-willow/src/engine.rs +++ b/iroh-willow/src/engine.rs @@ -72,332 +72,3 @@ impl std::ops::Deref for Engine { &self.actor_handle } } - -#[cfg(test)] -mod tests { - use std::sync::{Arc, Mutex}; - - use anyhow::Result; - use bytes::Bytes; - use futures_concurrency::future::TryJoin; - use futures_lite::StreamExt; - use iroh_net::{Endpoint, NodeId}; - use rand::SeedableRng; - use rand_chacha::ChaCha12Rng; - use rand_core::CryptoRngCore; - use tokio::task::JoinHandle; - - use crate::{ - auth::{CapSelector, DelegateTo, RestrictArea}, - engine::{AcceptOpts, Engine}, - form::EntryForm, - net::ALPN, - proto::{ - grouping::Area, - keys::{NamespaceId, NamespaceKind, UserId}, - meadowcap::AccessMode, - willow::Path, - }, - session::{intents::EventKind, Interests, SessionInit, SessionMode}, - }; - - fn create_rng(seed: &str) -> ChaCha12Rng { - let seed = iroh_base::hash::Hash::new(seed); - ChaCha12Rng::from_seed(*(seed.as_bytes())) - } - - #[tokio::test(flavor = "multi_thread")] - async fn peer_manager_two_intents() -> Result<()> { - iroh_test::logging::setup_multithreaded(); - let mut rng = create_rng("peer_manager_two_intents"); - - let [alfie, betty] = spawn_two(&mut rng).await?; - let (namespace, _alfie_user, betty_user) = setup_and_delegate(&alfie, &betty).await?; - let betty_node_id = betty.node_id(); - - insert(&betty, namespace, betty_user, &[b"foo", b"1"], "foo 1").await?; - insert(&betty, namespace, betty_user, &[b"bar", b"2"], "bar 2").await?; - insert(&betty, namespace, betty_user, &[b"bar", b"3"], "bar 3").await?; - - let task_foo_path = tokio::task::spawn({ - let alfie = alfie.clone(); - async move { - let path = Path::new(&[b"foo"]).unwrap(); - - let interests = Interests::builder().add_area(namespace, [Area::path(path.clone())]); - let init = SessionInit::new(interests, SessionMode::ReconcileOnce); - let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); - - assert_eq!( - intent.next().await.unwrap(), - 
EventKind::CapabilityIntersection { - namespace, - area: Area::full(), - } - ); - - assert_eq!( - intent.next().await.unwrap(), - EventKind::InterestIntersection { - namespace, - area: Area::path(path.clone()).into() - } - ); - - assert_eq!( - intent.next().await.unwrap(), - EventKind::Reconciled { - namespace, - area: Area::path(path.clone()).into() - } - ); - - assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); - - assert!(intent.next().await.is_none()); - } - }); - - let task_bar_path = tokio::task::spawn({ - let alfie = alfie.clone(); - async move { - let path = Path::new(&[b"bar"]).unwrap(); - - let interests = Interests::builder().add_area(namespace, [Area::path(path.clone())]); - let init = SessionInit::new(interests, SessionMode::ReconcileOnce); - - let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); - - assert_eq!( - intent.next().await.unwrap(), - EventKind::CapabilityIntersection { - namespace, - area: Area::full(), - } - ); - - assert_eq!( - intent.next().await.unwrap(), - EventKind::InterestIntersection { - namespace, - area: Area::path(path.clone()).into() - } - ); - - assert_eq!( - intent.next().await.unwrap(), - EventKind::Reconciled { - namespace, - area: Area::path(path.clone()).into() - } - ); - - assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); - - assert!(intent.next().await.is_none()); - } - }); - - task_foo_path.await.unwrap(); - task_bar_path.await.unwrap(); - - [alfie, betty].map(Peer::shutdown).try_join().await?; - - Ok(()) - } - - #[tokio::test(flavor = "multi_thread")] - async fn peer_manager_update_intent() -> Result<()> { - iroh_test::logging::setup_multithreaded(); - let mut rng = create_rng("peer_manager_update_intent"); - - let [alfie, betty] = spawn_two(&mut rng).await?; - let (namespace, _alfie_user, betty_user) = setup_and_delegate(&alfie, &betty).await?; - let betty_node_id = betty.node_id(); - - insert(&betty, namespace, betty_user, &[b"foo"], "foo 1").await?; - 
insert(&betty, namespace, betty_user, &[b"bar"], "bar 1").await?; - - let path = Path::new(&[b"foo"]).unwrap(); - let interests = Interests::builder().add_area(namespace, [Area::path(path.clone())]); - let init = SessionInit::new(interests, SessionMode::Live); - let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); - - assert_eq!( - intent.next().await.unwrap(), - EventKind::CapabilityIntersection { - namespace, - area: Area::full(), - } - ); - assert_eq!( - intent.next().await.unwrap(), - EventKind::InterestIntersection { - namespace, - area: Area::path(path.clone()).into() - } - ); - assert_eq!( - intent.next().await.unwrap(), - EventKind::Reconciled { - namespace, - area: Area::path(path.clone()).into() - } - ); - assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); - - let path = Path::new(&[b"bar"]).unwrap(); - let interests = Interests::builder().add_area(namespace, [Area::path(path.clone())]); - intent.add_interests(interests).await?; - - assert_eq!( - intent.next().await.unwrap(), - EventKind::InterestIntersection { - namespace, - area: Area::path(path.clone()).into() - } - ); - assert_eq!( - intent.next().await.unwrap(), - EventKind::Reconciled { - namespace, - area: Area::path(path.clone()).into() - } - ); - - assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); - - intent.close().await; - - assert!(intent.next().await.is_none(),); - - [alfie, betty].map(Peer::shutdown).try_join().await?; - Ok(()) - } - - #[derive(Debug, Clone)] - struct Peer { - endpoint: Endpoint, - engine: Engine, - accept_task: Arc>>>>, - } - - impl Peer { - pub async fn spawn( - secret_key: iroh_net::key::SecretKey, - accept_opts: AcceptOpts, - ) -> Result { - let endpoint = Endpoint::builder() - .secret_key(secret_key) - .alpns(vec![ALPN.to_vec()]) - .bind(0) - .await?; - let payloads = iroh_blobs::store::mem::Store::default(); - let create_store = move || crate::store::memory::Store::new(payloads); - let engine = 
Engine::spawn(endpoint.clone(), create_store, accept_opts); - let accept_task = tokio::task::spawn({ - let engine = engine.clone(); - let endpoint = endpoint.clone(); - async move { - while let Some(mut conn) = endpoint.accept().await { - let alpn = conn.alpn().await?; - if alpn != ALPN { - continue; - } - let conn = conn.await?; - engine.handle_connection(conn).await?; - } - Result::Ok(()) - } - }); - Ok(Self { - endpoint, - engine, - accept_task: Arc::new(Mutex::new(Some(accept_task))), - }) - } - - pub async fn shutdown(self) -> Result<()> { - let accept_task = self.accept_task.lock().unwrap().take(); - if let Some(accept_task) = accept_task { - accept_task.abort(); - match accept_task.await { - Err(err) if err.is_cancelled() => {} - Ok(Ok(())) => {} - Err(err) => Err(err)?, - Ok(Err(err)) => Err(err)?, - } - } - self.engine.shutdown().await?; - self.endpoint.close(0u8.into(), b"").await?; - Ok(()) - } - - pub fn node_id(&self) -> NodeId { - self.endpoint.node_id() - } - } - - impl std::ops::Deref for Peer { - type Target = Engine; - fn deref(&self) -> &Self::Target { - &self.engine - } - } - - async fn spawn_two(rng: &mut impl CryptoRngCore) -> Result<[Peer; 2]> { - let peers = [ - iroh_net::key::SecretKey::generate_with_rng(rng), - iroh_net::key::SecretKey::generate_with_rng(rng), - ] - .map(|secret_key| Peer::spawn(secret_key, Default::default())) - .try_join() - .await?; - - peers[0] - .endpoint - .add_node_addr(peers[1].endpoint.node_addr().await?)?; - - peers[1] - .endpoint - .add_node_addr(peers[0].endpoint.node_addr().await?)?; - - Ok(peers) - } - - async fn setup_and_delegate( - alfie: &Engine, - betty: &Engine, - ) -> Result<(NamespaceId, UserId, UserId)> { - let user_alfie = alfie.create_user().await?; - let user_betty = betty.create_user().await?; - - let namespace_id = alfie - .create_namespace(NamespaceKind::Owned, user_alfie) - .await?; - - let cap_for_betty = alfie - .delegate_caps( - CapSelector::widest(namespace_id), - AccessMode::ReadWrite, - 
DelegateTo::new(user_betty, RestrictArea::None), - ) - .await?; - - betty.import_caps(cap_for_betty).await?; - Ok((namespace_id, user_alfie, user_betty)) - } - - async fn insert( - handle: &Engine, - namespace_id: NamespaceId, - user: UserId, - path: &[&[u8]], - bytes: impl Into, - ) -> Result<()> { - let path = Path::new(path)?; - let entry = EntryForm::new_bytes(namespace_id, path, bytes); - handle.insert(entry, user).await?; - Ok(()) - } -} diff --git a/iroh-willow/tests/basic.rs b/iroh-willow/tests/basic.rs new file mode 100644 index 0000000000..fb42a51c84 --- /dev/null +++ b/iroh-willow/tests/basic.rs @@ -0,0 +1,341 @@ + +use anyhow::Result; +use futures_concurrency::future::TryJoin; +use futures_lite::StreamExt; + +use iroh_willow::{ + proto::{ + grouping::Area, + willow::Path, + }, + session::{intents::EventKind, Interests, SessionInit, SessionMode}, +}; + +use self::util::{create_rng, spawn_two, insert, setup_and_delegate, Peer}; + +#[tokio::test(flavor = "multi_thread")] +async fn peer_manager_two_intents() -> Result<()> { + iroh_test::logging::setup_multithreaded(); + let mut rng = create_rng("peer_manager_two_intents"); + + let [alfie, betty] = spawn_two(&mut rng).await?; + let (namespace, _alfie_user, betty_user) = setup_and_delegate(&alfie, &betty).await?; + let betty_node_id = betty.node_id(); + + insert(&betty, namespace, betty_user, &[b"foo", b"1"], "foo 1").await?; + insert(&betty, namespace, betty_user, &[b"bar", b"2"], "bar 2").await?; + insert(&betty, namespace, betty_user, &[b"bar", b"3"], "bar 3").await?; + + let task_foo_path = tokio::task::spawn({ + let alfie = alfie.clone(); + async move { + let path = Path::new(&[b"foo"]).unwrap(); + + let init = SessionInit::new( + Interests::builder().add_area(namespace, [Area::path(path.clone())]), + SessionMode::ReconcileOnce, + ); + let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); + + assert_eq!( + intent.next().await.unwrap(), + EventKind::CapabilityIntersection { + 
namespace, + area: Area::full(), + } + ); + + assert_eq!( + intent.next().await.unwrap(), + EventKind::InterestIntersection { + namespace, + area: Area::path(path.clone()).into() + } + ); + + assert_eq!( + intent.next().await.unwrap(), + EventKind::Reconciled { + namespace, + area: Area::path(path.clone()).into() + } + ); + + assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); + + assert!(intent.next().await.is_none()); + } + }); + + let task_bar_path = tokio::task::spawn({ + let alfie = alfie.clone(); + async move { + let path = Path::new(&[b"bar"]).unwrap(); + + let interests = Interests::builder().add_area(namespace, [Area::path(path.clone())]); + let init = SessionInit::new(interests, SessionMode::ReconcileOnce); + + let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); + + assert_eq!( + intent.next().await.unwrap(), + EventKind::CapabilityIntersection { + namespace, + area: Area::full(), + } + ); + + assert_eq!( + intent.next().await.unwrap(), + EventKind::InterestIntersection { + namespace, + area: Area::path(path.clone()).into() + } + ); + + assert_eq!( + intent.next().await.unwrap(), + EventKind::Reconciled { + namespace, + area: Area::path(path.clone()).into() + } + ); + + assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); + + assert!(intent.next().await.is_none()); + } + }); + + task_foo_path.await.unwrap(); + task_bar_path.await.unwrap(); + + [alfie, betty].map(Peer::shutdown).try_join().await?; + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread")] +async fn peer_manager_update_intent() -> Result<()> { + iroh_test::logging::setup_multithreaded(); + let mut rng = create_rng("peer_manager_update_intent"); + + let [alfie, betty] = spawn_two(&mut rng).await?; + let (namespace, _alfie_user, betty_user) = setup_and_delegate(&alfie, &betty).await?; + let betty_node_id = betty.node_id(); + + insert(&betty, namespace, betty_user, &[b"foo"], "foo 1").await?; + insert(&betty, namespace, betty_user, &[b"bar"], 
"bar 1").await?; + + let path = Path::new(&[b"foo"]).unwrap(); + let interests = Interests::builder().add_area(namespace, [Area::path(path.clone())]); + let init = SessionInit::new(interests, SessionMode::Live); + let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); + + assert_eq!( + intent.next().await.unwrap(), + EventKind::CapabilityIntersection { + namespace, + area: Area::full(), + } + ); + assert_eq!( + intent.next().await.unwrap(), + EventKind::InterestIntersection { + namespace, + area: Area::path(path.clone()).into() + } + ); + assert_eq!( + intent.next().await.unwrap(), + EventKind::Reconciled { + namespace, + area: Area::path(path.clone()).into() + } + ); + assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); + + let path = Path::new(&[b"bar"]).unwrap(); + let interests = Interests::builder().add_area(namespace, [Area::path(path.clone())]); + intent.add_interests(interests).await?; + + assert_eq!( + intent.next().await.unwrap(), + EventKind::InterestIntersection { + namespace, + area: Area::path(path.clone()).into() + } + ); + assert_eq!( + intent.next().await.unwrap(), + EventKind::Reconciled { + namespace, + area: Area::path(path.clone()).into() + } + ); + + assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); + + intent.close().await; + + assert!(intent.next().await.is_none(),); + + [alfie, betty].map(Peer::shutdown).try_join().await?; + Ok(()) +} + +mod util { + use std::sync::{Arc, Mutex}; + + use anyhow::Result; + use bytes::Bytes; + use futures_concurrency::future::TryJoin; + use iroh_net::{Endpoint, NodeId}; + use rand::SeedableRng; + use rand_chacha::ChaCha12Rng; + use rand_core::CryptoRngCore; + use tokio::task::JoinHandle; + + use iroh_willow::{ + auth::{CapSelector, DelegateTo, RestrictArea}, + engine::{AcceptOpts, Engine}, + form::EntryForm, + net::ALPN, + proto::{ + keys::{NamespaceId, NamespaceKind, UserId}, + meadowcap::AccessMode, + willow::Path, + }, + }; + + pub fn create_rng(seed: 
&str) -> ChaCha12Rng { + let seed = iroh_base::hash::Hash::new(seed); + ChaCha12Rng::from_seed(*(seed.as_bytes())) + } + + #[derive(Debug, Clone)] + pub struct Peer { + endpoint: Endpoint, + engine: Engine, + accept_task: Arc>>>>, + } + + impl Peer { + pub async fn spawn( + secret_key: iroh_net::key::SecretKey, + accept_opts: AcceptOpts, + ) -> Result { + let endpoint = Endpoint::builder() + .secret_key(secret_key) + .alpns(vec![ALPN.to_vec()]) + .bind(0) + .await?; + let payloads = iroh_blobs::store::mem::Store::default(); + let create_store = move || iroh_willow::store::memory::Store::new(payloads); + let engine = Engine::spawn(endpoint.clone(), create_store, accept_opts); + let accept_task = tokio::task::spawn({ + let engine = engine.clone(); + let endpoint = endpoint.clone(); + async move { + while let Some(mut conn) = endpoint.accept().await { + let alpn = conn.alpn().await?; + if alpn != ALPN { + continue; + } + let conn = conn.await?; + engine.handle_connection(conn).await?; + } + Result::Ok(()) + } + }); + Ok(Self { + endpoint, + engine, + accept_task: Arc::new(Mutex::new(Some(accept_task))), + }) + } + + pub async fn shutdown(self) -> Result<()> { + let accept_task = self.accept_task.lock().unwrap().take(); + if let Some(accept_task) = accept_task { + accept_task.abort(); + match accept_task.await { + Err(err) if err.is_cancelled() => {} + Ok(Ok(())) => {} + Err(err) => Err(err)?, + Ok(Err(err)) => Err(err)?, + } + } + self.engine.shutdown().await?; + self.endpoint.close(0u8.into(), b"").await?; + Ok(()) + } + + pub fn node_id(&self) -> NodeId { + self.endpoint.node_id() + } + } + + impl std::ops::Deref for Peer { + type Target = Engine; + fn deref(&self) -> &Self::Target { + &self.engine + } + } + + pub async fn spawn_two(rng: &mut impl CryptoRngCore) -> Result<[Peer; 2]> { + let peers = [ + iroh_net::key::SecretKey::generate_with_rng(rng), + iroh_net::key::SecretKey::generate_with_rng(rng), + ] + .map(|secret_key| Peer::spawn(secret_key, 
Default::default())) + .try_join() + .await?; + + peers[0] + .endpoint + .add_node_addr(peers[1].endpoint.node_addr().await?)?; + + peers[1] + .endpoint + .add_node_addr(peers[0].endpoint.node_addr().await?)?; + + Ok(peers) + } + + pub async fn setup_and_delegate( + alfie: &Engine, + betty: &Engine, + ) -> Result<(NamespaceId, UserId, UserId)> { + let user_alfie = alfie.create_user().await?; + let user_betty = betty.create_user().await?; + + let namespace_id = alfie + .create_namespace(NamespaceKind::Owned, user_alfie) + .await?; + + let cap_for_betty = alfie + .delegate_caps( + CapSelector::widest(namespace_id), + AccessMode::ReadWrite, + DelegateTo::new(user_betty, RestrictArea::None), + ) + .await?; + + betty.import_caps(cap_for_betty).await?; + Ok((namespace_id, user_alfie, user_betty)) + } + + pub async fn insert( + handle: &Engine, + namespace_id: NamespaceId, + user: UserId, + path: &[&[u8]], + bytes: impl Into, + ) -> Result<()> { + let path = Path::new(path)?; + let entry = EntryForm::new_bytes(namespace_id, path, bytes); + handle.insert(entry, user).await?; + Ok(()) + } +} From 5a66d897664f9f5d950cc127426de63e35a0db24 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 1 Aug 2024 12:40:13 +0200 Subject: [PATCH 101/198] docs: utils --- iroh-willow/src/util/gen_stream.rs | 33 +++++++++++++++++------------- iroh-willow/src/util/stream.rs | 1 - 2 files changed, 19 insertions(+), 15 deletions(-) diff --git a/iroh-willow/src/util/gen_stream.rs b/iroh-willow/src/util/gen_stream.rs index 2999c93a9e..c23b4ecd20 100644 --- a/iroh-willow/src/util/gen_stream.rs +++ b/iroh-willow/src/util/gen_stream.rs @@ -4,31 +4,36 @@ use std::{ task::{Context, Poll}, }; +use futures_lite::Stream; use genawaiter::{ rc::{Co, Gen}, GeneratorState, }; +/// Wraps a [`Gen`] into a [`Stream`]. +/// +/// The stream yields the items yielded by the generator. +/// The generator's final output can be retrieved via [`Self::final_output`]. 
#[derive(derive_more::Debug)] -pub struct GenStream +pub struct GenStream where - Fut: Future>, + Fut: Future>, { #[debug("Gen")] - gen: Gen, + gen: Gen, is_complete: bool, - final_output: Option, + final_output: Option, } -impl GenStream +impl GenStream where - Fut: Future>, + Fut: Future>, { - pub fn new(producer: impl FnOnce(Co) -> Fut) -> Self { + pub fn new(producer: impl FnOnce(Co) -> Fut) -> Self { Self::from_gen(Gen::new(producer)) } - pub fn from_gen(gen: Gen) -> Self { + pub fn from_gen(gen: Gen) -> Self { Self { gen, is_complete: false, @@ -36,17 +41,17 @@ where } } - pub fn final_output(self) -> Option { + pub fn final_output(self) -> Option { self.final_output } } -impl futures_lite::Stream for GenStream +impl Stream for GenStream where - Fut: Future>, - O: Unpin, + Fut: Future>, + FinalOutput: Unpin, { - type Item = Result; + type Item = Result; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { if self.is_complete { @@ -56,7 +61,7 @@ where let mut fut = self.gen.async_resume(); let out = std::task::ready!(Pin::new(&mut fut).poll(cx)); match out { - GeneratorState::Yielded(output) => (Some(Ok(output)), Option::None), + GeneratorState::Yielded(output) => (Some(Ok(output)), None), GeneratorState::Complete(Ok(final_output)) => (None, Some(final_output)), GeneratorState::Complete(Err(err)) => (Some(Err(err)), None), } diff --git a/iroh-willow/src/util/stream.rs b/iroh-willow/src/util/stream.rs index 96bb841d54..97c1a56843 100644 --- a/iroh-willow/src/util/stream.rs +++ b/iroh-willow/src/util/stream.rs @@ -14,7 +14,6 @@ use tokio_util::sync::{CancellationToken, WaitForCancellationFutureOwned}; #[derive(Debug)] pub struct Cancelable { stream: S, - // TODO: Don't allocate here. 
cancelled: Pin>, is_cancelled: bool, } From ac3d706590c7ed4c4908dcdd531ca6ae4c9366c8 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Wed, 7 Aug 2024 12:47:56 +0200 Subject: [PATCH 102/198] fix: improve log levels and namings --- iroh-willow/src/auth.rs | 2 +- iroh-willow/src/engine.rs | 2 +- iroh-willow/src/engine/actor.rs | 4 ++-- iroh-willow/src/session/intents.rs | 17 +++++++++-------- 4 files changed, 13 insertions(+), 12 deletions(-) diff --git a/iroh-willow/src/auth.rs b/iroh-willow/src/auth.rs index a7045a7a4c..f36c2c9ac5 100644 --- a/iroh-willow/src/auth.rs +++ b/iroh-willow/src/auth.rs @@ -146,7 +146,7 @@ pub enum AreaSelector { Widest, /// Use any capability that covers the provided area. ContainsArea(Area), - /// Use any capability that covers the provided point. + /// Use any capability that covers the provided point (i.e. entry). ContainsPoint(Point), } diff --git a/iroh-willow/src/engine.rs b/iroh-willow/src/engine.rs index 477833a494..bdd41340cb 100644 --- a/iroh-willow/src/engine.rs +++ b/iroh-willow/src/engine.rs @@ -40,7 +40,7 @@ impl Engine { let peer_manager = PeerManager::new(actor_handle.clone(), endpoint, pm_inbox_rx, accept_opts); let peer_manager_task = tokio::task::spawn( async move { peer_manager.run().await.map_err(|err| format!("{err:?}")) } - .instrument(error_span!("peer_manager", me = me.fmt_short())), + .instrument(error_span!("peer_manager", me=%me.fmt_short())), ); Engine { actor_handle, diff --git a/iroh-willow/src/engine/actor.rs b/iroh-willow/src/engine/actor.rs index a1a9c29567..8e0c942e0d 100644 --- a/iroh-willow/src/engine/actor.rs +++ b/iroh-willow/src/engine/actor.rs @@ -49,9 +49,9 @@ impl ActorHandle { ) -> ActorHandle { let (inbox_tx, inbox_rx) = flume::bounded(INBOX_CAP); let join_handle = std::thread::Builder::new() - .name("willow-actor".to_string()) + .name("willow".to_string()) .spawn(move || { - let span = error_span!("willow-actor", me=%me.fmt_short()); + let span = error_span!("willow", 
me=%me.fmt_short()); let _guard = span.enter(); let store = Store::new((create_store)()); let actor = Actor::new(store, inbox_rx); diff --git a/iroh-willow/src/session/intents.rs b/iroh-willow/src/session/intents.rs index b63dce7eab..b22029bf8b 100644 --- a/iroh-willow/src/session/intents.rs +++ b/iroh-willow/src/session/intents.rs @@ -14,7 +14,7 @@ use genawaiter::rc::Co; use tokio::sync::mpsc; use tokio_stream::{wrappers::ReceiverStream, StreamMap, StreamNotifyClose}; use tokio_util::sync::PollSender; -use tracing::debug; +use tracing::{debug, trace, warn}; use crate::{ auth::{Auth, InterestMap}, @@ -231,7 +231,8 @@ impl Stream for IntentHandle { } } -#[derive(Debug)] +#[derive(derive_more::Debug)] +#[debug("IntentChannels")] struct IntentChannels { event_tx: Sender, update_rx: Receiver, @@ -307,11 +308,11 @@ impl IntentDispatcher { while let Some(intent) = self.pending_intents.pop_front() { self.submit_intent(&co, intent).await?; } - debug!("submitted initial intents, start loop"); + trace!("submitted initial intents, start loop"); loop { tokio::select! { input = inbox.next() => { - tracing::debug!(?input, "tick: inbox"); + trace!(?input, "tick: inbox"); let Some(input) = input else { break; }; @@ -321,12 +322,12 @@ impl IntentDispatcher { } } Some((intent_id, event)) = self.intent_update_rx.next(), if !self.intent_update_rx.is_empty() => { - tracing::debug!(?intent_id, ?event, "tick: intent_update"); + trace!(?intent_id, ?event, "tick: intent_update"); match event { Some(event) => { // Received an intent update. 
if let Err(err) = self.update_intent(&co, intent_id, event).await { - tracing::warn!(%intent_id, ?err, "failed to update intent"); + warn!(%intent_id, ?err, "failed to update intent"); } }, None => { @@ -420,7 +421,7 @@ impl IntentDispatcher { intent_id: u64, update: IntentUpdate, ) -> Result<()> { - debug!(?intent_id, ?update, "intent update"); + trace!(?intent_id, ?update, "intent update"); match update { IntentUpdate::AddInterests(interests) => { let add_interests = self.auth.resolve_interests(interests)?; @@ -438,7 +439,7 @@ impl IntentDispatcher { } async fn cancel_intent(&mut self, co: &Co, intent_id: u64) { - debug!(?intent_id, "cancel intent"); + trace!(?intent_id, "cancel intent"); self.intent_update_rx.remove(&intent_id); self.intents.remove(&intent_id); if self.intents.is_empty() { From 8700e7adffdee768ed0d3017f3587834601d2db0 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 8 Aug 2024 01:31:03 +0200 Subject: [PATCH 103/198] wip: refactor net and peer_manager --- iroh-willow/src/auth.rs | 4 +- iroh-willow/src/engine.rs | 30 +- iroh-willow/src/engine/peer_manager.rs | 560 +++++++++++++++++-------- iroh-willow/src/net.rs | 400 ++++++++++-------- iroh-willow/src/proto/sync.rs | 15 +- iroh-willow/src/session/data.rs | 1 - iroh-willow/src/session/reconciler.rs | 18 +- iroh-willow/src/session/run.rs | 37 +- iroh-willow/tests/basic.rs | 8 +- 9 files changed, 673 insertions(+), 400 deletions(-) diff --git a/iroh-willow/src/auth.rs b/iroh-willow/src/auth.rs index f36c2c9ac5..01e6f8ee4f 100644 --- a/iroh-willow/src/auth.rs +++ b/iroh-willow/src/auth.rs @@ -36,14 +36,14 @@ impl DelegateTo { #[derive(Debug, Clone)] pub enum RestrictArea { None, - Restrict(Area) + Restrict(Area), } impl RestrictArea { pub fn with_default(self, default: Area) -> Area { match self { RestrictArea::None => default.clone(), - RestrictArea::Restrict(area) => area + RestrictArea::Restrict(area) => area, } } } diff --git a/iroh-willow/src/engine.rs 
b/iroh-willow/src/engine.rs index bdd41340cb..e37e38f410 100644 --- a/iroh-willow/src/engine.rs +++ b/iroh-willow/src/engine.rs @@ -1,7 +1,7 @@ use anyhow::Result; use iroh_net::{endpoint::Connection, util::SharedAbortingJoinHandle, Endpoint, NodeId}; -use tokio::sync::mpsc; -use tracing::{error_span, Instrument}; +use tokio::sync::{mpsc, oneshot}; +use tracing::{debug, error, error_span, Instrument}; use crate::{ session::{ @@ -25,7 +25,7 @@ const PEER_MANAGER_INBOX_CAP: usize = 128; pub struct Engine { actor_handle: ActorHandle, peer_manager_inbox: mpsc::Sender, - _peer_manager_task: SharedAbortingJoinHandle>, + peer_manager_task: SharedAbortingJoinHandle>, } impl Engine { @@ -37,7 +37,8 @@ impl Engine { let me = endpoint.node_id(); let actor_handle = ActorHandle::spawn(create_store, me); let (pm_inbox_tx, pm_inbox_rx) = mpsc::channel(PEER_MANAGER_INBOX_CAP); - let peer_manager = PeerManager::new(actor_handle.clone(), endpoint, pm_inbox_rx, accept_opts); + let peer_manager = + PeerManager::new(actor_handle.clone(), endpoint, pm_inbox_rx, accept_opts); let peer_manager_task = tokio::task::spawn( async move { peer_manager.run().await.map_err(|err| format!("{err:?}")) } .instrument(error_span!("peer_manager", me=%me.fmt_short())), @@ -45,7 +46,7 @@ impl Engine { Engine { actor_handle, peer_manager_inbox: pm_inbox_tx, - _peer_manager_task: peer_manager_task.into(), + peer_manager_task: peer_manager_task.into(), } } @@ -63,6 +64,25 @@ impl Engine { .await?; Ok(handle) } + + pub async fn shutdown(self) -> Result<()> { + debug!("shutdown engine"); + let (reply, reply_rx) = oneshot::channel(); + self.peer_manager_inbox + .send(peer_manager::Input::Shutdown { reply }) + .await?; + reply_rx.await?; + let res = self.peer_manager_task.await; + match res { + Err(err) => error!(?err, "peer manager task panicked"), + Ok(Err(err)) => error!(?err, "peer manager task failed"), + Ok(Ok(())) => {} + }; + debug!("shutdown engine: peer manager terminated"); + 
self.actor_handle.shutdown().await?; + debug!("shutdown engine: willow actor terminated"); + Ok(()) + } } impl std::ops::Deref for Engine { diff --git a/iroh-willow/src/engine/peer_manager.rs b/iroh-willow/src/engine/peer_manager.rs index 6db8dac314..947d764199 100644 --- a/iroh-willow/src/engine/peer_manager.rs +++ b/iroh-willow/src/engine/peer_manager.rs @@ -1,35 +1,42 @@ -use std::{collections::HashMap, future::Future, sync::Arc}; +use std::{collections::HashMap, future::Future, sync::Arc, time::Duration}; -use anyhow::{anyhow, Context, Result}; +use anyhow::{Context, Result}; use futures_buffered::join_all; use futures_lite::{future::Boxed, StreamExt}; -use futures_util::FutureExt; +use futures_util::{FutureExt, TryFutureExt}; use iroh_net::{ - endpoint::{get_remote_node_id, Connection, VarInt}, + endpoint::{Connection, ConnectionError}, util::AbortingJoinHandle, Endpoint, NodeId, }; use tokio::{ - sync::mpsc, + sync::{mpsc, oneshot}, task::{AbortHandle, JoinSet}, }; use tokio_stream::{wrappers::ReceiverStream, StreamMap}; -use tracing::{debug, trace}; +use tokio_util::sync::CancellationToken; +use tracing::{debug, error, error_span, instrument, trace, warn, Instrument, Span}; use crate::{ - net::{WillowConn, ALPN}, - proto::sync::AccessChallenge, + net::{establish, setup, ConnRunFut, PeerConn, WillowConn, ALPN}, + proto::sync::{AccessChallenge, InitialTransmission}, session::{ intents::{EventKind, Intent}, - Error, Interests, Role, SessionEvent, SessionHandle, SessionInit, SessionUpdate, + Channels, Error, Interests, Role, SessionEvent, SessionHandle, SessionInit, SessionUpdate, }, }; use super::actor::ActorHandle; -const ERROR_CODE_IGNORE_CONN: VarInt = VarInt::from_u32(1); +/// Our QUIC application error code for graceful connection termination. +const ERROR_CODE_OK: u32 = 1; +/// Our QUIC application error code when closing connections during establishment +/// because we prefer another existing connection to the same peer. 
+const ERROR_CODE_IGNORE_CONN: u32 = 2; +/// Timeout at shutdown after which we abort connections that failed to terminate gracefully. +const GRACEFUL_SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(10); /// Customize what to do with incoming connections. /// @@ -53,13 +60,16 @@ impl AcceptOpts { /// Registers a callback to determine the fate of incoming connections. /// /// The callback gets the connecting peer's [`NodeId`] as argument, and must return a future - /// that resolves to `Option<`[SessionInit]``>`. When returning `None`, the session will not be - /// accepted. When returning a `SessionInit`, the session will be accepted with these - /// interests. + /// that resolves to `None` or Some(`[SessionInit]`). + /// When returning `None`, the session will not be accepted. + /// When returning a `SessionInit`, the session will be accepted with these interests. + /// + /// The default behavior, if not registering a callback, is to accept all incoming connections with + /// interests in everything we have and in live session mode. pub fn accept_custom(mut self, cb: F) -> Self where - F: Fn(NodeId) -> Fut + 'static + Send + Sync, - Fut: 'static + Send + Future>, + F: Fn(NodeId) -> Fut + Send + Sync + 'static, + Fut: Future> + Send + 'static, { let cb = Box::new(move |peer: NodeId| { let fut: Boxed> = Box::pin((cb)(peer)); @@ -83,6 +93,7 @@ impl AcceptOpts { } } +/// Input commands for the [`PeerManager`] actor. #[derive(derive_more::Debug)] pub(super) enum Input { SubmitIntent { @@ -93,19 +104,23 @@ pub(super) enum Input { #[debug("Connection")] conn: Connection, }, + Shutdown { + reply: oneshot::Sender<()>, + }, } type AcceptCb = Box Boxed> + Send + Sync + 'static>; -#[derive(derive_more::Debug)] +/// Manages incoming and outgoing connections. 
+#[derive(Debug)] pub(super) struct PeerManager { actor: ActorHandle, endpoint: Endpoint, inbox: mpsc::Receiver, session_events_rx: StreamMap>, - tasks: JoinSet<(NodeId, Result)>, - peers: HashMap, + peers: HashMap, accept_handlers: AcceptHandlers, + conn_tasks: JoinSet<(NodeId, Result)>, } impl PeerManager { @@ -120,9 +135,9 @@ impl PeerManager { actor: actor_handle, inbox, session_events_rx: Default::default(), - tasks: Default::default(), peers: Default::default(), accept_handlers: AcceptHandlers::new(accept_opts), + conn_tasks: Default::default(), } } @@ -131,19 +146,26 @@ impl PeerManager { tokio::select! { Some(input) = self.inbox.recv() => { trace!(?input, "tick: inbox"); - self.handle_input(input).await; + match input { + Input::SubmitIntent { peer, intent } => self.submit_intent(peer, intent).await, + Input::HandleConnection { conn } => self.handle_connection(conn).await, + Input::Shutdown { reply } => { + self.handle_shutdown().await; + reply.send(()).ok(); + break; + } + } } Some((session_id, event)) = self.session_events_rx.next(), if !self.session_events_rx.is_empty() => { trace!(?session_id, ?event, "tick: event"); - self.handle_event(session_id, event); + self.handle_session_event(session_id, event); } - Some(res) = self.tasks.join_next(), if !self.tasks.is_empty() => { - trace!("tick: task joined"); + Some(res) = self.conn_tasks.join_next(), if !self.conn_tasks.is_empty() => { + trace!("tick: conn task joined"); match res { Err(err) if err.is_cancelled() => continue, - Err(err) => Err(err).context("establish task paniced")?, - Ok((_peer, Ok(conn))) => self.on_established(conn).await?, - Ok((peer, Err(err))) => self.on_establish_failed(peer, Arc::new(Error::Net(err))).await, + Err(err) => Err(err).context("conn task panicked")?, + Ok((peer, out)) => self.handle_conn_output(peer, out).await?, } } else => break, @@ -152,170 +174,355 @@ impl PeerManager { Ok(()) } - async fn handle_input(&mut self, input: Input) { - match input { - Input::SubmitIntent { 
peer, intent } => self.submit_intent(peer, intent).await, - Input::HandleConnection { conn } => self.handle_connection(conn).await, - } - } - async fn handle_connection(&mut self, conn: Connection) { - let peer = match get_remote_node_id(&conn) { - Ok(node_id) => node_id, + let conn = match PeerConn::new(conn, Role::Betty, self.endpoint.node_id()) { + Ok(conn) => conn, Err(err) => { - tracing::debug!("ignore incoming connection (failed to get remote node id: {err})"); + debug!("ignore incoming connection (failed to get remote node id: {err})"); return; } }; - let me = self.endpoint.node_id(); - - match self.peers.get_mut(&peer) { - None => { - if let Some(intent) = self.accept_handlers.accept(peer).await { - let abort_handle = self.tasks.spawn( - WillowConn::betty(conn, me, AccessChallenge::generate()) - .map(move |res| (peer, res)), - ); - self.peers.insert( - peer, - PeerState::Pending { - our_role: Role::Betty, - intents: vec![intent], - abort_handle, - }, - ); - } + let peer = conn.peer(); + let Some(intent) = self.accept_handlers.accept(peer).await else { + debug!("ignore incoming connection (accept handler returned none)"); + return; + }; + let peer_info = self + .peers + .entry(peer) + .or_insert_with(|| PeerInfo::new(Role::Betty, peer)); + + match peer_info.state { + PeerState::None => { + let our_nonce = AccessChallenge::generate(); + let fut = async move { + let initial_transmission = establish(&conn, our_nonce).await?; + Ok(ConnStep::Established { + conn, + initial_transmission, + }) + }; + let abort_handle = spawn_conn_task(&mut self.conn_tasks, &peer_info, fut); + peer_info.abort_handle = Some(abort_handle); + peer_info.our_role = Role::Betty; + peer_info.pending_intents.push(intent); + peer_info.state = PeerState::Pending; } - Some(PeerState::Pending { - our_role, - abort_handle, - intents, - }) => { - if *our_role == Role::Betty { - tracing::debug!("ignore incoming connection (already accepting)"); - conn.close(ERROR_CODE_IGNORE_CONN, 
b"duplicate-already-accepting"); - } else if me > peer { - tracing::debug!( - "ignore incoming connection (already dialing and our dial wins)" - ); - conn.close(ERROR_CODE_IGNORE_CONN, b"duplicate-our-dial-wins"); - } else if let Some(intent) = self.accept_handlers.accept(peer).await { - // Abort our dial attempt and insert the new abort handle and intent. - abort_handle.abort(); - *abort_handle = self.tasks.spawn( - WillowConn::betty(conn, me, AccessChallenge::generate()) - .map(move |res| (peer, res)), - ); - *our_role = Role::Betty; - intents.push(intent); + PeerState::Pending => { + peer_info.pending_intents.push(intent); + debug!("ignore incoming connection (already pending)"); + conn.close(ERROR_CODE_IGNORE_CONN.into(), b"duplicate-already-active"); + } + PeerState::Active { .. } => { + // TODO: push betty intent to session? + debug!("ignore incoming connection (already active)"); + conn.close(ERROR_CODE_IGNORE_CONN.into(), b"duplicate-already-active"); + } + } + } + + async fn submit_intent(&mut self, peer: NodeId, intent: Intent) { + let peer_info = self + .peers + .entry(peer) + .or_insert_with(|| PeerInfo::new(Role::Alfie, peer)); + + match peer_info.state { + PeerState::None => { + let our_nonce = AccessChallenge::generate(); + let endpoint = self.endpoint.clone(); + let fut = async move { + let conn = endpoint.connect_by_node_id(&peer, ALPN).await?; + let conn = PeerConn::new(conn, Role::Alfie, endpoint.node_id())?; + let initial_transmission = establish(&conn, our_nonce).await?; + Ok(ConnStep::Established { + conn, + initial_transmission, + }) + }; + let abort_handle = spawn_conn_task(&mut self.conn_tasks, &peer_info, fut); + peer_info.abort_handle = Some(abort_handle); + peer_info.pending_intents.push(intent); + peer_info.state = PeerState::Pending; + } + PeerState::Pending => { + peer_info.pending_intents.push(intent); + } + PeerState::Active { ref update_tx, .. 
} => { + if let Err(err) = update_tx.send(SessionUpdate::SubmitIntent(intent)).await { + let SessionUpdate::SubmitIntent(intent) = err.0; + intent.send_abort(Arc::new(Error::ActorFailed)).await; } } - Some(PeerState::Active { .. }) => { - tracing::debug!("ignore incoming connection (already connected)"); - conn.close(ERROR_CODE_IGNORE_CONN, b"duplicate-already-accepting"); + } + } + + #[instrument("conn", skip_all, fields(peer=%peer.fmt_short()))] + fn handle_session_event(&mut self, peer: NodeId, event: SessionEvent) { + trace!(peer=%peer.fmt_short(), ?event, "session event"); + match event { + SessionEvent::Established => {} + SessionEvent::Complete { result } => { + debug!(?result, "session complete"); + // TODO: I don't think we need to do anything here. The connection tasks terminate by themselves: + // The send loops are closed from `session::run` via `ChannelSenders::close_all`, + // and the receive loops terminate once the other side closes their send loops. } } } - async fn on_establish_failed(&mut self, peer: NodeId, error: Arc) { - let Some(peer_state) = self.peers.remove(&peer) else { - tracing::warn!(?peer, "connection failure for unknown peer"); - return; - }; - match peer_state { - PeerState::Pending { intents, .. } => { + #[instrument("conn", skip_all, fields(peer=%peer.fmt_short()))] + async fn handle_conn_output(&mut self, peer: NodeId, out: Result) -> Result<()> { + let peer_info = self + .peers + .get_mut(&peer) + .context("got conn task output for unknown peer")?; + trace!(?peer, out=?out.as_ref().map(|o| format!("{o}")), "conn task output"); + match out { + Err(err) => { + debug!(peer=%peer.fmt_short(), ?err, "conn task failed"); + let err = Arc::new(Error::Net(err)); + let peer = self.peers.remove(&peer).expect("just checked"); join_all( - intents + peer.pending_intents .into_iter() - .map(|intent| intent.send_abort(error.clone())), + .map(|intent| intent.send_abort(err.clone())), ) .await; + // We don't need to cancel the session here. 
It will terminate because all receiver channels are closed. + return Ok(()); } - PeerState::Active { .. } => { - unreachable!("we never handle connections for active peers") + Ok(ConnStep::Established { + conn, + initial_transmission, + }) => { + // TODO: Check if we want to continue. + let fut = async move { + let result = setup(&conn).await; + result.map(|(channels, fut)| ConnStep::Ready { + conn, + channels, + fut: fut.boxed(), + }) + }; + let abort_handle = spawn_conn_task(&mut self.conn_tasks, &peer_info, fut); + peer_info.state = PeerState::Pending; + peer_info.abort_handle = Some(abort_handle); + peer_info.initial_transmission = Some(initial_transmission); } - }; - } - - async fn on_established(&mut self, conn: WillowConn) -> anyhow::Result<()> { - let peer = conn.peer; - let state = self - .peers - .remove(&peer) - .ok_or_else(|| anyhow!("unreachable: on_established called for unknown peer"))?; - - let PeerState::Pending { intents, .. } = state else { - anyhow::bail!("unreachable: on_established called for peer in wrong state") - }; - - let session_handle = self.actor.init_session(conn, intents).await?; + Ok(ConnStep::Ready { + conn, + channels, + fut, + }) => { + let initial_transmission = peer_info + .initial_transmission + .take() + .context("expedted initial transmission for peer in ready state")?; + debug!("connection ready: init session"); + let willow_conn = WillowConn { + initial_transmission, + channels, + our_role: conn.our_role(), + peer, + }; + let intents = std::mem::take(&mut peer_info.pending_intents); + let session_handle = self.actor.init_session(willow_conn, intents).await?; + + let fut = fut.map_ok(move |()| ConnStep::Done { conn }); + let abort_handle = spawn_conn_task(&mut self.conn_tasks, &peer_info, fut); + + let SessionHandle { + cancel_token, + update_tx, + event_rx, + } = session_handle; + self.session_events_rx + .insert(peer, ReceiverStream::new(event_rx)); + + peer_info.state = PeerState::Active { + update_tx, + cancel_token, + 
}; + peer_info.abort_handle = Some(abort_handle); + } + Ok(ConnStep::Done { conn }) => { + debug!("connection loop finished"); + // TODO: Instead of using our role (alfie vs. betty), the party who sent the last + // meaningful message should wait for the other end to terminate the connection, I think. + // + // In other words, the connection may only be closed by the party who received the last meaningful message. + if conn.our_role() == Role::Alfie { + conn.close(ERROR_CODE_OK.into(), b"bye"); + } + let fut = async move { + let reason = conn.closed().await; + Ok(ConnStep::Closed { conn, reason }) + }; + let abort_handle = spawn_conn_task(&mut self.conn_tasks, &peer_info, fut); + peer_info.abort_handle = Some(abort_handle); + } + Ok(ConnStep::Closed { reason, conn }) => { + // TODO: Instead of using our role (alfie vs. betty), the party who sent the last + // meaningful message should wait for the other end to terminate the connection, I think. + let locally_closed = conn.our_role() == Role::Alfie; + let is_graceful = match &reason { + ConnectionError::LocallyClosed if locally_closed => true, + ConnectionError::ApplicationClosed(frame) + if !locally_closed && frame.error_code == ERROR_CODE_OK.into() => + { + true + } + _ => false, + }; + if !is_graceful { + warn!(?reason, "connection was not closed gracefully"); + } else { + debug!("connection closed gracefully"); + } - let SessionHandle { - cancel_token: _, - update_tx, - event_rx, - } = session_handle; - self.session_events_rx - .insert(peer, ReceiverStream::new(event_rx)); - self.peers.insert(peer, PeerState::Active { update_tx }); + self.peers.remove(&peer); + drop(conn); + } + } Ok(()) } - async fn submit_intent(&mut self, peer: NodeId, intent: Intent) { - match self.peers.get_mut(&peer) { - None => { - let our_nonce = AccessChallenge::generate(); - let abort_handle = self.tasks.spawn({ - let endpoint = self.endpoint.clone(); - async move { - let conn = endpoint.connect_by_node_id(peer, ALPN).await?; - 
WillowConn::alfie(conn, endpoint.node_id(), our_nonce).await + /// Shuts down all connection tasks. + /// + /// Attempts to shutdown connections for active peers gracefully within [`GRACEFUL_SHUTDOWN_TIMEOUT`]. + /// Aborts connections for not-yet-active peers immediately. + /// Aborts all connections after the graceful timeout elapsed. + async fn handle_shutdown(&mut self) { + for peer in self.peers.values() { + match &peer.state { + PeerState::None => {} + PeerState::Pending => { + // We are in pending state, which means the session has not yet been started. + // Hard-abort the task and let the other peer handle the error. + if let Some(abort_handle) = &peer.abort_handle { + abort_handle.abort(); } - .map(move |res| (peer, res)) - }); - let state = PeerState::Pending { - intents: vec![intent], - abort_handle, - our_role: Role::Alfie, - }; - self.peers.insert(peer, state); - } - Some(PeerState::Pending { intents, .. }) => { - intents.push(intent); + } + PeerState::Active { cancel_token, .. } => { + // We are in active state. We cancel our session, which leads to graceful connection termination. + cancel_token.cancel(); + } } - Some(PeerState::Active { update_tx, .. }) => { - if let Err(message) = update_tx.send(SessionUpdate::SubmitIntent(intent)).await { - let SessionUpdate::SubmitIntent(intent) = message.0; - intent.send_abort(Arc::new(Error::ActorFailed)).await; + } + + let join_conns_fut = async { + while let Some(res) = self.conn_tasks.join_next().await { + trace!("tick: conn task joined"); + match res { + Err(err) if err.is_cancelled() => continue, + Err(err) => { + error!(?err, "conn task panicked during shutdown"); + } + Ok((peer, out)) => { + match &out { + Err(_) | Ok(ConnStep::Done { .. }) | Ok(ConnStep::Closed { .. 
}) => { + if let Err(err) = self.handle_conn_output(peer, out).await { + error!(?err, "conn task output error during shutdown"); + } + } + _ => { + // We should not reach this state, as we abort all tasks that lead to output other than done or close. + // However, tokio docs for `AbortHandle::abort` state that cancelling a task that might + // already have been completed only triggers a cancelled JoinError *most likely*... + warn!(?peer, ?out, "expected tasks that lead to output other than done or close to be aborted"); + } + } + } } } }; - } - fn handle_event(&mut self, peer: NodeId, event: SessionEvent) { - tracing::info!(?event, "event"); - match event { - SessionEvent::Established => {} - SessionEvent::Complete { .. } => { - let state = self.peers.remove(&peer); - debug_assert!(matches!(state, Some(PeerState::Active { .. }))); + match tokio::time::timeout(GRACEFUL_SHUTDOWN_TIMEOUT, join_conns_fut).await { + Ok(()) => { + debug!("all connections gracefully terminated"); + } + Err(_) => { + debug!( + remaining=self.conn_tasks.len(), + "terminating all connections at shutdown timed out, abort remaining connections" + ); + // TODO: We do not catch panics here. 
+ self.conn_tasks.shutdown().await; } } } } +fn spawn_conn_task( + conn_tasks: &mut JoinSet<(NodeId, Result)>, + peer_info: &PeerInfo, + fut: impl Future> + Send + 'static, +) -> AbortHandle { + let node_id = peer_info.node_id; + let fut = fut + .map(move |res| (node_id, res)) + .instrument(peer_info.span.clone()); + conn_tasks.spawn(fut) +} + +#[derive(Debug)] +struct PeerInfo { + node_id: NodeId, + our_role: Role, + abort_handle: Option, + state: PeerState, + pending_intents: Vec, + initial_transmission: Option, + span: Span, +} + +impl PeerInfo { + fn new(our_role: Role, peer: NodeId) -> Self { + Self { + node_id: peer, + our_role, + abort_handle: None, + state: PeerState::None, + pending_intents: Vec::new(), + initial_transmission: None, + span: error_span!("conn", peer=%peer.fmt_short()), + } + } +} + #[derive(Debug)] enum PeerState { - Pending { - our_role: Role, - intents: Vec, - abort_handle: AbortHandle, - }, + None, + Pending, Active { + cancel_token: CancellationToken, update_tx: mpsc::Sender, }, } +#[derive(derive_more::Debug, strum::Display)] +enum ConnStep { + Established { + conn: PeerConn, + initial_transmission: InitialTransmission, + }, + Ready { + conn: PeerConn, + channels: Channels, + #[debug("ConnRunFut")] + fut: ConnRunFut, + }, + Done { + conn: PeerConn, + }, + Closed { + conn: PeerConn, + reason: ConnectionError, + }, +} + +/// The internal handlers for the [`AcceptOpts]. #[derive(derive_more::Debug)] struct AcceptHandlers { #[debug("{:?}", accept_cb.as_ref().map(|_| "_"))] @@ -352,28 +559,35 @@ impl AcceptHandlers { } } +/// Simple event forwarder to combine the intent event receivers for all betty sessions +/// and send to the event sender configured via [`AcceptOpts]. +/// +/// Runs a forwarding loop in a task. The task is aborted on drop. 
#[derive(Debug)] struct EventForwarder { _join_handle: AbortingJoinHandle<()>, stream_sender: mpsc::Sender<(NodeId, ReceiverStream)>, } -#[derive(Debug)] -struct EventForwarderActor { - stream_receiver: mpsc::Receiver<(NodeId, ReceiverStream)>, - streams: StreamMap>, - event_sender: mpsc::Sender<(NodeId, EventKind)>, -} - impl EventForwarder { fn new(event_sender: mpsc::Sender<(NodeId, EventKind)>) -> EventForwarder { - let (stream_sender, stream_receiver) = mpsc::channel(16); - let forwarder = EventForwarderActor { - stream_receiver, - streams: Default::default(), - event_sender, - }; - let join_handle = tokio::task::spawn(forwarder.run()); + let (stream_sender, mut stream_receiver) = mpsc::channel(16); + let join_handle = tokio::task::spawn(async move { + let mut streams = StreamMap::new(); + loop { + tokio::select! { + Some((peer, receiver)) = stream_receiver.recv() => { + streams.insert(peer, receiver); + }, + Some((peer, event)) = streams.next() => { + if let Err(_receiver_dropped) = event_sender.send((peer, event)).await { + break; + } + }, + else => break, + } + } + }); EventForwarder { _join_handle: join_handle.into(), stream_sender, @@ -384,21 +598,3 @@ impl EventForwarder { self.stream_sender.send((peer, event_stream)).await.ok(); } } - -impl EventForwarderActor { - async fn run(mut self) { - loop { - tokio::select! 
{ - Some((peer, receiver)) = self.stream_receiver.recv() => { - self.streams.insert(peer, receiver); - }, - Some((peer, event)) = self.streams.next() => { - if let Err(_receiver_dropped) = self.event_sender.send((peer, event)).await { - break; - } - }, - else => break, - } - } - } -} diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 1a20223506..9a117eee72 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -1,13 +1,13 @@ -use anyhow::ensure; +use std::future::Future; + +use anyhow::{ensure, Context as _, Result}; use futures_concurrency::future::TryJoin; +use futures_lite::future::Boxed; use futures_util::future::TryFutureExt; use iroh_base::key::NodeId; use iroh_net::endpoint::{get_remote_node_id, Connection, RecvStream, SendStream}; -use tokio::{ - io::{AsyncReadExt, AsyncWriteExt}, - task::{JoinHandle, JoinSet}, -}; -use tracing::{debug, error_span, field::Empty, instrument, trace, warn, Instrument, Span}; +use tokio::io::{AsyncReadExt, AsyncWriteExt}; +use tracing::{debug, instrument, trace}; use crate::{ proto::sync::{ @@ -29,6 +29,104 @@ use crate::{ pub const CHANNEL_CAP: usize = 1024 * 64; pub const ALPN: &[u8] = b"iroh-willow/0"; +pub type ConnRunFut = Boxed>; + +/// Wrapper around [`iroh_net::endpoint::Connection`] that keeps the remote node's [`NodeId`] and +/// our role (whether we accepted or initiated the connection). +// TODO: Integrate this into iroh_net::endpoint::Connection by making that a struct and not a reexport? Seems universally useful. 
+#[derive(Debug, Clone)] +pub struct PeerConn { + our_role: Role, + remote_node_id: NodeId, + node_id: NodeId, + conn: iroh_net::endpoint::Connection, +} + +impl std::ops::Deref for PeerConn { + type Target = iroh_net::endpoint::Connection; + fn deref(&self) -> &Self::Target { + &self.conn + } +} + +impl PeerConn { + pub fn new(conn: iroh_net::endpoint::Connection, our_role: Role, me: NodeId) -> Result { + let peer = get_remote_node_id(&conn)?; + Ok(Self { + conn, + node_id: me, + remote_node_id: peer, + our_role, + }) + } + pub fn peer(&self) -> NodeId { + self.remote_node_id + } + + pub fn me(&self) -> NodeId { + self.node_id + } + + pub fn our_role(&self) -> Role { + self.our_role + } +} + +pub async fn dial_and_establish( + endpoint: &iroh_net::Endpoint, + node_id: NodeId, + our_nonce: AccessChallenge, +) -> Result<(PeerConn, InitialTransmission)> { + let conn = endpoint.connect_by_node_id(&node_id, ALPN).await?; + let conn = PeerConn::new(conn, Role::Alfie, endpoint.node_id())?; + let initial_transmission = establish(&conn, our_nonce).await?; + Ok((conn, initial_transmission)) +} + +pub async fn establish(conn: &PeerConn, our_nonce: AccessChallenge) -> Result { + debug!(our_role=?conn.our_role(), "start initial transmission"); + let challenge_hash = our_nonce.hash(); + let mut send_stream = conn.open_uni().await?; + send_stream.write_u8(MAX_PAYLOAD_SIZE_POWER).await?; + send_stream.write_all(challenge_hash.as_bytes()).await?; + + let mut recv_stream = conn.accept_uni().await?; + + let their_max_payload_size = { + let power = recv_stream.read_u8().await?; + ensure!(power <= 64, "max payload size too large"); + 2u64.pow(power as u32) + }; + + let mut received_commitment = [0u8; CHALLENGE_HASH_LENGTH]; + recv_stream.read_exact(&mut received_commitment).await?; + debug!(our_role=?conn.our_role(), "initial transmission complete"); + Ok(InitialTransmission { + our_nonce, + received_commitment: ChallengeHash::from_bytes(received_commitment), + their_max_payload_size, 
+ }) + + // let our_role = conn.our_role(); + // let (mut setup_send, mut setup_recv) = match our_role { + // Role::Alfie => conn.open_bi().await?, + // Role::Betty => conn.accept_bi().await?, + // }; + // debug!("setup channel ready"); + + // let initial_transmission = + // exchange_commitments(&mut setup_send, &mut setup_recv, our_nonce).await?; + // Ok(initial_transmission) +} + +pub async fn setup( + conn: &PeerConn, +) -> Result<(Channels, impl Future> + Send + 'static)> { + let our_role = conn.our_role(); + let (channels, fut) = launch_channels(&conn, our_role).await?; + Ok((channels, fut)) +} + #[derive(derive_more::Debug)] pub struct WillowConn { pub(crate) our_role: Role, @@ -37,122 +135,50 @@ pub struct WillowConn { pub(crate) initial_transmission: InitialTransmission, #[debug("Channels")] pub(crate) channels: Channels, - pub(crate) join_handle: JoinHandle>, } impl WillowConn { - pub async fn alfie( - conn: Connection, - me: NodeId, - our_nonce: AccessChallenge, - ) -> anyhow::Result { - Self::connect(conn, me, Role::Alfie, our_nonce).await - } - - pub async fn betty( - conn: Connection, - me: NodeId, - our_nonce: AccessChallenge, - ) -> anyhow::Result { - Self::connect(conn, me, Role::Betty, our_nonce).await - } - + #[cfg(test)] + #[instrument(skip_all, name = "conn", fields(me=%me.fmt_short(), peer=tracing::field::Empty))] async fn connect( conn: Connection, me: NodeId, our_role: Role, our_nonce: AccessChallenge, - ) -> anyhow::Result { - let peer = get_remote_node_id(&conn)?; - let (initial_transmission, channels, mut join_set) = - setup(conn, me, our_role, our_nonce).await?; - let join_handle = tokio::task::spawn(async move { join_all(&mut join_set).await }); - Ok(Self { - peer, + ) -> Result { + let conn = PeerConn::new(conn, our_role, me)?; + tracing::Span::current().record("peer", tracing::field::display(conn.peer().fmt_short())); + let initial_transmission = establish(&conn, our_nonce).await?; + let (channels, fut) = setup(&conn).await?; + 
tokio::task::spawn(fut); + Ok(WillowConn { initial_transmission, - channels, - join_handle, our_role, + peer: conn.peer(), + channels, }) } } -#[instrument(skip_all, name = "willow_net", fields(me=%me.fmt_short(), peer=Empty))] -async fn setup( - conn: Connection, - me: NodeId, - our_role: Role, - our_nonce: AccessChallenge, -) -> anyhow::Result<(InitialTransmission, Channels, JoinSet>)> { - let peer = iroh_net::endpoint::get_remote_node_id(&conn)?; - Span::current().record("peer", tracing::field::display(peer.fmt_short())); - debug!(?our_role, "connected"); - - let mut tasks = JoinSet::new(); - - let (mut control_send_stream, mut control_recv_stream) = match our_role { - Role::Alfie => conn.open_bi().await?, - Role::Betty => conn.accept_bi().await?, - }; - control_send_stream.set_priority(i32::MAX)?; - debug!("control channel ready"); - - let initial_transmission = exchange_commitments( - &mut control_send_stream, - &mut control_recv_stream, - our_nonce, - ) - .await?; - debug!("exchanged commitments"); - - let (control_send, control_recv) = spawn_channel( - &mut tasks, - Channel::Control, - CHANNEL_CAP, - CHANNEL_CAP, - Guarantees::Unlimited, - control_send_stream, - control_recv_stream, - ); - - let (logical_send, logical_recv) = open_logical_channels(&mut tasks, conn, our_role).await?; - debug!("logical channels ready"); - let channels = Channels { - send: ChannelSenders { - control_send, - logical_send, - }, - recv: ChannelReceivers { - control_recv, - logical_recv, - }, - }; - Ok((initial_transmission, channels, tasks)) -} - #[derive(Debug, thiserror::Error)] #[error("missing channel: {0:?}")] -struct MissingChannel(LogicalChannel); +struct MissingChannel(Channel); -async fn open_logical_channels( - join_set: &mut JoinSet>, - conn: Connection, - our_role: Role, -) -> anyhow::Result<(LogicalChannelSenders, LogicalChannelReceivers)> { - let cap = CHANNEL_CAP; - let channels = LogicalChannel::all(); - let mut channels = match our_role { +type ChannelStreams = 
[(Channel, SendStream, RecvStream); Channel::COUNT]; + +async fn open_channels(conn: &Connection, our_role: Role) -> Result { + let channels = match our_role { // Alfie opens a quic stream for each logical channel, and sends a single byte with the // channel id. Role::Alfie => { - channels + Channel::all() .map(|ch| { let conn = conn.clone(); async move { let (mut send, recv) = conn.open_bi().await?; send.write_u8(ch.id()).await?; trace!(?ch, "opened bi stream"); - Ok::<_, anyhow::Error>((ch, Some((send, recv)))) + Ok::<_, anyhow::Error>((ch, send, recv)) } }) .try_join() @@ -161,113 +187,145 @@ async fn open_logical_channels( // Betty accepts as many quick streams as there are logical channels, and reads a single // byte on each, which is expected to contain a channel id. Role::Betty => { - channels + Channel::all() .map(|_| async { let (send, mut recv) = conn.accept_bi().await?; - trace!("accepted bi stream"); + // trace!("accepted bi stream"); let channel_id = recv.read_u8().await?; - trace!("read channel id {channel_id}"); - let channel = LogicalChannel::from_id(channel_id)?; - trace!("accepted bi stream for logical channel {channel:?}"); - anyhow::Result::Ok((channel, Some((send, recv)))) + // trace!("read channel id {channel_id}"); + let channel = Channel::from_id(channel_id)?; + trace!(?channel, "accepted bi stream for channel"); + Result::Ok((channel, send, recv)) }) .try_join() .await } }?; + Ok(channels) +} + +fn start_channels( + channels: ChannelStreams, +) -> Result<(Channels, impl Future> + Send)> { + let mut channels = channels.map(|(ch, send, recv)| (ch, Some(prepare_channel(ch, send, recv)))); - let mut take_and_spawn_channel = |channel| { + let mut find = |channel| { channels .iter_mut() .find_map(|(ch, streams)| (*ch == channel).then(|| streams.take())) .flatten() - .map(|(send_stream, recv_stream)| { - spawn_channel( - join_set, - Channel::Logical(channel), - cap, - cap, - Guarantees::Limited(0), - send_stream, - recv_stream, - ) - }) 
.ok_or(MissingChannel(channel)) }; - let pai = take_and_spawn_channel(LogicalChannel::Intersection)?; - let rec = take_and_spawn_channel(LogicalChannel::Reconciliation)?; - let stt = take_and_spawn_channel(LogicalChannel::StaticToken)?; - let aoi = take_and_spawn_channel(LogicalChannel::AreaOfInterest)?; - let cap = take_and_spawn_channel(LogicalChannel::Capability)?; - let dat = take_and_spawn_channel(LogicalChannel::Data)?; - - Ok(( - LogicalChannelSenders { - intersection_send: pai.0, - reconciliation_send: rec.0, - static_tokens_send: stt.0, - aoi_send: aoi.0, - capability_send: cap.0, - data_send: dat.0, + let ctrl = find(Channel::Control)?; + let pai = find(Channel::Logical(LogicalChannel::Intersection))?; + let rec = find(Channel::Logical(LogicalChannel::Reconciliation))?; + let stt = find(Channel::Logical(LogicalChannel::StaticToken))?; + let aoi = find(Channel::Logical(LogicalChannel::AreaOfInterest))?; + let cap = find(Channel::Logical(LogicalChannel::Capability))?; + let dat = find(Channel::Logical(LogicalChannel::Data))?; + + let fut = (ctrl.2, pai.2, rec.2, stt.2, aoi.2, cap.2, dat.2) + .try_join() + .map_ok(|_| ()); + + let logical_send = LogicalChannelSenders { + intersection_send: pai.0, + reconciliation_send: rec.0, + static_tokens_send: stt.0, + aoi_send: aoi.0, + capability_send: cap.0, + data_send: dat.0, + }; + let logical_recv = LogicalChannelReceivers { + intersection_recv: pai.1.into(), + reconciliation_recv: rec.1.into(), + static_tokens_recv: stt.1.into(), + aoi_recv: aoi.1.into(), + capability_recv: cap.1.into(), + data_recv: dat.1.into(), + }; + let channels = Channels { + send: ChannelSenders { + control_send: ctrl.0, + logical_send, }, - LogicalChannelReceivers { - intersection_recv: pai.1.into(), - reconciliation_recv: rec.1.into(), - static_tokens_recv: stt.1.into(), - aoi_recv: aoi.1.into(), - capability_recv: cap.1.into(), - data_recv: dat.1.into(), + recv: ChannelReceivers { + control_recv: ctrl.1, + logical_recv, }, - )) + }; + 
Ok((channels, fut)) } -fn spawn_channel( - join_set: &mut JoinSet>, +async fn launch_channels( + conn: &Connection, + our_role: Role, +) -> Result<(Channels, impl Future> + Send)> { + let channels = open_channels(conn, our_role).await?; + start_channels(channels) +} + +fn prepare_channel( ch: Channel, - send_cap: usize, - recv_cap: usize, - guarantees: Guarantees, send_stream: SendStream, recv_stream: RecvStream, -) -> (Sender, Receiver) { - let (sender, outbound_reader) = outbound_channel(send_cap, guarantees); - let (inbound_writer, receiver) = inbound_channel(recv_cap); +) -> ( + Sender, + Receiver, + impl Future> + Send, +) { + let guarantees = match ch { + Channel::Control => Guarantees::Unlimited, + Channel::Logical(_) => Guarantees::Limited(0), + }; + let cap = CHANNEL_CAP; + let (sender, outbound_reader) = outbound_channel(cap, guarantees); + let (inbound_writer, receiver) = inbound_channel(cap); let recv_fut = recv_loop(recv_stream, inbound_writer) - .map_err(move |e| e.context(format!("receive loop for {ch:?} failed"))) - .instrument(error_span!("recv", ch=%ch.fmt_short())); - - join_set.spawn(recv_fut); + .map_err(move |e| e.context(format!("receive loop for {ch:?} failed"))); let send_fut = send_loop(send_stream, outbound_reader) - .map_err(move |e| e.context(format!("send loop for {ch:?} failed"))) - .instrument(error_span!("send", ch=%ch.fmt_short())); + .map_err(move |e| e.context(format!("send loop for {ch:?} failed"))); - join_set.spawn(send_fut); + let fut = (recv_fut, send_fut).try_join().map_ok(|_| ()); - (sender, receiver) + (sender, receiver, fut) } -async fn recv_loop(mut recv_stream: RecvStream, mut channel_writer: Writer) -> anyhow::Result<()> { +async fn recv_loop(mut recv_stream: RecvStream, mut channel_writer: Writer) -> Result<()> { + trace!("recv: start"); let max_buffer_size = channel_writer.max_buffer_size(); - while let Some(buf) = recv_stream.read_chunk(max_buffer_size, true).await? 
{ + while let Some(buf) = recv_stream + .read_chunk(max_buffer_size, true) + .await + .context("failed to read from quic stream")? + { + // trace!(len = buf.bytes.len(), "read"); channel_writer.write_all(&buf.bytes[..]).await?; - // trace!(len = buf.bytes.len(), "recv"); + // trace!(len = buf.bytes.len(), "sent"); } + trace!("recv: stream close"); channel_writer.close(); - trace!("close"); + trace!("recv: loop close"); Ok(()) } -async fn send_loop(mut send_stream: SendStream, channel_reader: Reader) -> anyhow::Result<()> { +async fn send_loop(mut send_stream: SendStream, channel_reader: Reader) -> Result<()> { + trace!("send: start"); while let Some(data) = channel_reader.read_bytes().await { // let len = data.len(); - send_stream.write_chunk(data).await?; + // trace!(len, "send"); + send_stream + .write_chunk(data) + .await + .context("failed to write to quic stream")?; // trace!(len, "sent"); } + trace!("send: close writer"); send_stream.finish().await?; - trace!("close"); + trace!("send: done"); Ok(()) } @@ -275,7 +333,7 @@ async fn exchange_commitments( send_stream: &mut SendStream, recv_stream: &mut RecvStream, our_nonce: AccessChallenge, -) -> anyhow::Result { +) -> Result { let challenge_hash = our_nonce.hash(); send_stream.write_u8(MAX_PAYLOAD_SIZE_POWER).await?; send_stream.write_all(challenge_hash.as_bytes()).await?; @@ -295,26 +353,6 @@ async fn exchange_commitments( }) } -pub async fn join_all(join_set: &mut JoinSet>) -> anyhow::Result<()> { - let mut final_result = Ok(()); - let mut joined = 0; - while let Some(res) = join_set.join_next().await { - joined += 1; - trace!("joined {joined} tasks, remaining {}", join_set.len()); - let res = match res { - Ok(Ok(())) => Ok(()), - Ok(Err(err)) => Err(err), - Err(err) => Err(err.into()), - }; - if res.is_err() && final_result.is_ok() { - final_result = res; - } else if res.is_err() { - warn!("join error after initial error: {res:?}"); - } - } - final_result -} - #[cfg(test)] mod tests { use std::{ diff --git 
a/iroh-willow/src/proto/sync.rs b/iroh-willow/src/proto/sync.rs index add058876d..781ba7bed2 100644 --- a/iroh-willow/src/proto/sync.rs +++ b/iroh-willow/src/proto/sync.rs @@ -192,6 +192,19 @@ pub enum Channel { } impl Channel { + pub const COUNT: usize = LogicalChannel::COUNT + 1; + + pub fn all() -> [Channel; LogicalChannel::COUNT + 1] { + // TODO: do this without allocation + // https://users.rust-lang.org/t/how-to-concatenate-array-literals-in-compile-time/21141/3 + [Self::Control] + .into_iter() + .chain(LogicalChannel::VARIANTS.iter().copied().map(Self::Logical)) + .collect::>() + .try_into() + .expect("static length") + } + pub fn fmt_short(&self) -> &'static str { match self { Channel::Control => "Ctl", @@ -206,7 +219,7 @@ impl Channel { } } - pub fn from_id(self, id: u8) -> Result { + pub fn from_id(id: u8) -> Result { match id { 0 => Ok(Self::Control), _ => { diff --git a/iroh-willow/src/session/data.rs b/iroh-willow/src/session/data.rs index fb9ba2fbea..f9c02b8d23 100644 --- a/iroh-willow/src/session/data.rs +++ b/iroh-willow/src/session/data.rs @@ -81,7 +81,6 @@ impl DataSender { } } } - tracing::debug!("data sender done"); Ok(()) } diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index e26b403c05..ad23b7d1af 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -29,9 +29,10 @@ use crate::{ grouping::{AreaOfInterest, ThreeDRange}, keys::NamespaceId, sync::{ - AreaOfInterestHandle, Fingerprint, LengthyEntry, ReconciliationAnnounceEntries, - ReconciliationMessage, ReconciliationSendEntry, ReconciliationSendFingerprint, - ReconciliationSendPayload, ReconciliationTerminatePayload, + AreaOfInterestHandle, Fingerprint, IsHandle, LengthyEntry, + ReconciliationAnnounceEntries, ReconciliationMessage, ReconciliationSendEntry, + ReconciliationSendFingerprint, ReconciliationSendPayload, + ReconciliationTerminatePayload, }, willow::PayloadDigest, }, @@ -188,6 +189,11 @@ impl 
Reconciler { .map .remove(&id) .ok_or(Error::InvalidMessageInCurrentState)?; + debug!( + our_handle = id.0.value(), + their_handle = id.1.value(), + "reconciled area" + ); self.out(Output::ReconciledArea { area: target.intersection.intersection.clone(), namespace: target.namespace(), @@ -256,7 +262,11 @@ impl TargetMap { let snapshot = shared.store.entries().snapshot()?; let target = Target::init(snapshot, shared, intersection).await?; let id = target.id(); - tracing::info!("init {id:?}"); + debug!( + our_handle = id.0.value(), + their_handle = id.1.value(), + "init area" + ); self.map.insert(id, target); Ok(id) } diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 5604936f15..09e150fcfe 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -6,7 +6,7 @@ use strum::IntoEnumIterator; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; use tokio_util::sync::CancellationToken; -use tracing::{debug, error_span, Instrument, Span}; +use tracing::{debug, error_span, trace, Instrument, Span}; use crate::{ net::WillowConn, @@ -50,7 +50,6 @@ pub async fn run_session( initial_transmission, our_role, channels, - join_handle, } = conn; let Channels { send: channel_sender, @@ -116,25 +115,25 @@ pub async fn run_session( (None, None) }; - let net_fut = with_span(error_span!("net"), async { - // TODO: awaiting the net task handle hangs - drop(join_handle); - // let res = join_handle.await; - // debug!(?res, "net tasks finished"); - // match res { - // Ok(Ok(())) => Ok(()), - // Ok(Err(err)) => Err(Error::Net(err)), - // Err(err) => Err(Error::Net(err.into())), - // } - Ok(()) - }); + // let net_fut = with_span(error_span!("net"), async { + // // TODO: awaiting the net task handle hangs + // drop(join_handle); + // // let res = join_handle.await; + // // debug!(?res, "net tasks finished"); + // // match res { + // // Ok(Ok(())) => Ok(()), + // // Ok(Err(err)) => Err(Error::Net(err)), + // // Err(err) => 
Err(Error::Net(err.into())), + // // } + // Ok(()) + // }); let mut intents = intents::IntentDispatcher::new(store.auth().clone(), initial_intents); let intents_fut = with_span(error_span!("intents"), async { use intents::Output; let mut intents_gen = intents.run_gen(intents_inbox_rx); while let Some(output) = intents_gen.try_next().await? { - debug!(?output, "yield"); + trace!(?output, "yield"); match output { Output::SubmitInterests(interests) => { intersection_inbox @@ -168,7 +167,7 @@ pub async fn run_session( while let Some(message) = data_recv.try_next().await? { data_receiver.on_message(message).await?; } - tracing::debug!("data receiver done"); + trace!("data receiver terminated"); Ok(()) }; (send_fut, recv_fut).try_join().await?; @@ -342,7 +341,7 @@ pub async fn run_session( }); let result = ( - net_fut, + // net_fut, intents_fut, control_loop, data_loop, @@ -481,9 +480,9 @@ async fn with_span( fut: impl Future>, ) -> Result { async { - tracing::debug!("start"); + trace!("start"); let res = fut.await; - tracing::debug!(?res, "done"); + trace!(?res, "done"); res } .instrument(span) diff --git a/iroh-willow/tests/basic.rs b/iroh-willow/tests/basic.rs index fb42a51c84..66eda67dfd 100644 --- a/iroh-willow/tests/basic.rs +++ b/iroh-willow/tests/basic.rs @@ -1,17 +1,15 @@ +use std::time::Duration; use anyhow::Result; use futures_concurrency::future::TryJoin; use futures_lite::StreamExt; use iroh_willow::{ - proto::{ - grouping::Area, - willow::Path, - }, + proto::{grouping::Area, willow::Path}, session::{intents::EventKind, Interests, SessionInit, SessionMode}, }; -use self::util::{create_rng, spawn_two, insert, setup_and_delegate, Peer}; +use self::util::{create_rng, insert, setup_and_delegate, spawn_two, Peer}; #[tokio::test(flavor = "multi_thread")] async fn peer_manager_two_intents() -> Result<()> { From 87a42f4c062dcfaa68e7f3b317ed87e2919c4ccf Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 8 Aug 2024 01:31:14 +0200 Subject: [PATCH 
104/198] refactor: finalize net and peer_manager refactor --- iroh-willow/src/engine/actor.rs | 8 +- iroh-willow/src/engine/peer_manager.rs | 77 +++----- iroh-willow/src/lib.rs | 3 +- iroh-willow/src/net.rs | 264 ++++++------------------- iroh-willow/src/session.rs | 6 +- iroh-willow/src/session/run.rs | 8 +- iroh-willow/src/util/channel.rs | 18 +- iroh-willow/tests/basic.rs | 2 - 8 files changed, 113 insertions(+), 273 deletions(-) diff --git a/iroh-willow/src/engine/actor.rs b/iroh-willow/src/engine/actor.rs index 8e0c942e0d..d0b98c3ff1 100644 --- a/iroh-willow/src/engine/actor.rs +++ b/iroh-willow/src/engine/actor.rs @@ -13,7 +13,7 @@ use tracing::{debug, error, error_span, trace, warn, Instrument}; use crate::{ auth::{CapSelector, CapabilityPack, DelegateTo, InterestMap}, form::{AuthForm, EntryForm, EntryOrForm}, - net::WillowConn, + net::ConnHandle, proto::{ grouping::ThreeDRange, keys::{NamespaceId, NamespaceKind, UserId, UserSecretKey}, @@ -135,9 +135,9 @@ impl ActorHandle { Ok(rx.into_stream()) } - pub async fn init_session( + pub(crate) async fn init_session( &self, - conn: WillowConn, + conn: ConnHandle, intents: Vec, ) -> Result { let (reply, reply_rx) = oneshot::channel(); @@ -224,7 +224,7 @@ impl Drop for ActorHandle { #[derive(derive_more::Debug, strum::Display)] pub enum Input { InitSession { - conn: WillowConn, + conn: ConnHandle, intents: Vec, reply: oneshot::Sender>, }, diff --git a/iroh-willow/src/engine/peer_manager.rs b/iroh-willow/src/engine/peer_manager.rs index 947d764199..90edbf26cf 100644 --- a/iroh-willow/src/engine/peer_manager.rs +++ b/iroh-willow/src/engine/peer_manager.rs @@ -6,7 +6,7 @@ use futures_buffered::join_all; use futures_lite::{future::Boxed, StreamExt}; use futures_util::{FutureExt, TryFutureExt}; use iroh_net::{ - endpoint::{Connection, ConnectionError}, + endpoint::{get_remote_node_id, Connection, ConnectionError}, util::AbortingJoinHandle, Endpoint, NodeId, }; @@ -20,11 +20,11 @@ use tokio_util::sync::CancellationToken; 
use tracing::{debug, error, error_span, instrument, trace, warn, Instrument, Span}; use crate::{ - net::{establish, setup, ConnRunFut, PeerConn, WillowConn, ALPN}, + net::{establish, prepare_channels, ChannelStreams, ConnHandle, ALPN}, proto::sync::{AccessChallenge, InitialTransmission}, session::{ intents::{EventKind, Intent}, - Channels, Error, Interests, Role, SessionEvent, SessionHandle, SessionInit, SessionUpdate, + Error, Interests, Role, SessionEvent, SessionHandle, SessionInit, SessionUpdate, }, }; @@ -175,14 +175,13 @@ impl PeerManager { } async fn handle_connection(&mut self, conn: Connection) { - let conn = match PeerConn::new(conn, Role::Betty, self.endpoint.node_id()) { - Ok(conn) => conn, + let peer = match get_remote_node_id(&conn) { + Ok(peer) => peer, Err(err) => { debug!("ignore incoming connection (failed to get remote node id: {err})"); return; } }; - let peer = conn.peer(); let Some(intent) = self.accept_handlers.accept(peer).await else { debug!("ignore incoming connection (accept handler returned none)"); return; @@ -196,10 +195,12 @@ impl PeerManager { PeerState::None => { let our_nonce = AccessChallenge::generate(); let fut = async move { - let initial_transmission = establish(&conn, our_nonce).await?; - Ok(ConnStep::Established { + let (initial_transmission, channel_streams) = + establish(&conn, Role::Betty, our_nonce).await?; + Ok(ConnStep::Ready { conn, initial_transmission, + channel_streams, }) }; let abort_handle = spawn_conn_task(&mut self.conn_tasks, &peer_info, fut); @@ -233,11 +234,12 @@ impl PeerManager { let endpoint = self.endpoint.clone(); let fut = async move { let conn = endpoint.connect_by_node_id(&peer, ALPN).await?; - let conn = PeerConn::new(conn, Role::Alfie, endpoint.node_id())?; - let initial_transmission = establish(&conn, our_nonce).await?; - Ok(ConnStep::Established { + let (initial_transmission, channel_streams) = + establish(&conn, Role::Alfie, our_nonce).await?; + Ok(ConnStep::Ready { conn, initial_transmission, 
+ channel_streams, }) }; let abort_handle = spawn_conn_task(&mut self.conn_tasks, &peer_info, fut); @@ -292,38 +294,18 @@ impl PeerManager { // We don't need to cancel the session here. It will terminate because all receiver channels are closed. return Ok(()); } - Ok(ConnStep::Established { - conn, - initial_transmission, - }) => { - // TODO: Check if we want to continue. - let fut = async move { - let result = setup(&conn).await; - result.map(|(channels, fut)| ConnStep::Ready { - conn, - channels, - fut: fut.boxed(), - }) - }; - let abort_handle = spawn_conn_task(&mut self.conn_tasks, &peer_info, fut); - peer_info.state = PeerState::Pending; - peer_info.abort_handle = Some(abort_handle); - peer_info.initial_transmission = Some(initial_transmission); - } Ok(ConnStep::Ready { conn, - channels, - fut, + initial_transmission, + channel_streams, }) => { - let initial_transmission = peer_info - .initial_transmission - .take() - .context("expedted initial transmission for peer in ready state")?; + // TODO: Here we should check again that we are not establishing a duplicate connection. debug!("connection ready: init session"); - let willow_conn = WillowConn { + let (channels, fut) = prepare_channels(channel_streams)?; + let willow_conn = ConnHandle { initial_transmission, channels, - our_role: conn.our_role(), + our_role: peer_info.our_role, peer, }; let intents = std::mem::take(&mut peer_info.pending_intents); @@ -352,7 +334,7 @@ impl PeerManager { // meaningful message should wait for the other end to terminate the connection, I think. // // In other words, the connection may only be closed by the party who received the last meaningful message. - if conn.our_role() == Role::Alfie { + if peer_info.our_role == Role::Alfie { conn.close(ERROR_CODE_OK.into(), b"bye"); } let fut = async move { @@ -365,7 +347,7 @@ impl PeerManager { Ok(ConnStep::Closed { reason, conn }) => { // TODO: Instead of using our role (alfie vs. 
betty), the party who sent the last // meaningful message should wait for the other end to terminate the connection, I think. - let locally_closed = conn.our_role() == Role::Alfie; + let locally_closed = peer_info.our_role == Role::Alfie; let is_graceful = match &reason { ConnectionError::LocallyClosed if locally_closed => true, ConnectionError::ApplicationClosed(frame) @@ -473,7 +455,6 @@ struct PeerInfo { abort_handle: Option, state: PeerState, pending_intents: Vec, - initial_transmission: Option, span: Span, } @@ -485,7 +466,6 @@ impl PeerInfo { abort_handle: None, state: PeerState::None, pending_intents: Vec::new(), - initial_transmission: None, span: error_span!("conn", peer=%peer.fmt_short()), } } @@ -503,21 +483,16 @@ enum PeerState { #[derive(derive_more::Debug, strum::Display)] enum ConnStep { - Established { - conn: PeerConn, - initial_transmission: InitialTransmission, - }, Ready { - conn: PeerConn, - channels: Channels, - #[debug("ConnRunFut")] - fut: ConnRunFut, + conn: Connection, + initial_transmission: InitialTransmission, + channel_streams: ChannelStreams, }, Done { - conn: PeerConn, + conn: Connection, }, Closed { - conn: PeerConn, + conn: Connection, reason: ConnectionError, }, } diff --git a/iroh-willow/src/lib.rs b/iroh-willow/src/lib.rs index ee0f197c82..90bb205ff3 100644 --- a/iroh-willow/src/lib.rs +++ b/iroh-willow/src/lib.rs @@ -1,6 +1,7 @@ //! 
Implementation of willow #![allow(missing_docs)] +#![deny(unsafe_code)] pub mod auth; pub mod engine; @@ -9,4 +10,4 @@ pub mod net; pub mod proto; pub mod session; pub mod store; -pub mod util; +mod util; diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 9a117eee72..825e97e477 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -2,12 +2,11 @@ use std::future::Future; use anyhow::{ensure, Context as _, Result}; use futures_concurrency::future::TryJoin; -use futures_lite::future::Boxed; use futures_util::future::TryFutureExt; use iroh_base::key::NodeId; -use iroh_net::endpoint::{get_remote_node_id, Connection, RecvStream, SendStream}; +use iroh_net::endpoint::{Connection, RecvStream, SendStream}; use tokio::io::{AsyncReadExt, AsyncWriteExt}; -use tracing::{debug, instrument, trace}; +use tracing::{debug, trace}; use crate::{ proto::sync::{ @@ -27,64 +26,46 @@ use crate::{ }; pub const CHANNEL_CAP: usize = 1024 * 64; -pub const ALPN: &[u8] = b"iroh-willow/0"; - -pub type ConnRunFut = Boxed>; - -/// Wrapper around [`iroh_net::endpoint::Connection`] that keeps the remote node's [`NodeId`] and -/// our role (whether we accepted or initiated the connection). -// TODO: Integrate this into iroh_net::endpoint::Connection by making that a struct and not a reexport? Seems universally useful. 
-#[derive(Debug, Clone)] -pub struct PeerConn { - our_role: Role, - remote_node_id: NodeId, - node_id: NodeId, - conn: iroh_net::endpoint::Connection, -} - -impl std::ops::Deref for PeerConn { - type Target = iroh_net::endpoint::Connection; - fn deref(&self) -> &Self::Target { - &self.conn - } -} - -impl PeerConn { - pub fn new(conn: iroh_net::endpoint::Connection, our_role: Role, me: NodeId) -> Result { - let peer = get_remote_node_id(&conn)?; - Ok(Self { - conn, - node_id: me, - remote_node_id: peer, - our_role, - }) - } - pub fn peer(&self) -> NodeId { - self.remote_node_id - } - pub fn me(&self) -> NodeId { - self.node_id - } +/// The ALPN protocol name for iroh-willow. +pub const ALPN: &[u8] = b"iroh-willow/0"; - pub fn our_role(&self) -> Role { - self.our_role - } +/// The handle to an active peer connection. +/// +/// This is passed into the session loop, where it is used to send and receive messages +/// on the control and logical channels. It also contains the data of the initial transmission. +#[derive(derive_more::Debug)] +pub(crate) struct ConnHandle { + pub(crate) our_role: Role, + pub(crate) peer: NodeId, + #[debug("InitialTransmission")] + pub(crate) initial_transmission: InitialTransmission, + #[debug("Channels")] + pub(crate) channels: Channels, } -pub async fn dial_and_establish( - endpoint: &iroh_net::Endpoint, - node_id: NodeId, +/// Establish the connection by running the initial transmission and +/// opening the streams for the control and logical channels. 
+pub(crate) async fn establish( + conn: &Connection, + our_role: Role, our_nonce: AccessChallenge, -) -> Result<(PeerConn, InitialTransmission)> { - let conn = endpoint.connect_by_node_id(&node_id, ALPN).await?; - let conn = PeerConn::new(conn, Role::Alfie, endpoint.node_id())?; - let initial_transmission = establish(&conn, our_nonce).await?; - Ok((conn, initial_transmission)) +) -> Result<(InitialTransmission, ChannelStreams)> { + // Run the initial transmission (which works on uni streams) concurrently + // with opening/accepting the bi streams for the channels. + ( + initial_transmission(conn, our_nonce), + open_channel_streams(conn, our_role), + ) + .try_join() + .await } -pub async fn establish(conn: &PeerConn, our_nonce: AccessChallenge) -> Result { - debug!(our_role=?conn.our_role(), "start initial transmission"); +async fn initial_transmission( + conn: &Connection, + our_nonce: AccessChallenge, +) -> Result { + debug!("start initial transmission"); let challenge_hash = our_nonce.hash(); let mut send_stream = conn.open_uni().await?; send_stream.write_u8(MAX_PAYLOAD_SIZE_POWER).await?; @@ -100,73 +81,21 @@ pub async fn establish(conn: &PeerConn, our_nonce: AccessChallenge) -> Result conn.open_bi().await?, - // Role::Betty => conn.accept_bi().await?, - // }; - // debug!("setup channel ready"); - - // let initial_transmission = - // exchange_commitments(&mut setup_send, &mut setup_recv, our_nonce).await?; - // Ok(initial_transmission) -} - -pub async fn setup( - conn: &PeerConn, -) -> Result<(Channels, impl Future> + Send + 'static)> { - let our_role = conn.our_role(); - let (channels, fut) = launch_channels(&conn, our_role).await?; - Ok((channels, fut)) -} - -#[derive(derive_more::Debug)] -pub struct WillowConn { - pub(crate) our_role: Role, - pub(crate) peer: NodeId, - #[debug("InitialTransmission")] - pub(crate) initial_transmission: InitialTransmission, - #[debug("Channels")] - pub(crate) channels: Channels, -} - -impl WillowConn { - #[cfg(test)] - 
#[instrument(skip_all, name = "conn", fields(me=%me.fmt_short(), peer=tracing::field::Empty))] - async fn connect( - conn: Connection, - me: NodeId, - our_role: Role, - our_nonce: AccessChallenge, - ) -> Result { - let conn = PeerConn::new(conn, our_role, me)?; - tracing::Span::current().record("peer", tracing::field::display(conn.peer().fmt_short())); - let initial_transmission = establish(&conn, our_nonce).await?; - let (channels, fut) = setup(&conn).await?; - tokio::task::spawn(fut); - Ok(WillowConn { - initial_transmission, - our_role, - peer: conn.peer(), - channels, - }) - } } #[derive(Debug, thiserror::Error)] #[error("missing channel: {0:?}")] struct MissingChannel(Channel); -type ChannelStreams = [(Channel, SendStream, RecvStream); Channel::COUNT]; +pub(crate) type ChannelStreams = [(Channel, SendStream, RecvStream); Channel::COUNT]; -async fn open_channels(conn: &Connection, our_role: Role) -> Result { +async fn open_channel_streams(conn: &Connection, our_role: Role) -> Result { let channels = match our_role { // Alfie opens a quic stream for each logical channel, and sends a single byte with the // channel id. 
@@ -204,7 +133,7 @@ async fn open_channels(conn: &Connection, our_role: Role) -> Result Result<(Channels, impl Future> + Send)> { let mut channels = channels.map(|(ch, send, recv)| (ch, Some(prepare_channel(ch, send, recv)))); @@ -258,14 +187,6 @@ fn start_channels( Ok((channels, fut)) } -async fn launch_channels( - conn: &Connection, - our_role: Role, -) -> Result<(Channels, impl Future> + Send)> { - let channels = open_channels(conn, our_role).await?; - start_channels(channels) -} - fn prepare_channel( ch: Channel, send_stream: SendStream, @@ -329,30 +250,6 @@ async fn send_loop(mut send_stream: SendStream, channel_reader: Reader) -> Resul Ok(()) } -async fn exchange_commitments( - send_stream: &mut SendStream, - recv_stream: &mut RecvStream, - our_nonce: AccessChallenge, -) -> Result { - let challenge_hash = our_nonce.hash(); - send_stream.write_u8(MAX_PAYLOAD_SIZE_POWER).await?; - send_stream.write_all(challenge_hash.as_bytes()).await?; - - let their_max_payload_size = { - let power = recv_stream.read_u8().await?; - ensure!(power <= 64, "max payload size too large"); - 2u64.pow(power as u32) - }; - - let mut received_commitment = [0u8; CHALLENGE_HASH_LENGTH]; - recv_stream.read_exact(&mut received_commitment).await?; - Ok(InitialTransmission { - our_nonce, - received_commitment: ChallengeHash::from_bytes(received_commitment), - their_max_payload_size, - }) -} - #[cfg(test)] mod tests { use std::{ @@ -360,18 +257,19 @@ mod tests { time::{Duration, Instant}, }; + use anyhow::Result; use futures_lite::StreamExt; use iroh_base::key::SecretKey; use iroh_net::{endpoint::Connection, Endpoint, NodeAddr, NodeId}; use rand::SeedableRng; use rand_chacha::ChaCha12Rng; - use tracing::info; + use tracing::{info, Instrument}; use crate::{ auth::{CapSelector, DelegateTo, RestrictArea}, engine::ActorHandle, form::{AuthForm, EntryForm, PayloadForm, SubspaceForm, TimestampForm}, - net::WillowConn, + net::ConnHandle, proto::{ grouping::ThreeDRange, keys::{NamespaceId, 
NamespaceKind, UserId}, @@ -382,6 +280,8 @@ mod tests { session::{intents::Intent, Interests, Role, SessionHandle, SessionInit, SessionMode}, }; + use super::{establish, prepare_channels}; + const ALPN: &[u8] = b"iroh-willow/0"; fn create_rng(seed: &str) -> ChaCha12Rng { @@ -396,14 +296,26 @@ mod tests { our_role: Role, our_nonce: AccessChallenge, intents: Vec, - ) -> anyhow::Result { - let conn = WillowConn::connect(conn, me, our_role, our_nonce).await?; - let handle = actor.init_session(conn, intents).await?; + ) -> Result { + let peer = iroh_net::endpoint::get_remote_node_id(&conn)?; + let span = tracing::error_span!("conn", me=%me.fmt_short(), peer=%peer.fmt_short()); + let (initial_transmission, channel_streams) = establish(&conn, our_role, our_nonce) + .instrument(span.clone()) + .await?; + let (channels, fut) = prepare_channels(channel_streams)?; + tokio::task::spawn(fut.instrument(span)); + let willow_conn = ConnHandle { + initial_transmission, + our_role, + peer, + channels, + }; + let handle = actor.init_session(willow_conn, intents).await?; Ok(handle) } #[tokio::test(flavor = "multi_thread")] - async fn net_smoke() -> anyhow::Result<()> { + async fn net_smoke() -> Result<()> { iroh_test::logging::setup_multithreaded(); let mut rng = create_rng("net_smoke"); let n_betty = parse_env_var("N_BETTY", 100); @@ -525,7 +437,7 @@ mod tests { } #[tokio::test(flavor = "multi_thread")] - async fn net_live_data() -> anyhow::Result<()> { + async fn net_live_data() -> Result<()> { iroh_test::logging::setup_multithreaded(); let mut rng = create_rng("net_live_data"); @@ -681,7 +593,7 @@ mod tests { pub async fn create_endpoint( rng: &mut rand_chacha::ChaCha12Rng, - ) -> anyhow::Result<(Endpoint, NodeId, NodeAddr)> { + ) -> Result<(Endpoint, NodeId, NodeAddr)> { let ep = Endpoint::builder() .secret_key(SecretKey::generate_with_rng(rng)) .alpns(vec![ALPN.to_vec()]) @@ -692,11 +604,8 @@ mod tests { Ok((ep, node_id, addr)) } - async fn get_entries( - store: &ActorHandle, - 
namespace: NamespaceId, - ) -> anyhow::Result> { - let entries: anyhow::Result> = store + async fn get_entries(store: &ActorHandle, namespace: NamespaceId) -> Result> { + let entries: Result> = store .get_entries(namespace, ThreeDRange::full()) .await? .try_collect() @@ -712,7 +621,7 @@ mod tests { path_fn: impl Fn(usize) -> Result, content_fn: impl Fn(usize) -> String, track_entries: &mut impl Extend, - ) -> anyhow::Result<()> { + ) -> Result<()> { for i in 0..count { let payload = content_fn(i).as_bytes().to_vec(); let path = path_fn(i).expect("invalid path"); @@ -742,47 +651,4 @@ mod tests { Err(_) => default, } } - - // async fn get_entries_debug( - // store: &StoreHandle, - // namespace: NamespaceId, - // ) -> anyhow::Result> { - // let entries = get_entries(store, namespace).await?; - // let mut entries: Vec<_> = entries - // .into_iter() - // .map(|e| (e.subspace_id, e.path)) - // .collect(); - // entries.sort(); - // Ok(entries) - // } - // - // - // - // tokio::task::spawn({ - // let handle_alfie = handle_alfie.clone(); - // let handle_betty = handle_betty.clone(); - // async move { - // loop { - // info!( - // "alfie count: {}", - // handle_alfie - // .get_entries(namespace_id, ThreeDRange::full()) - // .await - // .unwrap() - // .count() - // .await - // ); - // info!( - // "betty count: {}", - // handle_betty - // .get_entries(namespace_id, ThreeDRange::full()) - // .await - // .unwrap() - // .count() - // .await - // ); - // tokio::time::sleep(Duration::from_secs(1)).await; - // } - // } - // }); } diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index b274722efa..f04e0db2cf 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -25,9 +25,9 @@ mod resource; mod run; mod static_tokens; -pub use self::channels::Channels; -pub use self::error::Error; -pub use self::run::run_session; +pub(crate) use self::channels::Channels; +pub(crate) use self::error::Error; +pub(crate) use self::run::run_session; pub type 
SessionId = u64; diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 09e150fcfe..ede29aa4c3 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -9,7 +9,7 @@ use tokio_util::sync::CancellationToken; use tracing::{debug, error_span, trace, Instrument, Span}; use crate::{ - net::WillowConn, + net::ConnHandle, proto::sync::{ControlIssueGuarantee, LogicalChannel, Message, SetupBindAreaOfInterest}, session::{ aoi_finder::{self, IntersectionFinder}, @@ -36,16 +36,16 @@ use super::{ const INITIAL_GUARANTEES: u64 = u64::MAX; -pub async fn run_session( +pub(crate) async fn run_session( store: Store, - conn: WillowConn, + conn: ConnHandle, initial_intents: Vec, cancel_token: CancellationToken, session_id: SessionId, event_sender: EventSender, update_receiver: impl Stream + Unpin + 'static, ) -> Result<(), Arc> { - let WillowConn { + let ConnHandle { peer: _, initial_transmission, our_role, diff --git a/iroh-willow/src/util/channel.rs b/iroh-willow/src/util/channel.rs index b86d179860..0f4d67ef22 100644 --- a/iroh-willow/src/util/channel.rs +++ b/iroh-willow/src/util/channel.rs @@ -14,15 +14,15 @@ use tokio::io::AsyncWrite; use crate::util::codec::{DecodeOutcome, Decoder, Encoder}; -/// Create an in-memory pipe. -pub fn pipe(cap: usize) -> (Writer, Reader) { - let shared = Shared::new(cap, Guarantees::Unlimited); - let writer = Writer { - shared: shared.clone(), - }; - let reader = Reader { shared }; - (writer, reader) -} +// /// Create an in-memory pipe. +// pub fn pipe(cap: usize) -> (Writer, Reader) { +// let shared = Shared::new(cap, Guarantees::Unlimited); +// let writer = Writer { +// shared: shared.clone(), +// }; +// let reader = Reader { shared }; +// (writer, reader) +// } /// Create a new channel with a message [`Sender`] on the transmit side and a byte [`Reader`] on /// the receive side. 
diff --git a/iroh-willow/tests/basic.rs b/iroh-willow/tests/basic.rs index 66eda67dfd..6b9ffd78d3 100644 --- a/iroh-willow/tests/basic.rs +++ b/iroh-willow/tests/basic.rs @@ -1,5 +1,3 @@ -use std::time::Duration; - use anyhow::Result; use futures_concurrency::future::TryJoin; use futures_lite::StreamExt; From 21aee12d3fdeb6a33169c1515af944d5b4866764 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 8 Aug 2024 01:32:47 +0200 Subject: [PATCH 105/198] chore: cleanup --- iroh-willow/src/session/run.rs | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index ede29aa4c3..b542bb4691 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -115,19 +115,6 @@ pub(crate) async fn run_session( (None, None) }; - // let net_fut = with_span(error_span!("net"), async { - // // TODO: awaiting the net task handle hangs - // drop(join_handle); - // // let res = join_handle.await; - // // debug!(?res, "net tasks finished"); - // // match res { - // // Ok(Ok(())) => Ok(()), - // // Ok(Err(err)) => Err(Error::Net(err)), - // // Err(err) => Err(Error::Net(err.into())), - // // } - // Ok(()) - // }); - let mut intents = intents::IntentDispatcher::new(store.auth().clone(), initial_intents); let intents_fut = with_span(error_span!("intents"), async { use intents::Output; @@ -341,7 +328,6 @@ pub(crate) async fn run_session( }); let result = ( - // net_fut, intents_fut, control_loop, data_loop, From 04b2f47eed0612e422ea6efe0482b284d7b25fc7 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 8 Aug 2024 10:26:30 +0200 Subject: [PATCH 106/198] feat: graceful connection termination --- iroh-willow/src/engine/peer_manager.rs | 174 ++++++++++++------------- iroh-willow/src/proto/sync.rs | 6 +- iroh-willow/src/session.rs | 24 +++- iroh-willow/src/session/intents.rs | 2 +- iroh-willow/src/session/run.rs | 69 ++++++++-- iroh-willow/tests/basic.rs | 44 ++++++- 6 
files changed, 213 insertions(+), 106 deletions(-) diff --git a/iroh-willow/src/engine/peer_manager.rs b/iroh-willow/src/engine/peer_manager.rs index 90edbf26cf..c790f06e25 100644 --- a/iroh-willow/src/engine/peer_manager.rs +++ b/iroh-willow/src/engine/peer_manager.rs @@ -16,8 +16,8 @@ use tokio::{ }; use tokio_stream::{wrappers::ReceiverStream, StreamMap}; -use tokio_util::sync::CancellationToken; -use tracing::{debug, error, error_span, instrument, trace, warn, Instrument, Span}; +use tokio_util::{either::Either, sync::CancellationToken}; +use tracing::{debug, error_span, instrument, trace, warn, Instrument, Span}; use crate::{ net::{establish, prepare_channels, ChannelStreams, ConnHandle, ALPN}, @@ -25,6 +25,7 @@ use crate::{ session::{ intents::{EventKind, Intent}, Error, Interests, Role, SessionEvent, SessionHandle, SessionInit, SessionUpdate, + WhoCancelled, }, }; @@ -121,6 +122,7 @@ pub(super) struct PeerManager { peers: HashMap, accept_handlers: AcceptHandlers, conn_tasks: JoinSet<(NodeId, Result)>, + shutting_down: bool, } impl PeerManager { @@ -138,39 +140,65 @@ impl PeerManager { peers: Default::default(), accept_handlers: AcceptHandlers::new(accept_opts), conn_tasks: Default::default(), + shutting_down: false, } } pub(super) async fn run(mut self) -> Result<(), Error> { + let mut shutdown_reply = None; + let shutdown_timeout = Either::Left(std::future::pending::<()>()); + tokio::pin!(shutdown_timeout); loop { tokio::select! 
{ - Some(input) = self.inbox.recv() => { + Some(input) = self.inbox.recv(), if !self.shutting_down => { trace!(?input, "tick: inbox"); match input { Input::SubmitIntent { peer, intent } => self.submit_intent(peer, intent).await, Input::HandleConnection { conn } => self.handle_connection(conn).await, Input::Shutdown { reply } => { - self.handle_shutdown().await; - reply.send(()).ok(); - break; + self.init_shutdown(); + if self.conn_tasks.is_empty() { + reply.send(()).ok(); + break; + } else { + shutdown_reply = Some(reply); + shutdown_timeout.set(Either::Right(tokio::time::sleep(GRACEFUL_SHUTDOWN_TIMEOUT))); + } } } } + _ = &mut shutdown_timeout => { + trace!("tick: shutdown timeout"); + debug!( + remaining=self.conn_tasks.len(), + "terminating all connections timed out, abort remaining connections" + ); + // TODO: We do not catch panics here. + self.conn_tasks.shutdown().await; + break; + } Some((session_id, event)) = self.session_events_rx.next(), if !self.session_events_rx.is_empty() => { trace!(?session_id, ?event, "tick: event"); self.handle_session_event(session_id, event); } Some(res) = self.conn_tasks.join_next(), if !self.conn_tasks.is_empty() => { - trace!("tick: conn task joined"); + trace!(active=self.conn_tasks.len(), "tick: conn task joined"); match res { Err(err) if err.is_cancelled() => continue, Err(err) => Err(err).context("conn task panicked")?, Ok((peer, out)) => self.handle_conn_output(peer, out).await?, } + if self.shutting_down && self.conn_tasks.is_empty() { + debug!("all connections gracefully terminated"); + break; + } } else => break, } } + if let Some(reply) = shutdown_reply { + reply.send(()).ok(); + } Ok(()) } @@ -261,14 +289,21 @@ impl PeerManager { #[instrument("conn", skip_all, fields(peer=%peer.fmt_short()))] fn handle_session_event(&mut self, peer: NodeId, event: SessionEvent) { - trace!(peer=%peer.fmt_short(), ?event, "session event"); + trace!(?event, "session event"); match event { SessionEvent::Established => {} - 
SessionEvent::Complete { result } => { - debug!(?result, "session complete"); - // TODO: I don't think we need to do anything here. The connection tasks terminate by themselves: - // The send loops are closed from `session::run` via `ChannelSenders::close_all`, - // and the receive loops terminate once the other side closes their send loops. + SessionEvent::Complete { + result, + who_cancelled, + senders, + } => { + debug!(error=?result.err(), ?who_cancelled, "session complete"); + senders.close_all(); + let Some(peer_info) = self.peers.get_mut(&peer) else { + warn!("got session complete for unknown peer"); + return; + }; + peer_info.who_cancelled = Some(who_cancelled); } } } @@ -299,6 +334,11 @@ impl PeerManager { initial_transmission, channel_streams, }) => { + if self.shutting_down { + debug!("connection became ready while shutting down, abort"); + conn.close(ERROR_CODE_IGNORE_CONN.into(), b"shutting-down"); + return Ok(()); + } // TODO: Here we should check again that we are not establishing a duplicate connection. debug!("connection ready: init session"); let (channels, fut) = prepare_channels(channel_streams)?; @@ -329,40 +369,43 @@ impl PeerManager { peer_info.abort_handle = Some(abort_handle); } Ok(ConnStep::Done { conn }) => { - debug!("connection loop finished"); - // TODO: Instead of using our role (alfie vs. betty), the party who sent the last - // meaningful message should wait for the other end to terminate the connection, I think. - // - // In other words, the connection may only be closed by the party who received the last meaningful message. 
- if peer_info.our_role == Role::Alfie { - conn.close(ERROR_CODE_OK.into(), b"bye"); - } + trace!("connection loop finished"); + let who_cancelled = peer_info + .who_cancelled + .take() + .unwrap_or(WhoCancelled::NoneDid); + let me = self.endpoint.node_id(); let fut = async move { + let we_close_first = match who_cancelled { + WhoCancelled::WeDid => false, + WhoCancelled::TheyDid => true, + WhoCancelled::BothDid => me > peer, + WhoCancelled::NoneDid => true, + }; + if we_close_first { + conn.close(ERROR_CODE_OK.into(), b"bye"); + } let reason = conn.closed().await; + let is_graceful = match &reason { + ConnectionError::LocallyClosed if we_close_first => true, + ConnectionError::ApplicationClosed(frame) + if frame.error_code == ERROR_CODE_OK.into() => + { + !we_close_first || who_cancelled == WhoCancelled::NoneDid + } + _ => false, + }; + if !is_graceful { + warn!(?reason, "connection was not closed gracefully"); + } else { + debug!("connection closed gracefully"); + } Ok(ConnStep::Closed { conn, reason }) }; let abort_handle = spawn_conn_task(&mut self.conn_tasks, &peer_info, fut); peer_info.abort_handle = Some(abort_handle); } - Ok(ConnStep::Closed { reason, conn }) => { - // TODO: Instead of using our role (alfie vs. betty), the party who sent the last - // meaningful message should wait for the other end to terminate the connection, I think. - let locally_closed = peer_info.our_role == Role::Alfie; - let is_graceful = match &reason { - ConnectionError::LocallyClosed if locally_closed => true, - ConnectionError::ApplicationClosed(frame) - if !locally_closed && frame.error_code == ERROR_CODE_OK.into() => - { - true - } - _ => false, - }; - if !is_graceful { - warn!(?reason, "connection was not closed gracefully"); - } else { - debug!("connection closed gracefully"); - } - + Ok(ConnStep::Closed { reason: _, conn }) => { self.peers.remove(&peer); drop(conn); } @@ -370,12 +413,8 @@ impl PeerManager { Ok(()) } - /// Shuts down all connection tasks. 
- /// - /// Attempts to shutdown connections for active peers gracefully within [`GRACEFUL_SHUTDOWN_TIMEOUT`]. - /// Aborts connections for not-yet-active peers immediately. - /// Aborts all connections after the graceful timeout elapsed. - async fn handle_shutdown(&mut self) { + fn init_shutdown(&mut self) { + self.shutting_down = true; for peer in self.peers.values() { match &peer.state { PeerState::None => {} @@ -392,47 +431,6 @@ impl PeerManager { } } } - - let join_conns_fut = async { - while let Some(res) = self.conn_tasks.join_next().await { - trace!("tick: conn task joined"); - match res { - Err(err) if err.is_cancelled() => continue, - Err(err) => { - error!(?err, "conn task panicked during shutdown"); - } - Ok((peer, out)) => { - match &out { - Err(_) | Ok(ConnStep::Done { .. }) | Ok(ConnStep::Closed { .. }) => { - if let Err(err) = self.handle_conn_output(peer, out).await { - error!(?err, "conn task output error during shutdown"); - } - } - _ => { - // We should not reach this state, as we abort all tasks that lead to output other than done or close. - // However, tokio docs for `AbortHandle::abort` state that cancelling a task that might - // already have been completed only triggers a cancelled JoinError *most likely*... - warn!(?peer, ?out, "expected tasks that lead to output other than done or close to be aborted"); - } - } - } - } - } - }; - - match tokio::time::timeout(GRACEFUL_SHUTDOWN_TIMEOUT, join_conns_fut).await { - Ok(()) => { - debug!("all connections gracefully terminated"); - } - Err(_) => { - debug!( - remaining=self.conn_tasks.len(), - "terminating all connections at shutdown timed out, abort remaining connections" - ); - // TODO: We do not catch panics here. 
- self.conn_tasks.shutdown().await; - } - } } } @@ -456,6 +454,7 @@ struct PeerInfo { state: PeerState, pending_intents: Vec, span: Span, + who_cancelled: Option, } impl PeerInfo { @@ -467,6 +466,7 @@ impl PeerInfo { state: PeerState::None, pending_intents: Vec::new(), span: error_span!("conn", peer=%peer.fmt_short()), + who_cancelled: None, } } } diff --git a/iroh-willow/src/proto/sync.rs b/iroh-willow/src/proto/sync.rs index 781ba7bed2..64c206e773 100644 --- a/iroh-willow/src/proto/sync.rs +++ b/iroh-willow/src/proto/sync.rs @@ -429,6 +429,8 @@ pub enum Message { ControlApologise(ControlApologise), #[debug("{:?}", _0)] ControlFreeHandle(ControlFreeHandle), + RequestClose, + ConfirmClose, } impl Message { @@ -520,7 +522,9 @@ impl Message { | Message::ControlPlead(_) | Message::ControlAnnounceDropping(_) | Message::ControlApologise(_) - | Message::ControlFreeHandle(_) => Channel::Control, + | Message::ControlFreeHandle(_) + | Message::RequestClose + | Message::ConfirmClose => Channel::Control, } } } diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index f04e0db2cf..94e9535e42 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -3,6 +3,7 @@ use std::{ sync::Arc, }; +use channels::ChannelSenders; use tokio::sync::mpsc; use tokio_util::sync::CancellationToken; @@ -78,6 +79,10 @@ impl Interests { pub fn builder() -> SelectBuilder { SelectBuilder::default() } + + pub fn all() -> Self { + Self::All + } } #[derive(Default, Debug)] @@ -176,10 +181,23 @@ impl EventSender { } } -#[derive(Debug)] +#[derive(derive_more::Debug)] pub enum SessionEvent { Established, - Complete { result: Result<(), Arc> }, + Complete { + result: Result<(), Arc>, + who_cancelled: WhoCancelled, + #[debug("ChannelSenders")] + senders: ChannelSenders, + }, +} + +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub enum WhoCancelled { + WeDid, + TheyDid, + BothDid, + NoneDid, } #[derive(Debug)] @@ -195,7 +213,7 @@ impl SessionHandle { /// Returns an error if 
the session failed to complete. pub async fn complete(&mut self) -> Result<(), Arc> { while let Some(event) = self.event_rx.recv().await { - if let SessionEvent::Complete { result } = event { + if let SessionEvent::Complete { result, .. } = event { return result; } } diff --git a/iroh-willow/src/session/intents.rs b/iroh-willow/src/session/intents.rs index b22029bf8b..e6f3894ba3 100644 --- a/iroh-willow/src/session/intents.rs +++ b/iroh-willow/src/session/intents.rs @@ -144,7 +144,7 @@ impl Intent { } } -#[derive(Debug)] +#[derive(Debug, Eq, PartialEq)] pub enum Completion { /// All interests were reconciled. Complete, diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index b542bb4691..826b7fa996 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -1,4 +1,10 @@ -use std::{future::Future, sync::Arc}; +use std::{ + future::Future, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, +}; use futures_concurrency::{future::TryJoin, stream::StreamExt as _}; use futures_lite::{Stream, StreamExt as _}; @@ -20,7 +26,7 @@ use crate::{ pai_finder::{self as pai, PaiFinder}, reconciler, static_tokens::StaticTokens, - Channels, Error, EventSender, Role, SessionEvent, SessionId, SessionUpdate, + Channels, Error, EventSender, Role, SessionEvent, SessionId, SessionUpdate, WhoCancelled, }, store::{traits::Storage, Store}, util::{channel::Receiver, stream::Cancelable}, @@ -81,7 +87,7 @@ pub(crate) async fn run_session( debug!(role = ?our_role, ?mode, "start session"); // Make all our receivers close once the cancel_token is triggered. 
- let control_recv = Cancelable::new(control_recv, cancel_token.clone()); + // let control_recv = Cancelable::new(control_recv, cancel_token.clone()); let reconciliation_recv = Cancelable::new(reconciliation_recv, cancel_token.clone()); let intersection_recv = Cancelable::new(intersection_recv, cancel_token.clone()); let mut static_tokens_recv = Cancelable::new(static_tokens_recv, cancel_token.clone()); @@ -291,6 +297,9 @@ pub(crate) async fn run_session( Ok(()) }); + let we_cancelled = AtomicBool::new(false); + let mut who_cancelled = WhoCancelled::NoneDid; + let control_loop = with_span(error_span!("control"), async { let res = control_loop( control_recv, @@ -299,13 +308,23 @@ pub(crate) async fn run_session( &channel_sender, &pai_inbox, &event_sender, + &we_cancelled, ) .await; if !cancel_token.is_cancelled() { - debug!("close session (control channel closed)"); + debug!("close session (closed by peer)"); cancel_token.cancel(); } - res + match res { + Ok(who) => { + who_cancelled = who; + Ok(()) + } + Err(err) => { + who_cancelled = WhoCancelled::NoneDid; + Err(err) + } + } }); let aoi_recv_loop = with_span(error_span!("aoi_recv"), async { @@ -327,6 +346,13 @@ pub(crate) async fn run_session( Ok(()) }); + let cancel_fut = async { + cancel_token.cancelled().await; + we_cancelled.store(true, Ordering::Relaxed); + channel_sender.send(Message::RequestClose).await?; + Ok(()) + }; + let result = ( intents_fut, control_loop, @@ -338,6 +364,7 @@ pub(crate) async fn run_session( token_recv_loop, caps_recv_loop, aoi_recv_loop, + cancel_fut, ) .try_join() .await; @@ -350,20 +377,18 @@ pub(crate) async fn run_session( // Unsubscribe from the store. This stops the data send task. store.entries().unsubscribe(&session_id); - // Close our channel senders. - // This will stop the network send loop after all pending data has been sent. 
- channel_sender.close_all(); - event_sender .send(SessionEvent::Complete { result: result.clone(), + who_cancelled, + senders: channel_sender, }) .await .ok(); match result { Ok(_) => { - debug!("session complete"); + debug!(?who_cancelled, "session complete"); Ok(()) } Err(error) => { @@ -375,13 +400,14 @@ pub(crate) async fn run_session( } async fn control_loop( - mut control_recv: Cancelable>, + mut control_recv: Receiver, our_role: Role, caps: &Capabilities, sender: &ChannelSenders, pai_inbox: &Sender, event_sender: &EventSender, -) -> Result<(), Error> { + we_cancelled: &AtomicBool, +) -> Result { // Reveal our nonce. let reveal_message = caps.reveal_commitment()?; sender.send(reveal_message).await?; @@ -395,6 +421,8 @@ async fn control_loop( sender.send(msg).await?; } + let mut who_cancelled = WhoCancelled::NoneDid; + // Handle incoming messages on the control channel. while let Some(message) = control_recv.try_next().await? { match message { @@ -428,11 +456,26 @@ async fn control_loop( )) .await?; } + Message::RequestClose => { + debug!("received close request"); + if we_cancelled.load(Ordering::Relaxed) { + who_cancelled = WhoCancelled::BothDid; + break; + } else { + who_cancelled = WhoCancelled::TheyDid; + sender.send(Message::ConfirmClose).await?; + break; + } + } + Message::ConfirmClose => { + who_cancelled = WhoCancelled::WeDid; + break; + } _ => return Err(Error::UnsupportedMessage), } } - Ok(()) + Ok(who_cancelled) } fn cancelable_channel( diff --git a/iroh-willow/tests/basic.rs b/iroh-willow/tests/basic.rs index 6b9ffd78d3..607816bd45 100644 --- a/iroh-willow/tests/basic.rs +++ b/iroh-willow/tests/basic.rs @@ -1,10 +1,15 @@ +use std::time::Duration; + use anyhow::Result; use futures_concurrency::future::TryJoin; use futures_lite::StreamExt; use iroh_willow::{ proto::{grouping::Area, willow::Path}, - session::{intents::EventKind, Interests, SessionInit, SessionMode}, + session::{ + intents::{Completion, EventKind}, + Interests, SessionInit, 
SessionMode, + }, }; use self::util::{create_rng, insert, setup_and_delegate, spawn_two, Peer}; @@ -106,8 +111,45 @@ async fn peer_manager_two_intents() -> Result<()> { task_foo_path.await.unwrap(); task_bar_path.await.unwrap(); + // tokio::time::sleep(std::time::Duration::from_secs(1)).await; + + [alfie, betty].map(Peer::shutdown).try_join().await?; + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread")] +async fn peer_manager_shutdown_immediate() -> Result<()> { + iroh_test::logging::setup_multithreaded(); + let mut rng = create_rng("peer_manager_update_intent"); + + let [alfie, betty] = spawn_two(&mut rng).await?; + let (_namespace, _alfie_user, _betty_user) = setup_and_delegate(&alfie, &betty).await?; + let betty_node_id = betty.node_id(); + let mut intent = alfie + .sync_with_peer(betty_node_id, SessionInit::reconcile_once(Interests::all())) + .await?; + let completion = intent.complete().await?; + assert_eq!(completion, Completion::Complete); [alfie, betty].map(Peer::shutdown).try_join().await?; + Ok(()) +} + +#[tokio::test(flavor = "multi_thread")] +async fn peer_manager_shutdown_timeout() -> Result<()> { + iroh_test::logging::setup_multithreaded(); + let mut rng = create_rng("peer_manager_update_intent"); + let [alfie, betty] = spawn_two(&mut rng).await?; + let (_namespace, _alfie_user, _betty_user) = setup_and_delegate(&alfie, &betty).await?; + let betty_node_id = betty.node_id(); + let mut intent = alfie + .sync_with_peer(betty_node_id, SessionInit::reconcile_once(Interests::all())) + .await?; + let completion = intent.complete().await?; + assert_eq!(completion, Completion::Complete); + tokio::time::sleep(Duration::from_secs(1)).await; + [alfie, betty].map(Peer::shutdown).try_join().await?; Ok(()) } From d3f4ab55e57d0fe97e9fdaa9601486ec43bf7631 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 8 Aug 2024 11:20:35 +0200 Subject: [PATCH 107/198] refactor: move graceful connection termination out of WGPS protocol --- 
iroh-willow/src/engine/peer_manager.rs | 62 +++++++------------- iroh-willow/src/net.rs | 79 +++++++++++++++++++++++++- iroh-willow/src/proto/sync.rs | 6 +- iroh-willow/src/session.rs | 11 +--- iroh-willow/src/session/run.rs | 70 +++++------------------ 5 files changed, 112 insertions(+), 116 deletions(-) diff --git a/iroh-willow/src/engine/peer_manager.rs b/iroh-willow/src/engine/peer_manager.rs index c790f06e25..a2dc5756cd 100644 --- a/iroh-willow/src/engine/peer_manager.rs +++ b/iroh-willow/src/engine/peer_manager.rs @@ -20,22 +20,19 @@ use tokio_util::{either::Either, sync::CancellationToken}; use tracing::{debug, error_span, instrument, trace, warn, Instrument, Span}; use crate::{ - net::{establish, prepare_channels, ChannelStreams, ConnHandle, ALPN}, + net::{ + establish, prepare_channels, terminate_gracefully, ChannelStreams, ConnHandle, ALPN, + ERROR_CODE_IGNORE_CONN, + }, proto::sync::{AccessChallenge, InitialTransmission}, session::{ intents::{EventKind, Intent}, Error, Interests, Role, SessionEvent, SessionHandle, SessionInit, SessionUpdate, - WhoCancelled, }, }; use super::actor::ActorHandle; -/// Our QUIC application error code for graceful connection termination. -const ERROR_CODE_OK: u32 = 1; -/// Our QUIC application error code when closing connections during establishment -/// because we prefer another existing connection to the same peer. -const ERROR_CODE_IGNORE_CONN: u32 = 2; /// Timeout at shutdown after which we abort connections that failed to terminate gracefully. 
const GRACEFUL_SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(10); @@ -294,16 +291,16 @@ impl PeerManager { SessionEvent::Established => {} SessionEvent::Complete { result, - who_cancelled, + we_cancelled, senders, } => { - debug!(error=?result.err(), ?who_cancelled, "session complete"); + trace!(error=?result.err(), ?we_cancelled, "session complete"); senders.close_all(); let Some(peer_info) = self.peers.get_mut(&peer) else { warn!("got session complete for unknown peer"); return; }; - peer_info.who_cancelled = Some(who_cancelled); + peer_info.we_cancelled = we_cancelled; } } } @@ -370,42 +367,20 @@ impl PeerManager { } Ok(ConnStep::Done { conn }) => { trace!("connection loop finished"); - let who_cancelled = peer_info - .who_cancelled - .take() - .unwrap_or(WhoCancelled::NoneDid); + let we_cancelled = peer_info.we_cancelled; let me = self.endpoint.node_id(); let fut = async move { - let we_close_first = match who_cancelled { - WhoCancelled::WeDid => false, - WhoCancelled::TheyDid => true, - WhoCancelled::BothDid => me > peer, - WhoCancelled::NoneDid => true, - }; - if we_close_first { - conn.close(ERROR_CODE_OK.into(), b"bye"); - } - let reason = conn.closed().await; - let is_graceful = match &reason { - ConnectionError::LocallyClosed if we_close_first => true, - ConnectionError::ApplicationClosed(frame) - if frame.error_code == ERROR_CODE_OK.into() => - { - !we_close_first || who_cancelled == WhoCancelled::NoneDid - } - _ => false, - }; - if !is_graceful { - warn!(?reason, "connection was not closed gracefully"); - } else { - debug!("connection closed gracefully"); - } - Ok(ConnStep::Closed { conn, reason }) + let error = terminate_gracefully(&conn, me, peer, we_cancelled).await?; + Ok(ConnStep::Closed { conn, error }) }; let abort_handle = spawn_conn_task(&mut self.conn_tasks, &peer_info, fut); peer_info.abort_handle = Some(abort_handle); } - Ok(ConnStep::Closed { reason: _, conn }) => { + Ok(ConnStep::Closed { error, conn }) => { + match error { + None => 
debug!("connection closed gracefully"), + Some(error) => warn!(?error, "failed to close connection gracefully"), + } self.peers.remove(&peer); drop(conn); } @@ -454,7 +429,8 @@ struct PeerInfo { state: PeerState, pending_intents: Vec, span: Span, - who_cancelled: Option, + // who_cancelled: Option, + we_cancelled: bool, } impl PeerInfo { @@ -466,7 +442,7 @@ impl PeerInfo { state: PeerState::None, pending_intents: Vec::new(), span: error_span!("conn", peer=%peer.fmt_short()), - who_cancelled: None, + we_cancelled: false, } } } @@ -493,7 +469,7 @@ enum ConnStep { }, Closed { conn: Connection, - reason: ConnectionError, + error: Option, }, } diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 825e97e477..fe75228553 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -1,10 +1,10 @@ use std::future::Future; -use anyhow::{ensure, Context as _, Result}; +use anyhow::{anyhow, ensure, Context as _, Result}; use futures_concurrency::future::TryJoin; use futures_util::future::TryFutureExt; use iroh_base::key::NodeId; -use iroh_net::endpoint::{Connection, RecvStream, SendStream}; +use iroh_net::endpoint::{Connection, ConnectionError, RecvStream, SendStream}; use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tracing::{debug, trace}; @@ -30,6 +30,12 @@ pub const CHANNEL_CAP: usize = 1024 * 64; /// The ALPN protocol name for iroh-willow. pub const ALPN: &[u8] = b"iroh-willow/0"; +/// Our QUIC application error code for graceful connection termination. +pub const ERROR_CODE_OK: u32 = 1; +/// Our QUIC application error code when closing connections during establishment +/// because we prefer another existing connection to the same peer. +pub const ERROR_CODE_IGNORE_CONN: u32 = 2; + /// The handle to an active peer connection. 
/// /// This is passed into the session loop, where it is used to send and receive messages @@ -250,6 +256,75 @@ async fn send_loop(mut send_stream: SendStream, channel_reader: Reader) -> Resul Ok(()) } +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +enum WhoCancelled { + WeDid, + TheyDid, + BothDid, + NoneDid, +} + +pub(crate) async fn terminate_gracefully( + conn: &Connection, + me: NodeId, + peer: NodeId, + we_cancelled: bool, +) -> Result> { + trace!(?we_cancelled, "terminating connection"); + let send = async { + let mut send_stream = conn.open_uni().await?; + let data = if we_cancelled { 1u8 } else { 0u8 }; + send_stream.write_u8(data).await?; + send_stream.finish().await?; + Ok(()) + }; + + let recv = async { + let mut recv_stream = conn.accept_uni().await?; + let data = recv_stream.read_u8().await?; + recv_stream.read_to_end(0).await?; + let they_cancelled = match data { + 0 => false, + 1 => true, + _ => return Err(anyhow!("received unexpected closing byte from peer")), + }; + Ok(they_cancelled) + }; + + let (_, they_cancelled) = (send, recv).try_join().await?; + + let who_cancelled = match (we_cancelled, they_cancelled) { + (true, false) => WhoCancelled::WeDid, + (false, true) => WhoCancelled::TheyDid, + (true, true) => WhoCancelled::BothDid, + (false, false) => WhoCancelled::NoneDid, + }; + + let we_close_first = match who_cancelled { + WhoCancelled::WeDid => false, + WhoCancelled::TheyDid => true, + WhoCancelled::BothDid => me > peer, + WhoCancelled::NoneDid => true, + }; + debug!(?who_cancelled, "connection complete"); + if we_close_first { + conn.close(ERROR_CODE_OK.into(), b"bye"); + } + let reason = conn.closed().await; + let is_graceful = match &reason { + ConnectionError::LocallyClosed if we_close_first => true, + ConnectionError::ApplicationClosed(frame) if frame.error_code == ERROR_CODE_OK.into() => { + !we_close_first || who_cancelled == WhoCancelled::NoneDid + } + _ => false, + }; + if !is_graceful { + Ok(Some(reason)) + } else { + Ok(None) + } 
+} + #[cfg(test)] mod tests { use std::{ diff --git a/iroh-willow/src/proto/sync.rs b/iroh-willow/src/proto/sync.rs index 64c206e773..781ba7bed2 100644 --- a/iroh-willow/src/proto/sync.rs +++ b/iroh-willow/src/proto/sync.rs @@ -429,8 +429,6 @@ pub enum Message { ControlApologise(ControlApologise), #[debug("{:?}", _0)] ControlFreeHandle(ControlFreeHandle), - RequestClose, - ConfirmClose, } impl Message { @@ -522,9 +520,7 @@ impl Message { | Message::ControlPlead(_) | Message::ControlAnnounceDropping(_) | Message::ControlApologise(_) - | Message::ControlFreeHandle(_) - | Message::RequestClose - | Message::ConfirmClose => Channel::Control, + | Message::ControlFreeHandle(_) => Channel::Control, } } } diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 94e9535e42..c4f712c6c7 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -186,20 +186,13 @@ pub enum SessionEvent { Established, Complete { result: Result<(), Arc>, - who_cancelled: WhoCancelled, + // who_cancelled: WhoCancelled, + we_cancelled: bool, #[debug("ChannelSenders")] senders: ChannelSenders, }, } -#[derive(Debug, Clone, Copy, Eq, PartialEq)] -pub enum WhoCancelled { - WeDid, - TheyDid, - BothDid, - NoneDid, -} - #[derive(Debug)] pub struct SessionHandle { pub cancel_token: CancellationToken, diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 826b7fa996..4b7edf1171 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -1,10 +1,4 @@ -use std::{ - future::Future, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, -}; +use std::{future::Future, sync::Arc}; use futures_concurrency::{future::TryJoin, stream::StreamExt as _}; use futures_lite::{Stream, StreamExt as _}; @@ -26,7 +20,7 @@ use crate::{ pai_finder::{self as pai, PaiFinder}, reconciler, static_tokens::StaticTokens, - Channels, Error, EventSender, Role, SessionEvent, SessionId, SessionUpdate, WhoCancelled, + Channels, Error, EventSender, Role, 
SessionEvent, SessionId, SessionUpdate, }, store::{traits::Storage, Store}, util::{channel::Receiver, stream::Cancelable}, @@ -87,7 +81,7 @@ pub(crate) async fn run_session( debug!(role = ?our_role, ?mode, "start session"); // Make all our receivers close once the cancel_token is triggered. - // let control_recv = Cancelable::new(control_recv, cancel_token.clone()); + let control_recv = Cancelable::new(control_recv, cancel_token.clone()); let reconciliation_recv = Cancelable::new(reconciliation_recv, cancel_token.clone()); let intersection_recv = Cancelable::new(intersection_recv, cancel_token.clone()); let mut static_tokens_recv = Cancelable::new(static_tokens_recv, cancel_token.clone()); @@ -297,8 +291,7 @@ pub(crate) async fn run_session( Ok(()) }); - let we_cancelled = AtomicBool::new(false); - let mut who_cancelled = WhoCancelled::NoneDid; + let mut we_cancelled = false; let control_loop = with_span(error_span!("control"), async { let res = control_loop( @@ -308,23 +301,15 @@ pub(crate) async fn run_session( &channel_sender, &pai_inbox, &event_sender, - &we_cancelled, ) .await; if !cancel_token.is_cancelled() { debug!("close session (closed by peer)"); cancel_token.cancel(); + } else { + we_cancelled = true; } - match res { - Ok(who) => { - who_cancelled = who; - Ok(()) - } - Err(err) => { - who_cancelled = WhoCancelled::NoneDid; - Err(err) - } - } + res }); let aoi_recv_loop = with_span(error_span!("aoi_recv"), async { @@ -346,13 +331,6 @@ pub(crate) async fn run_session( Ok(()) }); - let cancel_fut = async { - cancel_token.cancelled().await; - we_cancelled.store(true, Ordering::Relaxed); - channel_sender.send(Message::RequestClose).await?; - Ok(()) - }; - let result = ( intents_fut, control_loop, @@ -364,7 +342,6 @@ pub(crate) async fn run_session( token_recv_loop, caps_recv_loop, aoi_recv_loop, - cancel_fut, ) .try_join() .await; @@ -380,19 +357,16 @@ pub(crate) async fn run_session( event_sender .send(SessionEvent::Complete { result: result.clone(), - 
who_cancelled, + we_cancelled, senders: channel_sender, }) .await .ok(); + debug!(error=?result.as_ref().err(), ?we_cancelled, "session complete"); match result { - Ok(_) => { - debug!(?who_cancelled, "session complete"); - Ok(()) - } + Ok(()) => Ok(()), Err(error) => { - debug!(?error, "session failed"); intents.abort_all(error.clone()).await; Err(error) } @@ -400,14 +374,13 @@ pub(crate) async fn run_session( } async fn control_loop( - mut control_recv: Receiver, + mut control_recv: Cancelable>, our_role: Role, caps: &Capabilities, sender: &ChannelSenders, pai_inbox: &Sender, event_sender: &EventSender, - we_cancelled: &AtomicBool, -) -> Result { +) -> Result<(), Error> { // Reveal our nonce. let reveal_message = caps.reveal_commitment()?; sender.send(reveal_message).await?; @@ -421,8 +394,6 @@ async fn control_loop( sender.send(msg).await?; } - let mut who_cancelled = WhoCancelled::NoneDid; - // Handle incoming messages on the control channel. while let Some(message) = control_recv.try_next().await? 
{ match message { @@ -456,26 +427,11 @@ async fn control_loop( )) .await?; } - Message::RequestClose => { - debug!("received close request"); - if we_cancelled.load(Ordering::Relaxed) { - who_cancelled = WhoCancelled::BothDid; - break; - } else { - who_cancelled = WhoCancelled::TheyDid; - sender.send(Message::ConfirmClose).await?; - break; - } - } - Message::ConfirmClose => { - who_cancelled = WhoCancelled::WeDid; - break; - } _ => return Err(Error::UnsupportedMessage), } } - Ok(who_cancelled) + Ok(()) } fn cancelable_channel( From c9fc2ab2ed7f92ed5e69af231038c29bb9ed5216 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 8 Aug 2024 12:07:14 +0200 Subject: [PATCH 108/198] cleanups and test fixes --- iroh-willow/src/engine/peer_manager.rs | 75 +++++++++++++------- iroh-willow/src/net.rs | 94 +++++++++++++++++++------- iroh-willow/src/session.rs | 14 ++-- iroh-willow/src/session/error.rs | 2 + iroh-willow/src/session/run.rs | 11 +-- iroh-willow/tests/basic.rs | 76 +++++++++++---------- 6 files changed, 178 insertions(+), 94 deletions(-) diff --git a/iroh-willow/src/engine/peer_manager.rs b/iroh-willow/src/engine/peer_manager.rs index a2dc5756cd..0a652e4f1e 100644 --- a/iroh-willow/src/engine/peer_manager.rs +++ b/iroh-willow/src/engine/peer_manager.rs @@ -1,6 +1,6 @@ use std::{collections::HashMap, future::Future, sync::Arc, time::Duration}; -use anyhow::{Context, Result}; +use anyhow::{anyhow, Context, Result}; use futures_buffered::join_all; use futures_lite::{future::Boxed, StreamExt}; @@ -231,11 +231,12 @@ impl PeerManager { let abort_handle = spawn_conn_task(&mut self.conn_tasks, &peer_info, fut); peer_info.abort_handle = Some(abort_handle); peer_info.our_role = Role::Betty; - peer_info.pending_intents.push(intent); - peer_info.state = PeerState::Pending; + peer_info.state = PeerState::Pending { + intents: vec![intent], + }; } - PeerState::Pending => { - peer_info.pending_intents.push(intent); + PeerState::Pending { ref mut intents } => { + 
intents.push(intent); debug!("ignore incoming connection (already pending)"); conn.close(ERROR_CODE_IGNORE_CONN.into(), b"duplicate-already-active"); } @@ -269,11 +270,12 @@ impl PeerManager { }; let abort_handle = spawn_conn_task(&mut self.conn_tasks, &peer_info, fut); peer_info.abort_handle = Some(abort_handle); - peer_info.pending_intents.push(intent); - peer_info.state = PeerState::Pending; + peer_info.state = PeerState::Pending { + intents: vec![intent], + }; } - PeerState::Pending => { - peer_info.pending_intents.push(intent); + PeerState::Pending { ref mut intents } => { + intents.push(intent); } PeerState::Active { ref update_tx, .. } => { if let Err(err) = update_tx.send(SessionUpdate::SubmitIntent(intent)).await { @@ -295,11 +297,14 @@ impl PeerManager { senders, } => { trace!(error=?result.err(), ?we_cancelled, "session complete"); + // Close the channel senders. This will cause our send loops to close, + // which in turn causes the receive loops of the other peer to close. senders.close_all(); let Some(peer_info) = self.peers.get_mut(&peer) else { warn!("got session complete for unknown peer"); return; }; + // Store whether we initiated the termination. We will need this for the graceful termination logic later. peer_info.we_cancelled = we_cancelled; } } @@ -315,38 +320,59 @@ impl PeerManager { match out { Err(err) => { debug!(peer=%peer.fmt_short(), ?err, "conn task failed"); - let err = Arc::new(Error::Net(err)); let peer = self.peers.remove(&peer).expect("just checked"); - join_all( - peer.pending_intents - .into_iter() - .map(|intent| intent.send_abort(err.clone())), - ) - .await; + // If we were still in pending state, terminate all pending intents. + if let PeerState::Pending { intents } = peer.state { + let err = Arc::new(Error::Net(err)); + join_all( + intents + .into_iter() + .map(|intent| intent.send_abort(err.clone())), + ) + .await; + } // We don't need to cancel the session here. It will terminate because all receiver channels are closed. 
- return Ok(()); } Ok(ConnStep::Ready { conn, initial_transmission, channel_streams, }) => { + let PeerState::Pending { ref mut intents } = &mut peer_info.state else { + drop(conn); + // TODO: unreachable? + return Err(anyhow!( + "got connection ready for peer in non-pending state" + )); + }; + + let intents = std::mem::take(intents); + if self.shutting_down { debug!("connection became ready while shutting down, abort"); conn.close(ERROR_CODE_IGNORE_CONN.into(), b"shutting-down"); + if !intents.is_empty() { + let err = Arc::new(Error::ShuttingDown); + join_all( + intents + .into_iter() + .map(|intent| intent.send_abort(err.clone())), + ) + .await; + } return Ok(()); } + // TODO: Here we should check again that we are not establishing a duplicate connection. debug!("connection ready: init session"); let (channels, fut) = prepare_channels(channel_streams)?; - let willow_conn = ConnHandle { + let conn_handle = ConnHandle { initial_transmission, channels, our_role: peer_info.our_role, peer, }; - let intents = std::mem::take(&mut peer_info.pending_intents); - let session_handle = self.actor.init_session(willow_conn, intents).await?; + let session_handle = self.actor.init_session(conn_handle, intents).await?; let fut = fut.map_ok(move |()| ConnStep::Done { conn }); let abort_handle = spawn_conn_task(&mut self.conn_tasks, &peer_info, fut); @@ -393,7 +419,7 @@ impl PeerManager { for peer in self.peers.values() { match &peer.state { PeerState::None => {} - PeerState::Pending => { + PeerState::Pending { .. } => { // We are in pending state, which means the session has not yet been started. // Hard-abort the task and let the other peer handle the error. 
if let Some(abort_handle) = &peer.abort_handle { @@ -427,9 +453,7 @@ struct PeerInfo { our_role: Role, abort_handle: Option, state: PeerState, - pending_intents: Vec, span: Span, - // who_cancelled: Option, we_cancelled: bool, } @@ -440,7 +464,6 @@ impl PeerInfo { our_role, abort_handle: None, state: PeerState::None, - pending_intents: Vec::new(), span: error_span!("conn", peer=%peer.fmt_short()), we_cancelled: false, } @@ -450,7 +473,9 @@ impl PeerInfo { #[derive(Debug)] enum PeerState { None, - Pending, + Pending { + intents: Vec, + }, Active { cancel_token: CancellationToken, update_tx: mpsc::Sender, diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index fe75228553..615f2d0179 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -57,6 +57,7 @@ pub(crate) async fn establish( our_role: Role, our_nonce: AccessChallenge, ) -> Result<(InitialTransmission, ChannelStreams)> { + debug!(?our_role, "establishing connection"); // Run the initial transmission (which works on uni streams) concurrently // with opening/accepting the bi streams for the channels. 
( @@ -71,7 +72,6 @@ async fn initial_transmission( conn: &Connection, our_nonce: AccessChallenge, ) -> Result { - debug!("start initial transmission"); let challenge_hash = our_nonce.hash(); let mut send_stream = conn.open_uni().await?; send_stream.write_u8(MAX_PAYLOAD_SIZE_POWER).await?; @@ -256,14 +256,6 @@ async fn send_loop(mut send_stream: SendStream, channel_reader: Reader) -> Resul Ok(()) } -#[derive(Debug, Clone, Copy, Eq, PartialEq)] -enum WhoCancelled { - WeDid, - TheyDid, - BothDid, - NoneDid, -} - pub(crate) async fn terminate_gracefully( conn: &Connection, me: NodeId, @@ -293,6 +285,14 @@ pub(crate) async fn terminate_gracefully( let (_, they_cancelled) = (send, recv).try_join().await?; + #[derive(Debug)] + enum WhoCancelled { + WeDid, + TheyDid, + BothDid, + NoneDid, + } + let who_cancelled = match (we_cancelled, they_cancelled) { (true, false) => WhoCancelled::WeDid, (false, true) => WhoCancelled::TheyDid, @@ -314,7 +314,7 @@ pub(crate) async fn terminate_gracefully( let is_graceful = match &reason { ConnectionError::LocallyClosed if we_close_first => true, ConnectionError::ApplicationClosed(frame) if frame.error_code == ERROR_CODE_OK.into() => { - !we_close_first || who_cancelled == WhoCancelled::NoneDid + !we_close_first || matches!(who_cancelled, WhoCancelled::NoneDid) } _ => false, }; @@ -325,6 +325,10 @@ pub(crate) async fn terminate_gracefully( } } +/// This test module contains two integration tests for the net and session run module. +/// +/// They were written before the peer_manager module existed, and thus are quite verbose. +/// Still going to keep them around for now as a safe guard. 
#[cfg(test)] mod tests { use std::{ @@ -344,7 +348,7 @@ mod tests { auth::{CapSelector, DelegateTo, RestrictArea}, engine::ActorHandle, form::{AuthForm, EntryForm, PayloadForm, SubspaceForm, TimestampForm}, - net::ConnHandle, + net::{terminate_gracefully, ConnHandle}, proto::{ grouping::ThreeDRange, keys::{NamespaceId, NamespaceKind, UserId}, @@ -371,14 +375,14 @@ mod tests { our_role: Role, our_nonce: AccessChallenge, intents: Vec, - ) -> Result { + ) -> Result<(SessionHandle, tokio::task::JoinHandle>)> { let peer = iroh_net::endpoint::get_remote_node_id(&conn)?; let span = tracing::error_span!("conn", me=%me.fmt_short(), peer=%peer.fmt_short()); let (initial_transmission, channel_streams) = establish(&conn, our_role, our_nonce) .instrument(span.clone()) .await?; let (channels, fut) = prepare_channels(channel_streams)?; - tokio::task::spawn(fut.instrument(span)); + let net_task = tokio::task::spawn(fut.instrument(span)); let willow_conn = ConnHandle { initial_transmission, our_role, @@ -386,7 +390,7 @@ mod tests { channels, }; let handle = actor.init_session(willow_conn, intents).await?; - Ok(handle) + Ok((handle, net_task)) } #[tokio::test(flavor = "multi_thread")] @@ -465,7 +469,7 @@ mod tests { run( node_id_alfie, handle_alfie.clone(), - conn_alfie, + conn_alfie.clone(), Role::Alfie, nonce_alfie, vec![intent_alfie] @@ -473,14 +477,14 @@ mod tests { run( node_id_betty, handle_betty.clone(), - conn_betty, + conn_betty.clone(), Role::Betty, nonce_betty, vec![intent_betty] ) ); - let mut session_alfie = session_alfie?; - let mut session_betty = session_betty?; + let (mut session_alfie, net_task_alfie) = session_alfie?; + let (mut session_betty, net_task_betty) = session_betty?; let (res_alfie, res_betty) = tokio::join!( intent_handle_alfie.complete(), @@ -495,11 +499,27 @@ mod tests { tokio::join!(session_alfie.complete(), session_betty.complete()); info!("alfie session res {:?}", res_alfie); info!("betty session res {:?}", res_betty); - assert!(res_alfie.is_ok()); 
- assert!(res_betty.is_ok()); info!(time=?start.elapsed(), "reconciliation finished"); + let (senders_alfie, alfie_cancelled) = res_alfie.unwrap(); + let (senders_betty, betty_cancelled) = res_betty.unwrap(); + senders_alfie.close_all(); + senders_betty.close_all(); + + let (r1, r2) = tokio::try_join!(net_task_alfie, net_task_betty) + .expect("failed to close connection loops"); + r1.unwrap(); + r2.unwrap(); + + let (error_alfie, error_betty) = tokio::try_join!( + terminate_gracefully(&conn_alfie, node_id_alfie, node_id_betty, alfie_cancelled), + terminate_gracefully(&conn_betty, node_id_betty, node_id_alfie, betty_cancelled), + ) + .expect("failed to close both connections gracefully"); + assert_eq!(error_alfie, None); + assert_eq!(error_betty, None); + let alfie_entries = get_entries(&handle_alfie, namespace_id).await?; let betty_entries = get_entries(&handle_betty, namespace_id).await?; info!("alfie has now {} entries", alfie_entries.len()); @@ -615,7 +635,7 @@ mod tests { run( node_id_alfie, handle_alfie.clone(), - conn_alfie, + conn_alfie.clone(), Role::Alfie, nonce_alfie, vec![intent_alfie] @@ -623,20 +643,26 @@ mod tests { run( node_id_betty, handle_betty.clone(), - conn_betty, + conn_betty.clone(), Role::Betty, nonce_betty, vec![intent_betty] ) ); - let mut session_alfie = session_alfie?; - let mut session_betty = session_betty?; + let (mut session_alfie, net_task_alfie) = session_alfie?; + let (mut session_betty, net_task_betty) = session_betty?; let live_entries = done_rx.await?; expected_entries.extend(live_entries); // TODO: replace with event tokio::time::sleep(Duration::from_secs(1)).await; + session_alfie.close(); + let (senders_alfie, alfie_cancelled) = session_alfie + .complete() + .await + .expect("failed to close alfie session"); + senders_alfie.close_all(); let (res_alfie, res_betty) = tokio::join!( intent_handle_alfie.complete(), @@ -648,8 +674,24 @@ mod tests { assert!(res_alfie.is_ok()); assert!(res_betty.is_ok()); - let (res_alfie, 
res_betty) = - tokio::join!(session_alfie.complete(), session_betty.complete()); + let (senders_betty, betty_cancelled) = session_betty + .complete() + .await + .expect("failed to close alfie session"); + senders_betty.close_all(); + + let (r1, r2) = tokio::try_join!(net_task_alfie, net_task_betty) + .expect("failed to close connection loops"); + r1.unwrap(); + r2.unwrap(); + + let (error_alfie, error_betty) = tokio::try_join!( + terminate_gracefully(&conn_alfie, node_id_alfie, node_id_betty, alfie_cancelled), + terminate_gracefully(&conn_betty, node_id_betty, node_id_alfie, betty_cancelled), + ) + .expect("failed to close both connections gracefully"); + assert_eq!(error_alfie, None); + assert_eq!(error_betty, None); info!("alfie session res {:?}", res_alfie); info!("betty session res {:?}", res_betty); diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index c4f712c6c7..38b29a05cf 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -203,14 +203,20 @@ pub struct SessionHandle { impl SessionHandle { /// Wait for the session to finish. /// + /// Returns the channel senders and a boolean indicating if we cancelled the session. /// Returns an error if the session failed to complete. - pub async fn complete(&mut self) -> Result<(), Arc> { + pub async fn complete(&mut self) -> Result<(ChannelSenders, bool), Arc> { while let Some(event) = self.event_rx.recv().await { - if let SessionEvent::Complete { result, .. } = event { - return result; + if let SessionEvent::Complete { + result, + senders, + we_cancelled, + } = event + { + return result.map(|()| (senders, we_cancelled)); } } - Ok(()) + Err(Arc::new(Error::ActorFailed)) } /// Submit a new synchronisation intent. 
diff --git a/iroh-willow/src/session/error.rs b/iroh-willow/src/session/error.rs index 530c5cd366..393167f209 100644 --- a/iroh-willow/src/session/error.rs +++ b/iroh-willow/src/session/error.rs @@ -73,6 +73,8 @@ pub enum Error { Net(anyhow::Error), #[error("channel receiver dropped")] ChannelDropped, + #[error("our node is shutting down")] + ShuttingDown, } #[derive(Debug, thiserror::Error)] diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 4b7edf1171..581227d0c9 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -6,7 +6,7 @@ use strum::IntoEnumIterator; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; use tokio_util::sync::CancellationToken; -use tracing::{debug, error_span, trace, Instrument, Span}; +use tracing::{debug, error_span, trace, warn, Instrument, Span}; use crate::{ net::ConnHandle, @@ -354,16 +354,19 @@ pub(crate) async fn run_session( // Unsubscribe from the store. This stops the data send task. 
store.entries().unsubscribe(&session_id); - event_sender + debug!(error=?result.as_ref().err(), ?we_cancelled, "session complete"); + + if let Err(_receiver_dropped) = event_sender .send(SessionEvent::Complete { result: result.clone(), we_cancelled, senders: channel_sender, }) .await - .ok(); + { + warn!("failed to send session complete event: receiver dropped"); + } - debug!(error=?result.as_ref().err(), ?we_cancelled, "session complete"); match result { Ok(()) => Ok(()), Err(error) => { diff --git a/iroh-willow/tests/basic.rs b/iroh-willow/tests/basic.rs index 607816bd45..d76b5044d4 100644 --- a/iroh-willow/tests/basic.rs +++ b/iroh-willow/tests/basic.rs @@ -118,41 +118,6 @@ async fn peer_manager_two_intents() -> Result<()> { Ok(()) } -#[tokio::test(flavor = "multi_thread")] -async fn peer_manager_shutdown_immediate() -> Result<()> { - iroh_test::logging::setup_multithreaded(); - let mut rng = create_rng("peer_manager_update_intent"); - - let [alfie, betty] = spawn_two(&mut rng).await?; - let (_namespace, _alfie_user, _betty_user) = setup_and_delegate(&alfie, &betty).await?; - let betty_node_id = betty.node_id(); - let mut intent = alfie - .sync_with_peer(betty_node_id, SessionInit::reconcile_once(Interests::all())) - .await?; - let completion = intent.complete().await?; - assert_eq!(completion, Completion::Complete); - [alfie, betty].map(Peer::shutdown).try_join().await?; - Ok(()) -} - -#[tokio::test(flavor = "multi_thread")] -async fn peer_manager_shutdown_timeout() -> Result<()> { - iroh_test::logging::setup_multithreaded(); - let mut rng = create_rng("peer_manager_update_intent"); - - let [alfie, betty] = spawn_two(&mut rng).await?; - let (_namespace, _alfie_user, _betty_user) = setup_and_delegate(&alfie, &betty).await?; - let betty_node_id = betty.node_id(); - let mut intent = alfie - .sync_with_peer(betty_node_id, SessionInit::reconcile_once(Interests::all())) - .await?; - let completion = intent.complete().await?; - assert_eq!(completion, 
Completion::Complete); - tokio::time::sleep(Duration::from_secs(1)).await; - [alfie, betty].map(Peer::shutdown).try_join().await?; - Ok(()) -} - #[tokio::test(flavor = "multi_thread")] async fn peer_manager_update_intent() -> Result<()> { iroh_test::logging::setup_multithreaded(); @@ -222,6 +187,47 @@ async fn peer_manager_update_intent() -> Result<()> { Ok(()) } +/// Test immediate shutdown. +// TODO: This does not really test much. Used it for log reading of graceful connection termination. +// Not sure where we should expose whether connections closed gracefully or not? +#[tokio::test(flavor = "multi_thread")] +async fn peer_manager_shutdown_immediate() -> Result<()> { + iroh_test::logging::setup_multithreaded(); + let mut rng = create_rng("peer_manager_shutdown_immediate"); + + let [alfie, betty] = spawn_two(&mut rng).await?; + let (_namespace, _alfie_user, _betty_user) = setup_and_delegate(&alfie, &betty).await?; + let betty_node_id = betty.node_id(); + let mut intent = alfie + .sync_with_peer(betty_node_id, SessionInit::reconcile_once(Interests::all())) + .await?; + let completion = intent.complete().await?; + assert_eq!(completion, Completion::Complete); + [alfie, betty].map(Peer::shutdown).try_join().await?; + Ok(()) +} + +/// Test shutdown after a timeout. +// TODO: This does not really test much. Used it for log reading of graceful connection termination. +// Not sure where we should expose whether connections closed gracefully or not? 
+#[tokio::test(flavor = "multi_thread")] +async fn peer_manager_shutdown_timeout() -> Result<()> { + iroh_test::logging::setup_multithreaded(); + let mut rng = create_rng("peer_manager_shutdown_timeout"); + + let [alfie, betty] = spawn_two(&mut rng).await?; + let (_namespace, _alfie_user, _betty_user) = setup_and_delegate(&alfie, &betty).await?; + let betty_node_id = betty.node_id(); + let mut intent = alfie + .sync_with_peer(betty_node_id, SessionInit::reconcile_once(Interests::all())) + .await?; + let completion = intent.complete().await?; + assert_eq!(completion, Completion::Complete); + tokio::time::sleep(Duration::from_secs(1)).await; + [alfie, betty].map(Peer::shutdown).try_join().await?; + Ok(()) +} + mod util { use std::sync::{Arc, Mutex}; From fa1e06aa33ada2728a44f96c8f04a816abe4b827 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 8 Aug 2024 16:52:46 +0200 Subject: [PATCH 109/198] feat: properly deal with simultaneous connections, and restarting sessions --- iroh-willow/src/engine/peer_manager.rs | 337 +++++++++++++++++++------ iroh-willow/src/net.rs | 6 +- iroh-willow/src/session.rs | 4 + iroh-willow/src/session/intents.rs | 35 ++- iroh-willow/src/session/reconciler.rs | 23 +- iroh-willow/src/session/run.rs | 36 +-- iroh-willow/src/util/stream.rs | 3 + iroh-willow/tests/basic.rs | 46 +++- 8 files changed, 392 insertions(+), 98 deletions(-) diff --git a/iroh-willow/src/engine/peer_manager.rs b/iroh-willow/src/engine/peer_manager.rs index 0a652e4f1e..532612008a 100644 --- a/iroh-willow/src/engine/peer_manager.rs +++ b/iroh-willow/src/engine/peer_manager.rs @@ -22,7 +22,7 @@ use tracing::{debug, error_span, instrument, trace, warn, Instrument, Span}; use crate::{ net::{ establish, prepare_channels, terminate_gracefully, ChannelStreams, ConnHandle, ALPN, - ERROR_CODE_IGNORE_CONN, + ERROR_CODE_DUPLICATE_CONN, }, proto::sync::{AccessChallenge, InitialTransmission}, session::{ @@ -118,7 +118,7 @@ pub(super) struct PeerManager { 
session_events_rx: StreamMap>, peers: HashMap, accept_handlers: AcceptHandlers, - conn_tasks: JoinSet<(NodeId, Result)>, + conn_tasks: JoinSet<(Role, NodeId, Result)>, shutting_down: bool, } @@ -142,9 +142,12 @@ impl PeerManager { } pub(super) async fn run(mut self) -> Result<(), Error> { - let mut shutdown_reply = None; + // A timeout that initially is always-pending. Once we initiate shutdown, it is set to an actual timeout, + // to not wait forever for graceful termination. let shutdown_timeout = Either::Left(std::future::pending::<()>()); tokio::pin!(shutdown_timeout); + let mut shutdown_reply = None; + loop { tokio::select! { Some(input) = self.inbox.recv(), if !self.shutting_down => { @@ -164,32 +167,32 @@ impl PeerManager { } } } - _ = &mut shutdown_timeout => { - trace!("tick: shutdown timeout"); - debug!( - remaining=self.conn_tasks.len(), - "terminating all connections timed out, abort remaining connections" - ); - // TODO: We do not catch panics here. - self.conn_tasks.shutdown().await; - break; - } - Some((session_id, event)) = self.session_events_rx.next(), if !self.session_events_rx.is_empty() => { - trace!(?session_id, ?event, "tick: event"); - self.handle_session_event(session_id, event); + Some((peer, event)) = self.session_events_rx.next(), if !self.session_events_rx.is_empty() => { + trace!(peer=%peer.fmt_short(), ?event, "tick: session event"); + self.handle_session_event(peer, event); } Some(res) = self.conn_tasks.join_next(), if !self.conn_tasks.is_empty() => { trace!(active=self.conn_tasks.len(), "tick: conn task joined"); match res { Err(err) if err.is_cancelled() => continue, Err(err) => Err(err).context("conn task panicked")?, - Ok((peer, out)) => self.handle_conn_output(peer, out).await?, + Ok((our_role, peer, out)) => self.handle_conn_output(our_role, peer, out).await?, } if self.shutting_down && self.conn_tasks.is_empty() { debug!("all connections gracefully terminated"); break; } } + _ = &mut shutdown_timeout => { + trace!("tick: 
shutdown timeout"); + debug!( + remaining=self.conn_tasks.len(), + "terminating all connections timed out, abort remaining connections" + ); + // TODO: We do not catch panics here. + self.conn_tasks.shutdown().await; + break; + } else => break, } } @@ -199,6 +202,7 @@ impl PeerManager { Ok(()) } + /// Handle a new incoming connection. async fn handle_connection(&mut self, conn: Connection) { let peer = match get_remote_node_id(&conn) { Ok(peer) => peer, @@ -207,6 +211,7 @@ impl PeerManager { return; } }; + let Some(intent) = self.accept_handlers.accept(peer).await else { debug!("ignore incoming connection (accept handler returned none)"); return; @@ -216,35 +221,82 @@ impl PeerManager { .entry(peer) .or_insert_with(|| PeerInfo::new(Role::Betty, peer)); - match peer_info.state { - PeerState::None => { - let our_nonce = AccessChallenge::generate(); - let fut = async move { - let (initial_transmission, channel_streams) = - establish(&conn, Role::Betty, our_nonce).await?; - Ok(ConnStep::Ready { - conn, - initial_transmission, - channel_streams, - }) - }; - let abort_handle = spawn_conn_task(&mut self.conn_tasks, &peer_info, fut); - peer_info.abort_handle = Some(abort_handle); - peer_info.our_role = Role::Betty; - peer_info.state = PeerState::Pending { - intents: vec![intent], - }; - } - PeerState::Pending { ref mut intents } => { - intents.push(intent); - debug!("ignore incoming connection (already pending)"); - conn.close(ERROR_CODE_IGNORE_CONN.into(), b"duplicate-already-active"); - } + debug!(peer = %peer.fmt_short(), our_state=%peer_info.state, "incoming connection"); + + let accept_conn = match peer_info.state { + PeerState::None => true, + PeerState::Pending { + ref mut cancel_dial, + .. 
+ } => match peer_info.our_role { + Role::Betty => { + debug!("ignore incoming connection (already accepting)"); + conn.close( + ERROR_CODE_DUPLICATE_CONN.into(), + b"duplicate-already-pending", + ); + false + } + Role::Alfie => { + if peer > self.endpoint.node_id() { + debug!("incoming connection for a peer we are dialing and their connection wins, abort dial"); + if let Some(cancel_dial) = cancel_dial.take() { + cancel_dial.cancel(); + } + true + } else { + debug!("ignore incoming connection (already dialing and ours wins)"); + conn.close( + ERROR_CODE_DUPLICATE_CONN.into(), + b"duplicate-already-pending", + ); + false + } + } + }, PeerState::Active { .. } => { - // TODO: push betty intent to session? debug!("ignore incoming connection (already active)"); - conn.close(ERROR_CODE_IGNORE_CONN.into(), b"duplicate-already-active"); + conn.close( + ERROR_CODE_DUPLICATE_CONN.into(), + b"duplicate-already-active", + ); + false } + PeerState::Closing { .. } => true, + }; + if accept_conn { + debug!(peer=%peer.fmt_short(), "accept connection"); + // Take any pending intents from the previous state and merge with the new betty intent. + let mut intents = match peer_info.state { + PeerState::Pending { + ref mut intents, .. + } + | PeerState::Closing { + new_intents: ref mut intents, + .. + } => std::mem::take(intents), + _ => vec![], + }; + intents.push(intent); + peer_info.state = PeerState::Pending { + intents, + cancel_dial: None, + }; + peer_info.our_role = Role::Betty; + + // Start connection establish task. 
+ let our_nonce = AccessChallenge::generate(); + let fut = async move { + let (initial_transmission, channel_streams) = + establish(&conn, Role::Betty, our_nonce).await?; + Ok(ConnStep::Ready { + conn, + initial_transmission, + channel_streams, + }) + }; + let abort_handle = spawn_conn_task(&mut self.conn_tasks, &peer_info, fut); + peer_info.abort_handle = Some(abort_handle); } } @@ -254,14 +306,31 @@ impl PeerManager { .entry(peer) .or_insert_with(|| PeerInfo::new(Role::Alfie, peer)); + debug!(peer=%peer.fmt_short(), state=%peer_info.state, "submit intent"); + match peer_info.state { PeerState::None => { let our_nonce = AccessChallenge::generate(); let endpoint = self.endpoint.clone(); + let cancel_dial = CancellationToken::new(); + let cancel_dial2 = cancel_dial.clone(); + // Future that dials and establishes the connection. Can be cancelled for simultaneous connection. let fut = async move { - let conn = endpoint.connect_by_node_id(&peer, ALPN).await?; - let (initial_transmission, channel_streams) = - establish(&conn, Role::Alfie, our_nonce).await?; + let conn = tokio::select! { + res = endpoint.connect_by_node_id(&peer, ALPN) => res, + _ = cancel_dial.cancelled() => { + debug!("dial cancelled during dial"); + return Err(ConnectionError::LocallyClosed.into()); + } + }?; + let (initial_transmission, channel_streams) = tokio::select! { + res = establish(&conn, Role::Alfie, our_nonce) => res?, + _ = cancel_dial.cancelled() => { + debug!("dial cancelled during establish"); + conn.close(ERROR_CODE_DUPLICATE_CONN, b"your-conn-wins"); + return Err(ConnectionError::LocallyClosed.into()); + }, + }; Ok(ConnStep::Ready { conn, initial_transmission, @@ -272,9 +341,12 @@ impl PeerManager { peer_info.abort_handle = Some(abort_handle); peer_info.state = PeerState::Pending { intents: vec![intent], + cancel_dial: Some(cancel_dial2), }; } - PeerState::Pending { ref mut intents } => { + PeerState::Pending { + ref mut intents, .. 
+ } => { intents.push(intent); } PeerState::Active { ref update_tx, .. } => { @@ -283,27 +355,53 @@ impl PeerManager { intent.send_abort(Arc::new(Error::ActorFailed)).await; } } + PeerState::Closing { + ref mut new_intents, + .. + } => { + new_intents.push(intent); + } } } #[instrument("conn", skip_all, fields(peer=%peer.fmt_short()))] fn handle_session_event(&mut self, peer: NodeId, event: SessionEvent) { - trace!(?event, "session event"); match event { SessionEvent::Established => {} SessionEvent::Complete { result, we_cancelled, senders, + remaining_intents, + mut update_receiver, } => { - trace!(error=?result.err(), ?we_cancelled, "session complete"); + trace!(error=?result.err(), ?we_cancelled, ?remaining_intents, "session complete"); + // Close the channel senders. This will cause our send loops to close, // which in turn causes the receive loops of the other peer to close. senders.close_all(); + let Some(peer_info) = self.peers.get_mut(&peer) else { warn!("got session complete for unknown peer"); return; }; + + // TODO(frando): How exactly to deal with the `remaining_intents` is tbd. + // Current impl: We store them in the Closing state, wait if the connection closed with error or not, + // and if it closed with error abort them with this error, otherwise they are dropped (which also closes their event streams). + // We could potentially restart them, as we do with the new intents that came in after session termination, + // but we'd have to think carefully about endless loops there. + + // However, the intents that are still in the update channel are completely unprocessed, so they + // should get their chance via a reconnect. + let mut new_intents = vec![]; + while let Ok(SessionUpdate::SubmitIntent(intent)) = update_receiver.try_recv() { + new_intents.push(intent); + } + peer_info.state = PeerState::Closing { + old_intents: remaining_intents, + new_intents, + }; // Store whether we initiated the termination. 
We will need this for the graceful termination logic later. peer_info.we_cancelled = we_cancelled; } @@ -311,34 +409,86 @@ impl PeerManager { } #[instrument("conn", skip_all, fields(peer=%peer.fmt_short()))] - async fn handle_conn_output(&mut self, peer: NodeId, out: Result) -> Result<()> { + async fn handle_conn_output( + &mut self, + our_role: Role, + peer: NodeId, + out: Result, + ) -> Result<()> { let peer_info = self .peers .get_mut(&peer) .context("got conn task output for unknown peer")?; - trace!(?peer, out=?out.as_ref().map(|o| format!("{o}")), "conn task output"); + trace!(out=?out.as_ref().map(|o| format!("{o}")), "conn task output"); match out { Err(err) => { - debug!(peer=%peer.fmt_short(), ?err, "conn task failed"); - let peer = self.peers.remove(&peer).expect("just checked"); - // If we were still in pending state, terminate all pending intents. - if let PeerState::Pending { intents } = peer.state { - let err = Arc::new(Error::Net(err)); - join_all( - intents - .into_iter() - .map(|intent| intent.send_abort(err.clone())), - ) - .await; + trace!(?our_role, current_state=%peer_info.state, "conn task failed: {err:#?}"); + match err.downcast_ref() { + Some(ConnectionError::LocallyClosed) => { + // We cancelled the connection, nothing to do. + debug!("connection was cancelled by us"); + } + Some(ConnectionError::ApplicationClosed(reason)) + if reason.error_code == ERROR_CODE_DUPLICATE_CONN => + { + debug!( + "connection was cancelled by the remote: simultaneous connection and theirs wins" + ); + if our_role != peer_info.our_role { + // TODO: setup a timeout to kill intents if the other conn doesn't make it. + debug!("we are still waiting for their connection to arrive"); + } + } + _ => { + warn!(?err, "connection failed"); + let peer = self.peers.remove(&peer).expect("just checked"); + match peer.state { + PeerState::Pending { intents, .. } => { + // If we were still in pending state, terminate all pending intents. 
+ let err = Arc::new(Error::Net(err)); + join_all( + intents + .into_iter() + .map(|intent| intent.send_abort(err.clone())), + ) + .await; + } + PeerState::Closing { + old_intents, + new_intents, + } => { + // If we were are in closing state, we still forward the connection error to the intents. + // This would be the place where we'd implement retries: instead of aborting the intents, resubmit them. + // Right now, we only resubmit intents that were submitted while terminating a session, and only if the session closed gracefully. + let err = Arc::new(Error::Net(err)); + join_all( + old_intents + .into_iter() + .chain(new_intents.into_iter()) + .map(|intent| intent.send_abort(err.clone())), + ) + .await; + } + _ => { + // TODO: Not sure if this is good practice? + // A `debug_assert` is far too much, because this can be triggered by other peers. + // However in tests I want to make sure that *all* connections terminate gracefully. + #[cfg(test)] + panic!("connection failed: {err:?}"); + } + } + } } - // We don't need to cancel the session here. It will terminate because all receiver channels are closed. } Ok(ConnStep::Ready { conn, initial_transmission, channel_streams, }) => { - let PeerState::Pending { ref mut intents } = &mut peer_info.state else { + let PeerState::Pending { + ref mut intents, .. + } = &mut peer_info.state + else { drop(conn); // TODO: unreachable? return Err(anyhow!( @@ -350,7 +500,7 @@ impl PeerManager { if self.shutting_down { debug!("connection became ready while shutting down, abort"); - conn.close(ERROR_CODE_IGNORE_CONN.into(), b"shutting-down"); + conn.close(ERROR_CODE_DUPLICATE_CONN.into(), b"shutting-down"); if !intents.is_empty() { let err = Arc::new(Error::ShuttingDown); join_all( @@ -364,14 +514,15 @@ impl PeerManager { } // TODO: Here we should check again that we are not establishing a duplicate connection. 
- debug!("connection ready: init session"); + debug!(?our_role, "connection ready: init session"); let (channels, fut) = prepare_channels(channel_streams)?; let conn_handle = ConnHandle { initial_transmission, channels, - our_role: peer_info.our_role, + our_role, peer, }; + peer_info.our_role = our_role; let session_handle = self.actor.init_session(conn_handle, intents).await?; let fut = fut.map_ok(move |()| ConnStep::Done { conn }); @@ -400,14 +551,47 @@ impl PeerManager { Ok(ConnStep::Closed { conn, error }) }; let abort_handle = spawn_conn_task(&mut self.conn_tasks, &peer_info, fut); - peer_info.abort_handle = Some(abort_handle); + if let PeerState::Closing { .. } = &peer_info.state { + peer_info.abort_handle = Some(abort_handle); + } else { + // TODO: What do we do with the closing abort handle in case we have a new connection already? + } } Ok(ConnStep::Closed { error, conn }) => { - match error { + match &error { None => debug!("connection closed gracefully"), Some(error) => warn!(?error, "failed to close connection gracefully"), } - self.peers.remove(&peer); + if let PeerState::Closing { + ref mut new_intents, + ref mut old_intents, + } = peer_info.state + { + if let Some(error) = error { + // If the connection did not close gracefully, terminate the pending intents with the connection error. + let err = Arc::new(Error::Net(error.into())); + join_all( + old_intents + .drain(..) + .map(|intent| intent.send_abort(err.clone())), + ) + .await; + } else { + // Otherwise, just drop the old intents. + let _ = old_intents.drain(..); + }; + let intents = std::mem::take(new_intents); + self.peers.remove(&peer); + if !intents.is_empty() { + debug!( + "resubmitting {} intents that were not yet processed", + intents.len() + ); + for intent in intents { + self.submit_intent(peer, intent).await; + } + } + } drop(conn); } } @@ -426,6 +610,7 @@ impl PeerManager { abort_handle.abort(); } } + PeerState::Closing { .. } => {} PeerState::Active { cancel_token, .. 
} => { // We are in active state. We cancel our session, which leads to graceful connection termination. cancel_token.cancel(); @@ -436,13 +621,14 @@ impl PeerManager { } fn spawn_conn_task( - conn_tasks: &mut JoinSet<(NodeId, Result)>, + conn_tasks: &mut JoinSet<(Role, NodeId, Result)>, peer_info: &PeerInfo, fut: impl Future> + Send + 'static, ) -> AbortHandle { let node_id = peer_info.node_id; + let our_role = peer_info.our_role; let fut = fut - .map(move |res| (node_id, res)) + .map(move |res| (our_role, node_id, res)) .instrument(peer_info.span.clone()); conn_tasks.spawn(fut) } @@ -470,16 +656,21 @@ impl PeerInfo { } } -#[derive(Debug)] +#[derive(Debug, strum::Display)] enum PeerState { None, Pending { intents: Vec, + cancel_dial: Option, }, Active { cancel_token: CancellationToken, update_tx: mpsc::Sender, }, + Closing { + old_intents: Vec, + new_intents: Vec, + }, } #[derive(derive_more::Debug, strum::Display)] diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 615f2d0179..63f734c68d 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -4,7 +4,7 @@ use anyhow::{anyhow, ensure, Context as _, Result}; use futures_concurrency::future::TryJoin; use futures_util::future::TryFutureExt; use iroh_base::key::NodeId; -use iroh_net::endpoint::{Connection, ConnectionError, RecvStream, SendStream}; +use iroh_net::endpoint::{Connection, ConnectionError, RecvStream, SendStream, VarInt}; use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tracing::{debug, trace}; @@ -31,10 +31,10 @@ pub const CHANNEL_CAP: usize = 1024 * 64; pub const ALPN: &[u8] = b"iroh-willow/0"; /// Our QUIC application error code for graceful connection termination. -pub const ERROR_CODE_OK: u32 = 1; +pub const ERROR_CODE_OK: VarInt = VarInt::from_u32(1); /// Our QUIC application error code when closing connections during establishment /// because we prefer another existing connection to the same peer. 
-pub const ERROR_CODE_IGNORE_CONN: u32 = 2; +pub const ERROR_CODE_DUPLICATE_CONN: VarInt = VarInt::from_u32(2); /// The handle to an active peer connection. /// diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 38b29a05cf..3f5d9d5d6b 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -190,6 +190,9 @@ pub enum SessionEvent { we_cancelled: bool, #[debug("ChannelSenders")] senders: ChannelSenders, + remaining_intents: Vec, + #[debug("Receiver")] + update_receiver: mpsc::Receiver, }, } @@ -211,6 +214,7 @@ impl SessionHandle { result, senders, we_cancelled, + .. } = event { return result.map(|()| (senders, we_cancelled)); diff --git a/iroh-willow/src/session/intents.rs b/iroh-willow/src/session/intents.rs index e6f3894ba3..7b76a67c1b 100644 --- a/iroh-willow/src/session/intents.rs +++ b/iroh-willow/src/session/intents.rs @@ -273,6 +273,7 @@ impl IntentDispatcher { } } + /// Aborts all registered intents. pub(super) async fn abort_all(&self, error: Arc) { let _ = futures_buffered::join_all( Iterator::chain( @@ -292,6 +293,36 @@ impl IntentDispatcher { .await; } + /// Takes self and returns all pending intents. + // TODO: What if one of the two channels closed? + // Should not do Option but an option for each direction instead likely on Intent. 
+ pub(super) fn drain_all(mut self) -> Vec { + let mut intents: Vec<_> = self.pending_intents.into_iter().collect(); + for (id, info) in self.intents.drain() { + let event_tx = info.event_tx; + let update_rx = self.intent_update_rx.remove(&id); + let update_rx = update_rx + .map(|stream| stream.into_inner()) + .flatten() + .map(|stream| stream.into_inner()); + let channels = match (event_tx, update_rx) { + (Some(event_tx), Some(update_rx)) => Some(IntentChannels { + event_tx, + update_rx, + }), + _ => None, + }; + if let Some(channels) = channels { + let intent = Intent { + init: info.original_init, + channels: Some(channels), + }; + intents.push(intent); + } + } + intents + } + /// Run the [`IntentDispatcher`]. /// /// The returned stream is a generator, so it must be polled repeatedly to progress. @@ -347,7 +378,7 @@ impl IntentDispatcher { } async fn submit_intent(&mut self, co: &Co, intent: Intent) -> Result<(), Error> { - let interests = self.auth.resolve_interests(intent.init.interests)?; + let interests = self.auth.resolve_interests(intent.init.interests.clone())?; let intent_id = { let intent_id = self.next_intent_id; self.next_intent_id += 1; @@ -365,6 +396,7 @@ impl IntentDispatcher { interests: flatten_interests(&interests), mode: intent.init.mode, event_tx, + original_init: intent.init, }; // Send out reconciled events for already-complete areas. 
for (namespace, areas) in &self.complete_areas { @@ -450,6 +482,7 @@ impl IntentDispatcher { #[derive(Debug)] pub(super) struct IntentInfo { + original_init: SessionInit, interests: NamespaceInterests, mode: SessionMode, event_tx: Option>, diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index ad23b7d1af..ade3b5db8a 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -117,11 +117,24 @@ impl Reconciler { async fn received_message(&mut self, message: ReconciliationMessage) -> Result<(), Error> { match message { ReconciliationMessage::SendFingerprint(message) => { - self.targets - .get_eventually(&self.shared, &message.handles()) - .await? + let target_id = message.handles(); + let target = self + .targets + .get_eventually(&self.shared, &target_id) + .await?; + target .received_send_fingerprint(&self.shared, message) .await?; + tracing::warn!( + is_complete = target.is_complete(), + started = target.started, + our_uncovered_ranges = ?target.our_uncovered_ranges, + current_entry_none = self.current_entry.is_none(), + "received_send_fingerprint done" + ); + if target.is_complete() && self.current_entry.is_none() { + self.complete_target(target_id).await?; + } } ReconciliationMessage::AnnounceEntries(message) => { let target_id = message.handles(); @@ -419,6 +432,7 @@ impl Target { shared: &Shared, message: ReconciliationSendFingerprint, ) -> Result<(), Error> { + self.started = true; if let Some(range_count) = message.covers { self.mark_our_range_covered(range_count)?; } @@ -486,6 +500,8 @@ impl Target { shared: &Shared, message: ReconciliationAnnounceEntries, ) -> Result<(), Error> { + trace!(?message, "received_announce_entries start"); + self.started = true; if let Some(range_count) = message.covers { self.mark_our_range_covered(range_count)?; } @@ -587,7 +603,6 @@ impl Target { fn mark_our_next_range_pending(&mut self) { let range_count = self.next_range_count_ours(); - 
self.started = true; self.our_uncovered_ranges.insert(range_count); } diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 581227d0c9..ae23a8a8a8 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -1,7 +1,7 @@ use std::{future::Future, sync::Arc}; use futures_concurrency::{future::TryJoin, stream::StreamExt as _}; -use futures_lite::{Stream, StreamExt as _}; +use futures_lite::StreamExt as _; use strum::IntoEnumIterator; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; @@ -43,7 +43,7 @@ pub(crate) async fn run_session( cancel_token: CancellationToken, session_id: SessionId, event_sender: EventSender, - update_receiver: impl Stream + Unpin + 'static, + update_receiver: ReceiverStream, ) -> Result<(), Arc> { let ConnHandle { peer: _, @@ -346,34 +346,40 @@ pub(crate) async fn run_session( .try_join() .await; - let result = match result { - Ok(_) => Ok(()), - Err(err) => Err(Arc::new(err)), - }; - - // Unsubscribe from the store. This stops the data send task. + // Unsubscribe from the store. store.entries().unsubscribe(&session_id); + let result = result.map_err(Arc::new).map(|_| ()); + debug!(error=?result.as_ref().err(), ?we_cancelled, "session complete"); + let remaining_intents = match result.as_ref() { + Ok(()) => { + // If the session closed without an error, return the remaining intents + // so that they can potentially be restarted. + intents.drain_all() + } + Err(err) => { + // If the session closed with error, abort the intents with that error. 
+ intents.abort_all(err.clone()).await; + vec![] + } + }; + if let Err(_receiver_dropped) = event_sender .send(SessionEvent::Complete { result: result.clone(), we_cancelled, senders: channel_sender, + remaining_intents, + update_receiver: update_receiver.into_inner().into_inner(), }) .await { warn!("failed to send session complete event: receiver dropped"); } - match result { - Ok(()) => Ok(()), - Err(error) => { - intents.abort_all(error.clone()).await; - Err(error) - } - } + result } async fn control_loop( diff --git a/iroh-willow/src/util/stream.rs b/iroh-willow/src/util/stream.rs index 97c1a56843..ce93003467 100644 --- a/iroh-willow/src/util/stream.rs +++ b/iroh-willow/src/util/stream.rs @@ -26,6 +26,9 @@ impl Cancelable { is_cancelled: false, } } + pub fn into_inner(self) -> S { + self.stream + } } impl Stream for Cancelable { diff --git a/iroh-willow/tests/basic.rs b/iroh-willow/tests/basic.rs index d76b5044d4..ef95b8bcea 100644 --- a/iroh-willow/tests/basic.rs +++ b/iroh-willow/tests/basic.rs @@ -228,6 +228,44 @@ async fn peer_manager_shutdown_timeout() -> Result<()> { Ok(()) } +#[tokio::test(flavor = "multi_thread")] +async fn peer_manager_twoway_loop() -> Result<()> { + iroh_test::logging::setup_multithreaded(); + let mut rng = create_rng("peer_manager_twoway_loop"); + + let [alfie, betty] = spawn_two(&mut rng).await?; + let (namespace, alfie_user, betty_user) = setup_and_delegate(&alfie, &betty).await?; + insert(&alfie, namespace, alfie_user, &[b"foo"], "foo 1").await?; + insert(&betty, namespace, betty_user, &[b"bar"], "bar 1").await?; + let alfie_node_id = alfie.node_id(); + let betty_node_id = betty.node_id(); + for _i in 0..20 { + let alfie = alfie.clone(); + let betty = betty.clone(); + let task_alfie = tokio::task::spawn(async move { + let mut intent = alfie + .sync_with_peer(betty_node_id, SessionInit::reconcile_once(Interests::all())) + .await + .unwrap(); + let completion = intent.complete().await.expect("failed to complete intent"); + 
assert_eq!(completion, Completion::Complete); + }); + + let task_betty = tokio::task::spawn(async move { + let mut intent = betty + .sync_with_peer(alfie_node_id, SessionInit::reconcile_once(Interests::all())) + .await + .unwrap(); + let completion = intent.complete().await.expect("failed to complete intent"); + assert_eq!(completion, Completion::Complete); + }); + task_alfie.await.unwrap(); + task_betty.await.unwrap(); + } + [alfie, betty].map(Peer::shutdown).try_join().await?; + Ok(()) +} + mod util { use std::sync::{Arc, Mutex}; @@ -282,11 +320,15 @@ mod util { let endpoint = endpoint.clone(); async move { while let Some(mut conn) = endpoint.accept().await { - let alpn = conn.alpn().await?; + let Ok(alpn) = conn.alpn().await else { + continue; + }; if alpn != ALPN { continue; } - let conn = conn.await?; + let Ok(conn) = conn.await else { + continue; + }; engine.handle_connection(conn).await?; } Result::Ok(()) From b799469a7bb114ad1b3b2a979a0c0e6fb32f6483 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 8 Aug 2024 17:04:36 +0200 Subject: [PATCH 110/198] fix after rebase --- iroh-willow/src/engine/peer_manager.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/iroh-willow/src/engine/peer_manager.rs b/iroh-willow/src/engine/peer_manager.rs index 532612008a..8d223df8f3 100644 --- a/iroh-willow/src/engine/peer_manager.rs +++ b/iroh-willow/src/engine/peer_manager.rs @@ -317,7 +317,7 @@ impl PeerManager { // Future that dials and establishes the connection. Can be cancelled for simultaneous connection. let fut = async move { let conn = tokio::select! 
{ - res = endpoint.connect_by_node_id(&peer, ALPN) => res, + res = endpoint.connect_by_node_id(peer, ALPN) => res, _ = cancel_dial.cancelled() => { debug!("dial cancelled during dial"); return Err(ConnectionError::LocallyClosed.into()); From 4126a4e9d83b18d3f0461866df6d78aafdfbdb73 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 8 Aug 2024 17:07:18 +0200 Subject: [PATCH 111/198] remove debug log --- iroh-willow/src/session/reconciler.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index ade3b5db8a..28082d8f05 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -125,13 +125,6 @@ impl Reconciler { target .received_send_fingerprint(&self.shared, message) .await?; - tracing::warn!( - is_complete = target.is_complete(), - started = target.started, - our_uncovered_ranges = ?target.our_uncovered_ranges, - current_entry_none = self.current_entry.is_none(), - "received_send_fingerprint done" - ); if target.is_complete() && self.current_entry.is_none() { self.complete_target(target_id).await?; } From a7660aeeddf25e314be523d7b4cad1bfbe0bd64d Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 8 Aug 2024 17:12:21 +0200 Subject: [PATCH 112/198] fix net_live test --- iroh-willow/src/net.rs | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 63f734c68d..35f12ac1d1 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -662,18 +662,9 @@ mod tests { .complete() .await .expect("failed to close alfie session"); + info!("close alfie session"); senders_alfie.close_all(); - let (res_alfie, res_betty) = tokio::join!( - intent_handle_alfie.complete(), - intent_handle_betty.complete() - ); - info!(time=?start.elapsed(), "reconciliation finished"); - info!("alfie intent res {:?}", res_alfie); - info!("betty intent res 
{:?}", res_betty); - assert!(res_alfie.is_ok()); - assert!(res_betty.is_ok()); - let (senders_betty, betty_cancelled) = session_betty .complete() .await @@ -685,6 +676,16 @@ mod tests { r1.unwrap(); r2.unwrap(); + let (res_alfie, res_betty) = tokio::join!( + intent_handle_alfie.complete(), + intent_handle_betty.complete() + ); + info!(time=?start.elapsed(), "reconciliation finished"); + info!("alfie intent res {:?}", res_alfie); + info!("betty intent res {:?}", res_betty); + assert!(res_alfie.is_ok()); + assert!(res_betty.is_ok()); + let (error_alfie, error_betty) = tokio::try_join!( terminate_gracefully(&conn_alfie, node_id_alfie, node_id_betty, alfie_cancelled), terminate_gracefully(&conn_betty, node_id_betty, node_id_alfie, betty_cancelled), From 2c9a2fc3aea174b2b944d5cf7de685b8dd893e97 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 8 Aug 2024 17:16:46 +0200 Subject: [PATCH 113/198] make util pub for now --- iroh-willow/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/iroh-willow/src/lib.rs b/iroh-willow/src/lib.rs index 90bb205ff3..5115341647 100644 --- a/iroh-willow/src/lib.rs +++ b/iroh-willow/src/lib.rs @@ -10,4 +10,4 @@ pub mod net; pub mod proto; pub mod session; pub mod store; -mod util; +pub mod util; From 23ff67514175ee71de1259b19b6905d97108799b Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 8 Aug 2024 17:38:07 +0200 Subject: [PATCH 114/198] docs: net module --- iroh-willow/src/net.rs | 43 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 35f12ac1d1..2113953002 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -52,6 +52,14 @@ pub(crate) struct ConnHandle { /// Establish the connection by running the initial transmission and /// opening the streams for the control and logical channels. +/// +/// The initial transmission is transferred over a pair of uni streams. 
+/// All channels for the actual WGPS are bi streams. +/// Returns the initial transmission and [`ChannelStreams], which is an +/// array of send and receive streams, one for each WGPS channel. +/// +/// To start the networking loops that pipe the QUIC streams into our +/// internal channel streams use [`prepare_channels`]. pub(crate) async fn establish( conn: &Connection, our_role: Role, @@ -139,6 +147,11 @@ async fn open_channel_streams(conn: &Connection, our_role: Role) -> Result Result<(Channels, impl Future> + Send)> { @@ -256,6 +269,36 @@ async fn send_loop(mut send_stream: SendStream, channel_reader: Reader) -> Resul Ok(()) } +/// Terminate a connection gracefully. +/// +/// QUIC does not allow us to rely on stream terminations, because those only signal +/// reception in the peer's QUIC stack, not in the application. Closing a QUIC connection +/// triggers immediate termination, so to make sure that all data was actually processed +/// by our session, we exchange a single byte over a pair of uni streams. As this is the only +/// use of uni streams after the initial connection handshake, we do not have to identify the +/// streams specifically. +/// +/// This function may only be called once the session processing has fully terminated and all +/// WGPS streams are closed (for send streams) and read to end (for recv streams) on our side. +/// +/// `we_cancelled` is a boolean indicating whether we are terminating the connection after +/// we willfully terminated or completed our session. Pass `false` if the session terminated +/// because the other peer closed their WGPS streams. +/// +/// If only one peer indicated that they initiated the termination by setting `we_cancelled` +/// to `true`, this peer will *not* close the connection, but instead wait for the other peer +/// to close the connection. +/// If both peers indicated that they initiated the termination, the peer with the higher node id +/// will close the connection first. 
+/// If none of the peers said they closed, which likely is a bug in the implementation, both peers +/// will close the connection. +/// +/// A connection is considered to be closed gracefully if and only if this procedure is run to end +/// successfully, and if the connection is closed with the expected error code. +/// +/// Returns an error if the termination flow was aborted prematurely. +/// Returns a [`ConnectionError] if the termination flow was completed successfully, but the connection +/// was not closed with the expected error code. pub(crate) async fn terminate_gracefully( conn: &Connection, me: NodeId, From 684d6caf3189bf8f19a43b5f41cf87f5caf0e002 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 8 Aug 2024 21:00:20 +0200 Subject: [PATCH 115/198] fix: cleanup quic connection error codes --- iroh-willow/src/engine/peer_manager.rs | 21 ++++++--------------- iroh-willow/src/net.rs | 13 ++++++++----- 2 files changed, 14 insertions(+), 20 deletions(-) diff --git a/iroh-willow/src/engine/peer_manager.rs b/iroh-willow/src/engine/peer_manager.rs index 8d223df8f3..43ee93ebde 100644 --- a/iroh-willow/src/engine/peer_manager.rs +++ b/iroh-willow/src/engine/peer_manager.rs @@ -22,7 +22,7 @@ use tracing::{debug, error_span, instrument, trace, warn, Instrument, Span}; use crate::{ net::{ establish, prepare_channels, terminate_gracefully, ChannelStreams, ConnHandle, ALPN, - ERROR_CODE_DUPLICATE_CONN, + ERROR_CODE_DUPLICATE_CONN, ERROR_CODE_SHUTDOWN, }, proto::sync::{AccessChallenge, InitialTransmission}, session::{ @@ -231,10 +231,7 @@ impl PeerManager { } => match peer_info.our_role { Role::Betty => { debug!("ignore incoming connection (already accepting)"); - conn.close( - ERROR_CODE_DUPLICATE_CONN.into(), - b"duplicate-already-pending", - ); + conn.close(ERROR_CODE_DUPLICATE_CONN, b"duplicate-already-accepting"); false } Role::Alfie => { @@ -246,20 +243,14 @@ impl PeerManager { true } else { debug!("ignore incoming connection (already dialing and 
ours wins)"); - conn.close( - ERROR_CODE_DUPLICATE_CONN.into(), - b"duplicate-already-pending", - ); + conn.close(ERROR_CODE_DUPLICATE_CONN, b"duplicate-our-dial-wins"); false } } }, PeerState::Active { .. } => { debug!("ignore incoming connection (already active)"); - conn.close( - ERROR_CODE_DUPLICATE_CONN.into(), - b"duplicate-already-active", - ); + conn.close(ERROR_CODE_DUPLICATE_CONN, b"duplicate-already-active"); false } PeerState::Closing { .. } => true, @@ -327,7 +318,7 @@ impl PeerManager { res = establish(&conn, Role::Alfie, our_nonce) => res?, _ = cancel_dial.cancelled() => { debug!("dial cancelled during establish"); - conn.close(ERROR_CODE_DUPLICATE_CONN, b"your-conn-wins"); + conn.close(ERROR_CODE_DUPLICATE_CONN, b"duplicate-your-dial-wins"); return Err(ConnectionError::LocallyClosed.into()); }, }; @@ -500,7 +491,7 @@ impl PeerManager { if self.shutting_down { debug!("connection became ready while shutting down, abort"); - conn.close(ERROR_CODE_DUPLICATE_CONN.into(), b"shutting-down"); + conn.close(ERROR_CODE_SHUTDOWN, b"shutting-down"); if !intents.is_empty() { let err = Arc::new(Error::ShuttingDown); join_all( diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 2113953002..2abdda271e 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -30,12 +30,15 @@ pub const CHANNEL_CAP: usize = 1024 * 64; /// The ALPN protocol name for iroh-willow. pub const ALPN: &[u8] = b"iroh-willow/0"; -/// Our QUIC application error code for graceful connection termination. +/// QUIC application error code for graceful connection termination. pub const ERROR_CODE_OK: VarInt = VarInt::from_u32(1); -/// Our QUIC application error code when closing connections during establishment -/// because we prefer another existing connection to the same peer. + +/// QUIC application error code for closing connections because another connection is preferred. 
pub const ERROR_CODE_DUPLICATE_CONN: VarInt = VarInt::from_u32(2); +/// QUIC application error code when closing connection because our node is shutting down. +pub const ERROR_CODE_SHUTDOWN: VarInt = VarInt::from_u32(3); + /// The handle to an active peer connection. /// /// This is passed into the session loop, where it is used to send and receive messages @@ -351,12 +354,12 @@ pub(crate) async fn terminate_gracefully( }; debug!(?who_cancelled, "connection complete"); if we_close_first { - conn.close(ERROR_CODE_OK.into(), b"bye"); + conn.close(ERROR_CODE_OK, b"bye"); } let reason = conn.closed().await; let is_graceful = match &reason { ConnectionError::LocallyClosed if we_close_first => true, - ConnectionError::ApplicationClosed(frame) if frame.error_code == ERROR_CODE_OK.into() => { + ConnectionError::ApplicationClosed(frame) if frame.error_code == ERROR_CODE_OK => { !we_close_first || matches!(who_cancelled, WhoCancelled::NoneDid) } _ => false, From 441020b95afe58c818247b8b0d61d2214b1d9451 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Thu, 8 Aug 2024 21:10:22 +0200 Subject: [PATCH 116/198] feat: add timeouts for connection establishment and shutdown --- iroh-willow/src/engine/peer_manager.rs | 2 +- iroh-willow/src/net.rs | 14 +++++++++----- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/iroh-willow/src/engine/peer_manager.rs b/iroh-willow/src/engine/peer_manager.rs index 43ee93ebde..e5b2c7ec27 100644 --- a/iroh-willow/src/engine/peer_manager.rs +++ b/iroh-willow/src/engine/peer_manager.rs @@ -423,7 +423,7 @@ impl PeerManager { if reason.error_code == ERROR_CODE_DUPLICATE_CONN => { debug!( - "connection was cancelled by the remote: simultaneous connection and theirs wins" + "connection was cancelled by the remote: simultaneous connection and their's wins" ); if our_role != peer_info.our_role { // TODO: setup a timeout to kill intents if the other conn doesn't make it. 
diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 2abdda271e..ee8fcfe88b 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -1,4 +1,4 @@ -use std::future::Future; +use std::{future::Future, time::Duration}; use anyhow::{anyhow, ensure, Context as _, Result}; use futures_concurrency::future::TryJoin; @@ -39,6 +39,9 @@ pub const ERROR_CODE_DUPLICATE_CONN: VarInt = VarInt::from_u32(2); /// QUIC application error code when closing connection because our node is shutting down. pub const ERROR_CODE_SHUTDOWN: VarInt = VarInt::from_u32(3); +pub const ESTABLISH_TIMEOUT: Duration = Duration::from_secs(10); +pub const SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(10); + /// The handle to an active peer connection. /// /// This is passed into the session loop, where it is used to send and receive messages @@ -71,12 +74,12 @@ pub(crate) async fn establish( debug!(?our_role, "establishing connection"); // Run the initial transmission (which works on uni streams) concurrently // with opening/accepting the bi streams for the channels. - ( + let fut = ( initial_transmission(conn, our_nonce), open_channel_streams(conn, our_role), ) - .try_join() - .await + .try_join(); + tokio::time::timeout(ESTABLISH_TIMEOUT, fut).await? 
} async fn initial_transmission( @@ -329,7 +332,8 @@ pub(crate) async fn terminate_gracefully( Ok(they_cancelled) }; - let (_, they_cancelled) = (send, recv).try_join().await?; + let send_and_recv = (send, recv).try_join(); + let (_, they_cancelled) = tokio::time::timeout(SHUTDOWN_TIMEOUT, send_and_recv).await??; #[derive(Debug)] enum WhoCancelled { From b9fb6f25c378874ebcebaf3a9531be872333f877 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Sun, 11 Aug 2024 10:30:34 +0200 Subject: [PATCH 117/198] refactor: add willow-rs dependencies --- Cargo.lock | 179 ++++++++++++++++++++++++++++++----------- Cargo.toml | 8 ++ iroh-willow/Cargo.toml | 21 +++-- 3 files changed, 152 insertions(+), 56 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6017f1bbc5..cce3262300 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -214,7 +214,7 @@ checksum = "7378575ff571966e99a744addeff0bff98b8ada0dedf1956d59e634db95eaac1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", "synstructure 0.13.1", ] @@ -237,7 +237,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -254,13 +254,13 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.80" +version = "0.1.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" +checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -354,7 +354,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -705,7 +705,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -1054,7 +1054,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -1078,7 +1078,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -1089,7 +1089,7 @@ checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" dependencies = [ "darling_core", "quote", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -1158,7 +1158,7 @@ checksum = "5fe87ce4529967e0ba1dcf8450bab64d97dfd5010a6256187ffe2e43e6f0e049" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -1188,7 +1188,7 @@ checksum = "27d919ced7590fc17b5d5a3c63b662e8a7d2324212c4e4dbbed975cafd22d16d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", "unicode-xid", ] @@ -1271,7 +1271,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -1372,9 +1372,9 @@ dependencies = [ [[package]] name = "either" -version = "1.11.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" [[package]] name = "elliptic-curve" @@ -1431,7 +1431,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -1444,7 +1444,7 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -1464,7 +1464,7 @@ checksum = "5c785274071b1b420972453b306eeca06acf4633829db4223b58a2a8c5953bc4" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -1741,7 +1741,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -3021,6 +3021,7 @@ dependencies = [ "iroh-metrics", 
"iroh-net", "iroh-test", + "meadowcap", "postcard", "proptest", "rand", @@ -3030,6 +3031,7 @@ dependencies = [ "serde", "sha2", "strum 0.26.2", + "syncify", "tempfile", "test-strategy", "thiserror", @@ -3038,6 +3040,9 @@ dependencies = [ "tokio-util", "tracing", "tracing-subscriber", + "ufotofu", + "willow-data-model", + "willow-encoding", "zerocopy 0.8.0-alpha.16", ] @@ -3218,12 +3223,31 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" +[[package]] +name = "maybe-std" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4958ec1997b05011d5c786bf4093cd48578bd9be2737350ab38659694083ddde" + [[package]] name = "md5" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" +[[package]] +name = "meadowcap" +version = "0.1.0" +source = "git+https://github.com/Frando/willow-rs.git?branch=iroh#b98500eebb46bdbbb137f2b049c54f86bf49bcb6" +dependencies = [ + "either", + "signature", + "syncify", + "ufotofu", + "willow-data-model", + "willow-encoding", +] + [[package]] name = "memalloc" version = "0.1.0" @@ -3581,7 +3605,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -3801,7 +3825,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -3832,7 +3856,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -3957,7 +3981,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -4141,9 +4165,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.80" +version = "1.0.86" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56dea16b0a29e94408b9aa5e2940a4eedbd128a1ba20e8f7ae60fd3d465af0e" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" dependencies = [ "unicode-ident", ] @@ -4168,7 +4192,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -4558,7 +4582,7 @@ checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -5115,7 +5139,7 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -5196,7 +5220,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -5437,7 +5461,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d904e7009df136af5297832a3ace3370cd14ff1546a232f4f185036c2736fcac" dependencies = [ "quote", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -5490,7 +5514,7 @@ dependencies = [ "proc-macro2", "quote", "struct_iterable_internal", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -5508,7 +5532,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta-derive", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -5519,7 +5543,7 @@ checksum = "a60bcaff7397072dca0017d1db428e30d5002e00b6847703e2e42005c95fbe00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -5550,7 +5574,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -5563,7 +5587,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -5640,9 +5664,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.59" +version = "2.0.72" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a6531ffc7b071655e4ce2e04bd464c4830bb585a61cabb96cf808f05172615a" +checksum = "dc4b9b9bf2add8093d3f2c0204471e951b2285580335de42f9d2534f3ae7a8af" dependencies = [ "proc-macro2", "quote", @@ -5672,6 +5696,17 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +[[package]] +name = "syncify" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e2f83220c0c5abf77ec9f4910c6590f75f1bf1405c7f2762bf35fb1bd11c5e7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.72", +] + [[package]] name = "synstructure" version = "0.12.6" @@ -5692,7 +5727,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -5778,7 +5813,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -5818,7 +5853,7 @@ checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -5916,7 +5951,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -6180,7 +6215,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -6288,6 +6323,23 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" +[[package]] +name = "ufotofu" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c773ca845a07603d8ebe7292a3cefa20bf41305ce91246332c7fc1e3029eecaa" +dependencies = [ + "either", + "ufotofu_queues", + "wrapper", +] + +[[package]] +name = "ufotofu_queues" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d903d5bc0e14d24559dac3b9690d004ad3fb08d66f93d87d28f5cb3466b5b55b" + [[package]] name = "unarray" version = "0.1.4" @@ -6484,7 +6536,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", "wasm-bindgen-shared", ] @@ -6518,7 +6570,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -6583,6 +6635,28 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" +[[package]] +name = "willow-data-model" +version = "0.1.0" +source = "git+https://github.com/Frando/willow-rs.git?branch=iroh#b98500eebb46bdbbb137f2b049c54f86bf49bcb6" +dependencies = [ + "bytes", + "either", + "syncify", + "ufotofu", + "willow-encoding", +] + +[[package]] +name = "willow-encoding" +version = "0.1.0" +source = "git+https://github.com/Frando/willow-rs.git?branch=iroh#b98500eebb46bdbbb137f2b049c54f86bf49bcb6" +dependencies = [ + "either", + "syncify", + "ufotofu", +] + [[package]] name = "winapi" version = "0.3.9" @@ -6682,7 +6756,7 @@ checksum = "12168c33176773b86799be25e2a2ba07c7aab9968b37541f1094dbd7a60c8946" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -6693,7 +6767,7 @@ checksum = "9d8dc32e0095a7eeccebd0e3f09e9509365ecb3fc6ac4d6f5f14a3f6392942d1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -6896,6 +6970,15 @@ dependencies = [ "windows 0.52.0", ] +[[package]] +name = "wrapper" +version = "0.1.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2bbc2d8fbd2f17a297df6d6492526a91b947c04ec80536c0877fd35df1c889d" +dependencies = [ + "maybe-std", +] + [[package]] name = "wyz" version = "0.5.1" @@ -7001,7 +7084,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] @@ -7012,7 +7095,7 @@ checksum = "76fc519c421ad48c6c8ba02cee449398d54276c839887f9f3562d1862b43b91c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.72", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 37706dd4cf..4ddb52cbd8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -41,3 +41,11 @@ missing_debug_implementations = "warn" [workspace.lints.clippy] unused-async = "warn" + +[patch.crates-io] +# willow-data-model = { path = "../willow-rs/data-model" } +# willow-encoding = { path = "../willow-rs/encoding" } +# meadowcap = { path = "../willow-rs/meadowcap" } +willow-data-model = { git = "https://github.com/Frando/willow-rs.git", branch = "iroh" } +willow-encoding = { git = "https://github.com/Frando/willow-rs.git", branch = "iroh" } +meadowcap = { git = "https://github.com/Frando/willow-rs.git", branch = "iroh" } diff --git a/iroh-willow/Cargo.toml b/iroh-willow/Cargo.toml index b4e03230ae..be77e3ca6d 100644 --- a/iroh-willow/Cargo.toml +++ b/iroh-willow/Cargo.toml @@ -17,33 +17,38 @@ workspace = true [dependencies] anyhow = "1" bytes = { version = "1.4", features = ["serde"] } -derive_more = { version = "1.0.0-beta.6", features = ["debug", "deref", "display", "from", "try_into", "into", "as_ref", "try_from"] } +curve25519-dalek = { version = "4.1.3", features = [ "digest", "rand_core", "serde", ] } +derive_more = { version = "1.0.0-beta.6", features = [ "debug", "deref", "display", "from", "try_into", "into", "as_ref", "try_from", ] } ed25519-dalek = { version = "2.0.0", features = ["serde", "rand_core"] } flume = "0.11" +futures-buffered = 
"0.2.6" futures-concurrency = "7.6.0" futures-lite = "2.3.0" futures-util = "0.3.30" genawaiter = "0.99.1" +hex = "0.4.3" iroh-base = { version = "0.22.0", path = "../iroh-base" } +iroh-blobs = { version = "0.22.0", path = "../iroh-blobs" } iroh-metrics = { version = "0.22.0", path = "../iroh-metrics", optional = true } iroh-net = { version = "0.22.0", path = "../iroh-net" } -iroh-blobs = { version = "0.22.0", path = "../iroh-blobs" } -postcard = { version = "1", default-features = false, features = ["alloc", "use-std", "experimental-derive"] } +meadowcap = "0.1.0" +postcard = { version = "1", default-features = false, features = [ "alloc", "use-std", "experimental-derive", ] } rand = "0.8.5" rand_core = "0.6.4" redb = { version = "2.0.0" } serde = { version = "1.0.164", features = ["derive"] } +sha2 = "0.10.8" strum = { version = "0.26", features = ["derive"] } +syncify = "0.1.0" thiserror = "1" tokio = { version = "1", features = ["sync"] } +tokio-stream = { version = "0.1.15", features = ["sync"] } tokio-util = { version = "0.7", features = ["io-util", "io"] } tracing = "0.1" +ufotofu = { version = "0.4.1", features = ["std"] } +willow-data-model = "0.1.0" +willow-encoding = "0.1.0" zerocopy = { version = "0.8.0-alpha.9", features = ["derive"] } -hex = "0.4.3" -curve25519-dalek = { version = "4.1.3", features = ["digest", "rand_core", "serde"] } -sha2 = "0.10.8" -futures-buffered = "0.2.6" -tokio-stream = { version = "0.1.15", features = ["sync"] } [dev-dependencies] iroh-test = { path = "../iroh-test" } From 682d4af402aad720f26bf09d1829840ee527451e Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Sun, 11 Aug 2024 10:33:46 +0200 Subject: [PATCH 118/198] [wip] refactor: start to use willow-rs --- iroh-willow/src/lib.rs | 22 +- iroh-willow/src/proto.rs | 5 +- iroh-willow/src/proto/challenge.rs | 119 +-- iroh-willow/src/proto/data_model.rs | 389 +++++++ iroh-willow/src/proto/grouping.rs | 730 ++----------- iroh-willow/src/proto/keys.rs | 159 ++- 
iroh-willow/src/proto/meadowcap.rs | 1413 +++++++++++++------------- iroh-willow/src/proto/pai.rs | 109 +- iroh-willow/src/proto/willow.rs | 484 --------- iroh-willow/src/session/challenge.rs | 108 ++ iroh-willow/src/session/error.rs | 2 + iroh-willow/src/util.rs | 16 + iroh-willow/tests/basic.rs | 854 ++++++++-------- 13 files changed, 2046 insertions(+), 2364 deletions(-) create mode 100644 iroh-willow/src/proto/data_model.rs delete mode 100644 iroh-willow/src/proto/willow.rs create mode 100644 iroh-willow/src/session/challenge.rs diff --git a/iroh-willow/src/lib.rs b/iroh-willow/src/lib.rs index 5115341647..ceedf2cde7 100644 --- a/iroh-willow/src/lib.rs +++ b/iroh-willow/src/lib.rs @@ -3,11 +3,21 @@ #![allow(missing_docs)] #![deny(unsafe_code)] -pub mod auth; -pub mod engine; -pub mod form; -pub mod net; +// pub mod auth; +// pub mod engine; +// pub mod form; +// pub mod net; pub mod proto; -pub mod session; -pub mod store; +// pub mod session; +// pub mod store; pub mod util; + +/// To break symmetry, we refer to the peer that initiated the synchronisation session as Alfie, +/// and the other peer as Betty. +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub enum Role { + /// The peer that initiated the synchronisation session. + Alfie, + /// The peer that accepted the synchronisation session. 
+ Betty, +} diff --git a/iroh-willow/src/proto.rs b/iroh-willow/src/proto.rs index d527548475..279fe07d0e 100644 --- a/iroh-willow/src/proto.rs +++ b/iroh-willow/src/proto.rs @@ -3,5 +3,6 @@ pub mod grouping; pub mod keys; pub mod meadowcap; pub mod pai; -pub mod sync; -pub mod willow; +// pub mod sync; +pub mod data_model; +pub use self::data_model as willow; diff --git a/iroh-willow/src/proto/challenge.rs b/iroh-willow/src/proto/challenge.rs index a0d469c867..e2a5b4e397 100644 --- a/iroh-willow/src/proto/challenge.rs +++ b/iroh-willow/src/proto/challenge.rs @@ -1,105 +1,56 @@ -use crate::{ - proto::sync::AccessChallengeBytes, - session::{Error, Role}, -}; +use iroh_base::base32::fmt_short; +use iroh_blobs::Hash; +use rand::Rng; +use rand_core::CryptoRngCore; +use serde::{Deserialize, Serialize}; -use super::{ - keys::{UserPublicKey, UserSecretKey, UserSignature}, - sync::{AccessChallenge, ChallengeHash}, -}; +use super::data_model::DIGEST_LENGTH; -#[derive(Debug)] -pub enum ChallengeState { - Committed { - our_nonce: AccessChallenge, - received_commitment: ChallengeHash, - }, - Revealed { - ours: AccessChallengeBytes, - theirs: AccessChallengeBytes, - }, -} +pub const CHALLENGE_LENGTH: usize = 32; +pub const CHALLENGE_HASH_LENGTH: usize = DIGEST_LENGTH; -impl ChallengeState { - pub fn reveal(&mut self, our_role: Role, their_nonce: AccessChallenge) -> Result<(), Error> { - match self { - Self::Committed { - our_nonce, - received_commitment, - } => { - if their_nonce.hash() != *received_commitment { - return Err(Error::BrokenCommittement); - } - let ours = match our_role { - Role::Alfie => bitwise_xor(our_nonce.to_bytes(), their_nonce.to_bytes()), - Role::Betty => { - bitwise_xor_complement(our_nonce.to_bytes(), their_nonce.to_bytes()) - } - }; - let theirs = bitwise_complement(ours); - *self = Self::Revealed { ours, theirs }; - Ok(()) - } - _ => Err(Error::InvalidMessageInCurrentState), - } - } +#[derive(derive_more::Debug, Copy, Clone, Eq, PartialEq, Serialize, 
Deserialize)] +pub struct ChallengeHash(#[debug("{}..", fmt_short(self.0))] [u8; CHALLENGE_HASH_LENGTH]); - pub fn is_revealed(&self) -> bool { - matches!(self, Self::Revealed { .. }) +impl ChallengeHash { + pub fn as_bytes(&self) -> &[u8] { + &self.0 } - pub fn sign(&self, secret_key: &UserSecretKey) -> Result { - let signable = self.signable()?; - let signature = secret_key.sign(&signable); - Ok(signature) + pub fn from_bytes(bytes: [u8; CHALLENGE_HASH_LENGTH]) -> Self { + Self(bytes) } +} - pub fn signable(&self) -> Result<[u8; 32], Error> { - let challenge = self.get_ours()?; - Ok(*challenge) - } +#[derive(derive_more::Debug, Copy, Clone, Serialize, Deserialize, Eq, PartialEq)] +pub struct AccessChallenge(#[debug("{}..", fmt_short(self.0))] AccessChallengeBytes); + +pub type AccessChallengeBytes = [u8; CHALLENGE_LENGTH]; - pub fn verify(&self, user_key: &UserPublicKey, signature: &UserSignature) -> Result<(), Error> { - let their_challenge = self.get_theirs()?; - user_key.verify(their_challenge, signature)?; - Ok(()) +impl Default for AccessChallenge { + fn default() -> Self { + Self::generate() } +} - fn get_ours(&self) -> Result<&AccessChallengeBytes, Error> { - match self { - Self::Revealed { ours, .. } => Ok(ours), - _ => Err(Error::InvalidMessageInCurrentState), - } +impl AccessChallenge { + pub fn generate() -> Self { + Self(rand::random()) } - fn get_theirs(&self) -> Result<&AccessChallengeBytes, Error> { - match self { - Self::Revealed { theirs, .. 
} => Ok(theirs), - _ => Err(Error::InvalidMessageInCurrentState), - } + pub fn generate_with_rng(rng: &mut impl CryptoRngCore) -> Self { + Self(rng.gen()) } -} -fn bitwise_xor(a: [u8; N], b: [u8; N]) -> [u8; N] { - let mut res = [0u8; N]; - for (i, (x1, x2)) in a.iter().zip(b.iter()).enumerate() { - res[i] = x1 ^ x2; + pub fn as_bytes(&self) -> &[u8] { + &self.0 } - res -} -fn bitwise_complement(a: [u8; N]) -> [u8; N] { - let mut res = [0u8; N]; - for (i, x) in a.iter().enumerate() { - res[i] = !x; + pub fn to_bytes(&self) -> [u8; 32] { + self.0 } - res -} -fn bitwise_xor_complement(a: [u8; N], b: [u8; N]) -> [u8; N] { - let mut res = [0u8; N]; - for (i, (x1, x2)) in a.iter().zip(b.iter()).enumerate() { - res[i] = !(x1 ^ x2); + pub fn hash(&self) -> ChallengeHash { + ChallengeHash(*Hash::new(self.0).as_bytes()) } - res } diff --git a/iroh-willow/src/proto/data_model.rs b/iroh-willow/src/proto/data_model.rs new file mode 100644 index 0000000000..be18ac2862 --- /dev/null +++ b/iroh-willow/src/proto/data_model.rs @@ -0,0 +1,389 @@ +use iroh_base::hash::Hash; +use willow_data_model::InvalidPathError; + +use super::{ + keys, + meadowcap::{self}, +}; + +/// A type for identifying namespaces. +pub type NamespaceId = keys::NamespaceId; + +/// A type for identifying subspaces. +pub type SubspaceId = keys::UserId; + +/// The capability type needed to authorize writes. +pub type WriteCapability = meadowcap::McCapability; + +/// A Timestamp is a 64-bit unsigned integer, that is, a natural number between zero (inclusive) and 2^64 - 1 (exclusive). +/// Timestamps are to be interpreted as a time in microseconds since the Unix epoch. +pub type Timestamp = willow_data_model::Timestamp; + +// A for proving write permission. +pub type AuthorisationToken = meadowcap::McAuthorisationToken; + +/// A natural number for limiting the length of path components. +pub const MAX_COMPONENT_LENGTH: usize = 4096; + +/// A natural number for limiting the number of path components. 
+pub const MAX_COMPONENT_COUNT: usize = 1024; + +/// A natural number max_path_length for limiting the overall size of paths. +pub const MAX_PATH_LENGTH: usize = 4096; + +/// The byte length of a [`PayloadDigest`]. +pub const DIGEST_LENGTH: usize = 32; + +pub type Component<'a> = willow_data_model::Component<'a, MAX_COMPONENT_LENGTH>; + +#[derive( + Debug, + Clone, + Copy, + Hash, + Eq, + PartialEq, + Ord, + PartialOrd, + derive_more::From, + derive_more::Into, + derive_more::Display, +)] +pub struct PayloadDigest(pub Hash); + +impl Default for PayloadDigest { + fn default() -> Self { + Self(Hash::from_bytes([0u8; 32])) + } +} + +// #[derive( +// Debug, +// Clone, +// Hash, +// Eq, +// PartialEq, +// Ord, +// PartialOrd, +// derive_more::From, +// derive_more::Into, +// derive_more::Deref, +// )] +// pub struct Path( +// willow_data_model::Path, +// ); + +pub type Path = willow_data_model::Path; + +#[derive(Debug, thiserror::Error)] +/// An error arising from trying to construct a invalid [`Path`] from valid components. +pub enum InvalidPathError2 { + /// One of the path's component is too large. + #[error("One of the path's component is too large.")] + ComponentTooLong(usize), + /// The path's total length in bytes is too large. + #[error("The path's total length in bytes is too large.")] + PathTooLong, + /// The path has too many components. 
+ #[error("The path has too many components.")] + TooManyComponents, +} + +impl From for InvalidPathError2 { + fn from(value: InvalidPathError) -> Self { + match value { + InvalidPathError::PathTooLong => Self::PathTooLong, + InvalidPathError::TooManyComponents => Self::TooManyComponents, + } + } +} + +pub trait PathExt { + fn new(slices: &[&[u8]]) -> Result; +} + +impl PathExt for Path { + fn new(slices: &[&[u8]]) -> Result { + let component_count = slices.len(); + let total_len = slices.iter().map(|x| x.len()).sum::(); + let iter = slices.iter().map(|c| Component::new(c)).flatten(); + // TODO: Avoid this alloc by adding willow_data_model::Path::try_new_from_iter or such. + let mut iter = iter.collect::>().into_iter(); + let path = willow_data_model::Path::new_from_iter(total_len, &mut iter)?; + if path.get_component_count() != component_count { + Err(InvalidPathError2::ComponentTooLong( + path.get_component_count(), + )) + } else { + Ok(path) + } + } +} + +#[derive(Debug, Clone, Eq, PartialEq, derive_more::From, derive_more::Into, derive_more::Deref)] +pub struct Entry( + willow_data_model::Entry< + MAX_COMPONENT_LENGTH, + MAX_COMPONENT_COUNT, + MAX_PATH_LENGTH, + NamespaceId, + SubspaceId, + PayloadDigest, + >, +); + +#[derive(Debug, Clone, derive_more::From, derive_more::Into, derive_more::Deref)] +pub struct AuthorisedEntry( + willow_data_model::AuthorisedEntry< + MAX_COMPONENT_LENGTH, + MAX_COMPONENT_COUNT, + MAX_PATH_LENGTH, + NamespaceId, + SubspaceId, + PayloadDigest, + AuthorisationToken, + >, +); + +// pub type Path = willow_data_model::Path; + +// pub type Entry = willow_data_model::Entry< +// MAX_COMPONENT_LENGTH, +// MAX_COMPONENT_COUNT, +// MAX_PATH_LENGTH, +// NamespaceId, +// SubspaceId, +// PayloadDigest, +// >; + +// pub type AuthorisedEntry = willow_data_model::AuthorisedEntry< +// MAX_COMPONENT_LENGTH, +// MAX_COMPONENT_COUNT, +// MAX_PATH_LENGTH, +// NamespaceId, +// SubspaceId, +// PayloadDigest, +// AuthorisationToken, +// >; + +impl 
willow_data_model::PayloadDigest for PayloadDigest {} + +use syncify::syncify; +use syncify::syncify_replace; + +#[syncify(encoding_sync)] +mod encoding { + #[syncify_replace(use ufotofu::sync::{BulkConsumer, BulkProducer};)] + use ufotofu::local_nb::{BulkConsumer, BulkProducer}; + + #[syncify_replace(use willow_encoding::sync::{Decodable, Encodable};)] + use willow_encoding::{Decodable, Encodable}; + + use super::*; + + impl Encodable for PayloadDigest { + async fn encode(&self, consumer: &mut Consumer) -> Result<(), Consumer::Error> + where + Consumer: BulkConsumer, + { + consumer + .bulk_consume_full_slice(self.0.as_bytes()) + .await + .map_err(|err| err.reason) + } + } + + impl Decodable for PayloadDigest { + async fn decode( + producer: &mut Producer, + ) -> Result> + where + Producer: BulkProducer, + Self: Sized, + { + let mut bytes = [0u8; DIGEST_LENGTH]; + producer.bulk_overwrite_full_slice(&mut bytes).await?; + Ok(Self(Hash::from_bytes(bytes))) + } + } +} + +// /// A PossiblyAuthorisedEntry is a pair of an Entry and an AuthorisationToken. +// #[derive(Debug, Serialize, Deserialize)] +// pub struct PossiblyAuthorisedEntry(Entry, AuthorisationToken); + +// impl PossiblyAuthorisedEntry { +// pub fn new(entry: Entry, authorisation_token: AuthorisationToken) -> Self { +// Self(entry, authorisation_token) +// } +// pub fn is_authorised(&self) -> bool { +// is_authorised_write(&self.0, &self.1) +// } + +// pub fn authorise(self) -> Result { +// match self.is_authorised() { +// true => Ok(AuthorisedEntry(self.0, self.1)), +// false => Err(Unauthorised), +// } +// } + +// pub fn into_parts(self) -> (Entry, AuthorisationToken) { +// (self.0, self.1) +// } +// } + +// impl TryFrom for AuthorisedEntry { +// type Error = Unauthorised; +// fn try_from(value: PossiblyAuthorisedEntry) -> Result { +// value.authorise() +// } +// } + +// /// An AuthorisedEntry is a PossiblyAuthorisedEntry for which is_authorised_write returns true. 
+// #[derive(Debug, Serialize, Deserialize, Clone)] +// pub struct AuthorisedEntry(Entry, AuthorisationToken); + +// impl AuthorisedEntry { +// pub fn try_from_parts( +// entry: Entry, +// static_token: StaticToken, +// dynamic_token: DynamicToken, +// ) -> Result { +// let authorisation_token = AuthorisationToken::from_parts(static_token, dynamic_token); +// PossiblyAuthorisedEntry::new(entry, authorisation_token).authorise() +// } + +// pub fn entry(&self) -> &Entry { +// &self.0 +// } + +// pub fn into_entry(self) -> Entry { +// self.0 +// } + +// pub fn is_authorised(&self) -> bool { +// true +// } + +// /// Use only if you can assure that the authorisation was previously checked! +// pub fn from_parts_unchecked(entry: Entry, authorisation_token: AuthorisationToken) -> Self { +// Self(entry, authorisation_token) +// } + +// pub fn into_parts(self) -> (Entry, AuthorisationToken) { +// (self.0, self.1) +// } + +// pub fn namespace_id(&self) -> NamespaceId { +// self.1.capability.granted_namespace().into() +// } +// } + +// // TODO: zerocopy support for path +// // #[allow(missing_debug_implementations)] +// // #[derive(KnownLayout, FromBytes, NoCell, Unaligned, IntoBytes)] +// // #[repr(C, packed)] +// // pub struct ComponentRef([u8]); +// // +// // #[allow(missing_debug_implementations)] +// // #[derive(KnownLayout, FromBytes, NoCell, Unaligned, IntoBytes)] +// // #[repr(C, packed)] +// // pub struct PathRef([ComponentRef]); +// // pub struct PathRef<'a>(&'a [&'a [u8]]); +// // impl<'a> AsRef> for Path { +// // fn as_ref(&'a self) -> &'a PathRef<'a> { +// // todo!() +// // } +// // } + +// pub mod encodings { +// //! Encoding for Willow entries +// //! +// //! TODO: Verify that these are correct accoring to the spec! These encodings are the message +// //! bytes for authorisation signatures, so we better not need to change them again. 
+ +// use std::io::Write; + +// use bytes::Bytes; + +// use crate::{ +// proto::willow::{NamespaceId, SubspaceId}, +// util::codec::Encoder, +// }; + +// use super::{Entry, Path, DIGEST_LENGTH}; + +// /// `PATH_LENGTH_POWER` is the least natural number such that `256 ^ PATH_LENGTH_POWER ≥ MAX_COMPONENT_LENGTH`. +// /// We can represent the length of any Component in path_length_power bytes. +// /// UPathLengthPower denotes the type of numbers between zero (inclusive) and 256path_length_power (exclusive). +// /// +// /// The value `2` means that we can encode paths up to 64KiB long. +// pub const PATH_LENGTH_POWER: usize = 2; +// pub const PATH_COUNT_POWER: usize = PATH_LENGTH_POWER; +// pub type UPathLengthPower = u16; +// pub type UPathCountPower = u16; + +// impl Encoder for Path { +// fn encoded_len(&self) -> usize { +// let lengths_len = PATH_COUNT_POWER + self.len() * PATH_LENGTH_POWER; +// let data_len = self.iter().map(Bytes::len).sum::(); +// lengths_len + data_len +// } + +// /// Encode in the format for signatures into a mutable vector. 
+// fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { +// let component_count = self.len() as UPathCountPower; +// out.write_all(&component_count.to_be_bytes())?; +// for component in self.iter() { +// let len = component.len() as UPathLengthPower; +// out.write_all(&len.to_be_bytes())?; +// out.write_all(component)?; +// } +// Ok(()) +// } +// } + +// impl Encoder for entry { +// fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { +// out.write_all(self.namespace_id.as_bytes())?; +// out.write_all(self.subspace_id.as_bytes())?; +// self.path.encode_into(out)?; +// out.write_all(&self.timestamp.to_be_bytes())?; +// out.write_all(&self.payload_length.to_be_bytes())?; +// out.write_all(self.payload_digest.as_bytes())?; +// Ok(()) +// } + +// fn encoded_len(&self) -> usize { +// let path_len = self.path.encoded_len(); +// NamespaceId::LENGTH + SubspaceId::LENGTH + path_len + 8 + 8 + DIGEST_LENGTH +// } +// } + +// #[derive(Debug, Clone)] +// pub struct RelativePath<'a> { +// pub path: &'a Path, +// pub reference: &'a Path, +// } +// impl<'a> RelativePath<'a> { +// pub fn new(path: &'a Path, reference: &'a Path) -> Self { +// Self { path, reference } +// } +// } + +// impl<'a> Encoder for RelativePath<'a> { +// fn encoded_len(&self) -> usize { +// let common_prefix_len = self.path.common_prefix_len(self.reference) as UPathCountPower; +// let remaining_path = self.path.remove_prefix(common_prefix_len as usize); +// PATH_COUNT_POWER + remaining_path.encoded_len() +// } + +// fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { +// let common_prefix_len = self.path.common_prefix_len(self.reference) as UPathCountPower; +// out.write_all(&common_prefix_len.to_be_bytes())?; +// let remaining_path = self.path.remove_prefix(common_prefix_len as usize); +// remaining_path.encode_into(out)?; +// Ok(()) +// } +// } +// } diff --git a/iroh-willow/src/proto/grouping.rs b/iroh-willow/src/proto/grouping.rs index 775a02770f..18da71db36 100644 --- 
a/iroh-willow/src/proto/grouping.rs +++ b/iroh-willow/src/proto/grouping.rs @@ -1,529 +1,100 @@ -use std::{cmp::Ordering, io}; +use willow_data_model::grouping::RangeEnd; -use bytes::Bytes; -use serde::{Deserialize, Serialize}; - -use crate::{ - proto::willow::encodings::RelativePath, - util::codec::{compact_width, CompactWidth, Encoder}, -}; - -use super::{ - keys::NamespaceId, - willow::{Entry, Path, SubspaceId, Timestamp}, +use super::data_model::{ + Entry, Path, SubspaceId, Timestamp, MAX_COMPONENT_COUNT, MAX_COMPONENT_LENGTH, MAX_PATH_LENGTH, }; -/// A three-dimensional range on a specific namespace. -#[derive(Debug)] -pub struct NamespacedRange { - /// The namespace - pub namespace: NamespaceId, - /// The 3DRange - pub range: ThreeDRange, -} - -/// A three-dimensional range that includes every [`Entry`] included in all three of its ranges. -#[derive(Debug, Serialize, Deserialize, Clone, Hash, Eq, PartialEq)] -pub struct ThreeDRange { - /// Range of [`SubspaceId`] - pub subspaces: Range, - /// Range of [`Path`] - pub paths: Range, - /// Range of [`Timestamp`] - pub times: Range, -} - -impl ThreeDRange { - /// Create a new range from its parts. - pub fn new(subspaces: Range, paths: Range, times: Range) -> Self { - Self { - subspaces, - paths, - times, - } - } - - /// Create a new range that covers everything. - pub fn full() -> Self { - Self::new(Default::default(), Default::default(), Default::default()) - } - - /// Create a new empty range. - pub fn empty() -> Self { - Self::new( - Default::default(), - Default::default(), - Range::new(0, RangeEnd::Closed(0)), - ) - } - - /// Returns `true` if `entry` is included in this range. - pub fn includes_entry(&self, entry: &Entry) -> bool { - self.subspaces.includes(&entry.subspace_id) - && self.paths.includes(&entry.path) - && self.times.includes(&entry.timestamp) - } - - /// Returns `true` if this range is completely empty. 
- pub fn is_empty(&self) -> bool { - self.subspaces.is_empty() || self.paths.is_empty() || self.times.is_empty() - } - - /// Returns the intersection between `self` and `other`. - pub fn intersection(&self, other: &ThreeDRange) -> Option { - let paths = self.paths.intersection(&other.paths)?; - let times = self.times.intersection(&other.times)?; - let subspaces = self.subspaces.intersection(&other.subspaces)?; - Some(Self { - paths, - times, - subspaces, - }) - } -} - -// pub trait Successor: Sized { -// fn successor(&self) -> Option; -// } -// -// impl Successor for Timestamp { -// fn successor(&self) -> Option { -// self.checked_add(1) -// } -// } - -/// Ranges are simple, one-dimensional ways of grouping Entries. +pub type Range = willow_data_model::grouping::Range; + +// /// A three-dimensional range that includes every [`Entry`] included in all three of its ranges. +// #[derive( +// Debug, Clone, Hash, Eq, PartialEq, derive_more::From, derive_more::Into, derive_more::Deref, +// )] +// pub struct Three3Range( +// willow_data_model::grouping::Range3d< +// MAX_COMPONENT_LENGTH, +// MAX_COMPONENT_COUNT, +// MAX_PATH_LENGTH, +// SubspaceId, +// >, +// ); + +/// A grouping of entries. +/// [Definition](https://willowprotocol.org/specs/grouping-entries/index.html#areas). 
+// #[derive( +// Debug, Clone, Eq, PartialEq, Hash, derive_more::From, derive_more::Into, derive_more::Deref, +// )] +// pub struct Area( +// willow_data_model::grouping::Area< +// MAX_COMPONENT_LENGTH, +// MAX_COMPONENT_COUNT, +// MAX_PATH_LENGTH, +// SubspaceId, +// >, +// ); + +pub type Three3Range = willow_data_model::grouping::Range3d< + MAX_COMPONENT_LENGTH, + MAX_COMPONENT_COUNT, + MAX_PATH_LENGTH, + SubspaceId, +>; + +pub type Area = willow_data_model::grouping::Area< + MAX_COMPONENT_LENGTH, + MAX_COMPONENT_COUNT, + MAX_PATH_LENGTH, + SubspaceId, +>; + +pub type AreaSubspace = willow_data_model::grouping::AreaSubspace; + +/// A grouping of [`crate::Entry`]s that are among the newest in some [store](https://willowprotocol.org/specs/data-model/index.html#store). /// -/// They can express groupings such as “last week’s Entries”. A range is either a closed range or an open range. -/// A closed range consists of a start value and an end value, an open range consists only of a start value. -/// A range includes all values greater than or equal to its start value and strictly less than its end value -/// (if it is has one). A range is empty if it includes no values. -#[derive(Debug, Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Hash)] -pub struct Range { - /// A value must be equal or greater than the `start` value to be included in the range. - pub start: T, - /// If [`RangeEnd::Open`], this is an open range. Otherwise, a value must be strictly less than - /// the `end` value to be included in the range. 
- pub end: RangeEnd, -} - -impl Ord for Range { - fn cmp(&self, other: &Self) -> Ordering { - match self.start.cmp(&other.start) { - Ordering::Less => Ordering::Less, - Ordering::Equal => Ordering::Greater, - Ordering::Greater => self.end.cmp(&other.end), - } - } -} - -impl PartialOrd for Range { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl From<(T, RangeEnd)> for Range { - fn from((start, end): (T, RangeEnd)) -> Self { - Range { start, end } - } -} - -impl Range { - /// Create a new range. - pub fn new(start: T, end: RangeEnd) -> Self { - Self { start, end } - } - - /// Returns `true` if this range is closed. - pub fn is_closed(&self) -> bool { - matches!(self.end, RangeEnd::Closed(_)) - } - - /// Returns `true` if this range is open. - pub fn is_open(&self) -> bool { - matches!(self.end, RangeEnd::Open) - } -} - -impl Range { - /// Create a new range that covers everything. - pub fn full() -> Self { - Self::new(T::default(), RangeEnd::Open) - } -} - -impl Default for Range { - fn default() -> Self { - Self::full() - } -} - -impl Range { - /// Create the intersection between this range and another range. - pub fn intersection(&self, other: &Self) -> Option { - let start = (&self.start).max(&other.start); - let end = match (&self.end, &other.end) { - (RangeEnd::Open, RangeEnd::Closed(b)) => RangeEnd::Closed(b), - (RangeEnd::Closed(a), RangeEnd::Closed(b)) => RangeEnd::Closed(a.min(b)), - (RangeEnd::Closed(a), RangeEnd::Open) => RangeEnd::Closed(a), - (RangeEnd::Open, RangeEnd::Open) => RangeEnd::Open, - }; - match end { - RangeEnd::Open => Some(Self::new(start.clone(), RangeEnd::Open)), - RangeEnd::Closed(t) if t >= start => { - Some(Self::new(start.clone(), RangeEnd::Closed(t.clone()))) - } - RangeEnd::Closed(_) => None, - } - } -} - -impl Range { - /// Returns `true` if this range includes nothing. 
- pub fn is_empty(&self) -> bool { - match &self.end { - RangeEnd::Open => false, - RangeEnd::Closed(t) => t <= &self.start, - } - } -} - -impl Range { - /// Returns `true` if `value` is included in this range. - pub fn includes(&self, value: &T) -> bool { - value >= &self.start && self.end.includes(value) - } - - /// Returns `true` if `other` range is fully included in this range. - pub fn includes_range(&self, other: &Range) -> bool { - self.start <= other.start && self.end >= other.end - } -} - -/// The end of a range, either open or closed. -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Copy, Hash)] -pub enum RangeEnd { - /// Closed end: a value has to be strictly less than the close value to be included. - Closed(T), - /// Open range (no end value) - Open, -} - -impl RangeEnd { - /// Returns `true` if this range is closed. - pub fn is_closed(&self) -> bool { - matches!(self, RangeEnd::Closed(_)) - } - - /// Returns `true` if this range is open. - pub fn is_open(&self) -> bool { - matches!(self, RangeEnd::Open) - } -} - -impl RangeEnd { - pub fn or_max(self, max: T) -> T { - match self { - Self::Closed(value) => value, - Self::Open => max, - } - } -} - -impl PartialOrd for RangeEnd { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for RangeEnd { - fn cmp(&self, other: &Self) -> Ordering { - match (self, other) { - (RangeEnd::Open, RangeEnd::Closed(_)) => Ordering::Greater, - (RangeEnd::Closed(_), RangeEnd::Open) => Ordering::Less, - (RangeEnd::Closed(a), RangeEnd::Closed(b)) => a.cmp(b), - (RangeEnd::Open, RangeEnd::Open) => Ordering::Equal, - } - } -} - -impl RangeEnd { - /// Returns `true` if the range end is open, or if `value` is strictly less than the range end. - pub fn includes(&self, value: &T) -> bool { - match self { - Self::Open => true, - Self::Closed(end) => value < end, - } - } -} - -/// A grouping of Entries that are among the newest in some store. 
-#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Hash, Ord, PartialOrd)] -pub struct AreaOfInterest { - /// To be included in this AreaOfInterest, an Entry must be included in the area. - pub area: Area, - /// To be included in this AreaOfInterest, an Entry’s timestamp must be among the max_count greatest Timestamps, unless max_count is zero. - pub max_count: u64, - /// The total payload_lengths of all included Entries is at most max_size, unless max_size is zero. - pub max_size: u64, -} - -impl From for AreaOfInterest { - fn from(value: Area) -> Self { - Self::new(value) - } -} - -impl AreaOfInterest { - pub fn new(area: Area) -> Self { - Self { - area, - max_count: 0, - max_size: 0, - } - } - /// Create a new [`AreaOfInterest`] that covers everything. - pub fn full() -> Self { - Self { - area: Area::full(), - max_count: 0, - max_size: 0, - } - } +/// [Definition](https://willowprotocol.org/specs/grouping-entries/index.html#aois). +#[derive(Debug, Clone, Eq, PartialEq, derive_more::From, derive_more::Into, derive_more::Deref)] +pub struct AreaOfInterest( + willow_data_model::grouping::AreaOfInterest< + MAX_COMPONENT_LENGTH, + MAX_COMPONENT_COUNT, + MAX_PATH_LENGTH, + SubspaceId, + >, +); - pub fn intersection(&self, other: &AreaOfInterest) -> Option { - let area = self.area.intersection(&other.area)?; - let max_count = match (self.max_count, other.max_count) { - (0, count) => count, - (count, 0) => count, - (a, b) => a.min(b), - }; - let max_size = match (self.max_size, other.max_size) { - (0, size) => size, - (size, 0) => size, - (a, b) => a.min(b), - }; - Some(Self { - area, - max_count, - max_size, - }) - } +pub trait AreaExt { + fn includes_point(&self, point: &Point) -> bool; + fn new_path(path: Path) -> Area; } -/// A grouping of Entries. 
-#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)] -pub struct Area { - /// To be included in this Area, an Entry’s subspace_id must be equal to the subspace_id, unless it is any. - pub subspace: SubspaceArea, - /// To be included in this Area, an Entry’s path must be prefixed by the path. - pub path: Path, - /// To be included in this Area, an Entry’s timestamp must be included in the times. - pub times: Range, -} - -impl Area { - pub const fn new(subspace: SubspaceArea, path: Path, times: Range) -> Self { - Self { - subspace, - path, - times, - } - } - - pub fn full() -> Self { - Self::new(SubspaceArea::Any, Path::empty(), Range::::FULL) - } - - pub fn empty() -> Self { - Self::new(SubspaceArea::Any, Path::empty(), Range::::EMPTY) - } - - pub fn path(path: Path) -> Self { - Self::new(SubspaceArea::Any, path, Default::default()) - } - - pub fn subspace(subspace_id: SubspaceId) -> Self { - Self::new( - SubspaceArea::Id(subspace_id), - Path::empty(), - Range::::FULL, - ) - } - - pub fn includes_entry(&self, entry: &Entry) -> bool { - self.includes(&entry.subspace_id, &entry.path, &entry.timestamp) - } - - pub fn includes(&self, subspace_id: &SubspaceId, path: &Path, timestamp: &Timestamp) -> bool { - self.subspace.includes_subspace(subspace_id) - && self.path.is_prefix_of(path) - && self.times.includes(timestamp) - } - - pub fn includes_point(&self, point: &Point) -> bool { - self.includes(&point.subspace_id, &point.path, &point.timestamp) - } - - pub fn includes_area(&self, other: &Area) -> bool { - self.subspace.includes(&other.subspace) - && self.path.is_prefix_of(&other.path) - && self.times.includes_range(&other.times) +impl AreaExt for Area { + fn includes_point(&self, point: &Point) -> bool { + self.includes_area(&point.into_area()) } - pub fn has_intersection(&self, other: &Area) -> bool { - self.includes_area(other) || other.includes_area(self) - } - - pub fn includes_range(&self, range: &ThreeDRange) -> bool { - let 
path_start = self.path.is_prefix_of(&range.paths.start); - let path_end = match &range.paths.end { - RangeEnd::Open => true, - RangeEnd::Closed(path) => self.path.is_prefix_of(path), - }; - let subspace_start = self.subspace.includes_subspace(&range.subspaces.start); - let subspace_end = match range.subspaces.end { - RangeEnd::Open => true, - RangeEnd::Closed(subspace) => self.subspace.includes_subspace(&subspace), - }; - subspace_start - && subspace_end - && path_start - && path_end - && self.times.includes_range(&range.times) - } - - pub fn into_range(&self) -> ThreeDRange { - let subspace_start = match self.subspace { - SubspaceArea::Any => SubspaceId::default(), - SubspaceArea::Id(id) => id, - }; - let subspace_end = match self.subspace { - SubspaceArea::Any => RangeEnd::Open, - SubspaceArea::Id(id) => subspace_range_end(id), - }; - let path_start = self.path.clone(); - let path_end = path_range_end(&self.path); - ThreeDRange { - subspaces: Range::new(subspace_start, subspace_end), - paths: Range::new(path_start, path_end), - times: self.times, - } - } - - pub fn intersection(&self, other: &Area) -> Option { - let subspace_id = self.subspace.intersection(&other.subspace)?; - let path = self.path.intersection(&other.path)?; - let times = self.times.intersection(&other.times)?; - Some(Self { - subspace: subspace_id, - times, - path, - }) - } -} - -pub fn path_range_end(path: &Path) -> RangeEnd { - if path.is_empty() { - RangeEnd::Open - } else { - let mut out = vec![]; - for component in path.iter().rev() { - // component can be incremented - if out.is_empty() && component.iter().any(|x| *x != 0xff) { - let mut bytes = component.to_vec(); - let incremented = increment_by_one(&mut bytes); - debug_assert!(incremented, "checked above"); - out.push(Bytes::from(bytes)); - break; - // component cannot be incremented - } else if out.is_empty() { - continue; - } else { - out.push(component.clone()) - } - } - if out.is_empty() { - RangeEnd::Open - } else { - 
out.reverse(); - RangeEnd::Closed(Path::new_unchecked(out)) - } - } -} - -pub fn subspace_range_end(id: SubspaceId) -> RangeEnd { - let mut bytes = id.to_bytes(); - if increment_by_one(&mut bytes) { - RangeEnd::Closed(SubspaceId::from_bytes_unchecked(bytes)) - } else { - RangeEnd::Open - } -} - -/// Increment a byte string by one, by incrementing the last byte that is not 255 by one. -/// -/// Returns false if all bytes are 255. -fn increment_by_one(value: &mut [u8]) -> bool { - for char in value.iter_mut().rev() { - if *char != 255 { - *char += 1; - return true; - } else { - *char = 0; - } + fn new_path(path: Path) -> Self { + Self::new(AreaSubspace::Any, path, Range::full()) } - false } -impl Range { - pub const FULL: Self = Self { - start: 0, - end: RangeEnd::Open, - }; - - pub const EMPTY: Self = Self { - start: 0, - end: RangeEnd::Closed(0), - }; -} - -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)] -pub enum SubspaceArea { - Any, - Id(SubspaceId), -} - -impl SubspaceArea { - pub fn is_any(&self) -> bool { - matches!(self, SubspaceArea::Any) - } +// impl Area { +// /// Create a new [`Area`]. 
+// pub fn new(subspace: AreaSubspace, path: Path, times: Range) -> Self { +// Self(willow_data_model::grouping::Area::new( +// subspace, +// path.into(), +// times, +// )) +// } - fn includes(&self, other: &SubspaceArea) -> bool { - match (self, other) { - (SubspaceArea::Any, SubspaceArea::Any) => true, - (SubspaceArea::Id(_), SubspaceArea::Any) => false, - (_, SubspaceArea::Id(id)) => self.includes_subspace(id), - } - } - fn includes_subspace(&self, subspace_id: &SubspaceId) -> bool { - match self { - Self::Any => true, - Self::Id(id) => id == subspace_id, - } - } +// pub fn includes_point(&self, point: &Point) -> bool { +// self.includes_area(&point.into_area()) +// } - fn intersection(&self, other: &Self) -> Option { - match (self, other) { - (Self::Any, Self::Any) => Some(Self::Any), - (Self::Id(a), Self::Any) => Some(Self::Id(*a)), - (Self::Any, Self::Id(b)) => Some(Self::Id(*b)), - (Self::Id(a), Self::Id(b)) if a == b => Some(Self::Id(*a)), - (Self::Id(_a), Self::Id(_b)) => None, - } - } -} +// pub fn path(path: Path) -> Self { +// Self::new(AreaSubspace::Any, path, Range::full()) +// } +// } /// A single point in the 3D range space. 
/// @@ -545,128 +116,18 @@ impl Point { } pub fn from_entry(entry: &Entry) -> Self { Self { - path: entry.path.clone(), - timestamp: entry.timestamp, - subspace_id: entry.subspace_id, + path: entry.path().clone().into(), + timestamp: entry.timestamp(), + subspace_id: *entry.subspace_id(), } } pub fn into_area(&self) -> Area { - let times = Range::new(self.timestamp, RangeEnd::Closed(self.timestamp + 1)); - Area::new(SubspaceArea::Id(self.subspace_id), self.path.clone(), times) - } -} - -#[derive(thiserror::Error, Debug)] -#[error("area is not included in outer area")] -pub struct NotIncluded; - -#[derive(Debug, Clone)] -pub struct AreaInArea<'a> { - a: &'a Area, - out: &'a Area, -} - -impl<'a> AreaInArea<'a> { - pub fn new(inner: &'a Area, outer: &'a Area) -> Result { - if outer.includes_area(inner) { - Ok(Self { - a: inner, - out: outer, - }) - } else { - Err(NotIncluded) - } - } - fn start_diff(&self) -> u64 { - let a = self.a.times; - let out = self.out.times; - Ord::min( - a.start.saturating_sub(out.start), - out.end.or_max(Timestamp::MAX) - a.start, - ) - } - - fn end_diff(&self) -> u64 { - let a = self.a.times; - let out = self.out.times; - Ord::min( - a.end.or_max(Timestamp::MAX).saturating_sub(out.start), - out.end - .or_max(Timestamp::MAX) - .saturating_sub(a.end.or_max(Timestamp::MAX)), - ) - } -} - -impl<'a> Encoder for AreaInArea<'a> { - fn encoded_len(&self) -> usize { - let subspace_is_same = self.a.subspace == self.out.subspace; - let mut len = 1; - if !subspace_is_same { - len += SubspaceId::LENGTH; - } - let relative_path = RelativePath::new(&self.a.path, &self.out.path); - len += relative_path.encoded_len(); - len += CompactWidth(self.start_diff()).encoded_len(); - if self.a.times.end.is_closed() { - len += CompactWidth(self.end_diff()).encoded_len(); - } - len - } - - fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { - let mut bits = 0u8; - let subspace_is_same = self.a.subspace == self.out.subspace; - if !subspace_is_same { - bits |= 
0b0000_0001; - } - if self.a.times.is_open() { - bits |= 0b0000_0010; - } - let start_diff = self.start_diff(); - let end_diff = self.start_diff(); - if start_diff == self.a.times.start.saturating_sub(self.out.times.start) { - bits |= 0b0000_0100; - } - if end_diff - == self - .a - .times - .end - .or_max(Timestamp::MAX) - .saturating_sub(self.a.times.start) - { - bits |= 0b0000_1000; - } - if let 4 | 8 = compact_width(start_diff) { - bits |= 0b0001_0000; - } - if let 2 | 8 = compact_width(start_diff) { - bits |= 0b0010_0000; - } - if let 4 | 8 = compact_width(end_diff) { - bits |= 0b0100_0000; - } - if let 2 | 8 = compact_width(end_diff) { - bits |= 0b1000_0000; - } - out.write_all(&[bits])?; - match self.a.subspace { - SubspaceArea::Any => { - debug_assert!(subspace_is_same, "outers subspace must be any"); - } - SubspaceArea::Id(subspace_id) => { - out.write_all(subspace_id.as_bytes())?; - } - } - let relative_path = RelativePath::new(&self.a.path, &self.out.path); - relative_path.encode_into(out)?; - CompactWidth(start_diff).encode_into(out)?; - if self.a.times.end.is_closed() { - CompactWidth(end_diff).encode_into(out)?; - } - Ok(()) + let times = Range { + start: self.timestamp, + end: RangeEnd::Closed(self.timestamp + 1), + }; + Area::new(AreaSubspace::Id(self.subspace_id), self.path.clone(), times) } } @@ -674,14 +135,17 @@ impl<'a> Encoder for AreaInArea<'a> { mod tests { use std::collections::HashSet; - use crate::proto::{grouping::Area, willow::Path}; + use crate::proto::{ + data_model::{Path, PathExt}, + grouping::{Area, AreaExt}, + }; #[test] fn area_eq() { let p1 = Path::new(&[b"foo", b"bar"]).unwrap(); - let a1 = Area::path(p1); + let a1 = Area::new_path(p1); let p2 = Path::new(&[b"foo", b"bar"]).unwrap(); - let a2 = Area::path(p2); + let a2 = Area::new_path(p2); assert_eq!(a1, a2); let mut set = HashSet::new(); set.insert(a1.clone()); diff --git a/iroh-willow/src/proto/keys.rs b/iroh-willow/src/proto/keys.rs index 98ad061ff2..4a10212520 100644 --- 
a/iroh-willow/src/proto/keys.rs +++ b/iroh-willow/src/proto/keys.rs @@ -8,7 +8,7 @@ use std::{cmp::Ordering, fmt, str::FromStr}; use derive_more::{AsRef, Deref, From, Into}; -use ed25519_dalek::{SignatureError, Signer, SigningKey, VerifyingKey}; +use ed25519_dalek::{SignatureError, Signer, SigningKey, Verifier, VerifyingKey}; use iroh_base::base32; use rand_core::CryptoRngCore; use serde::{Deserialize, Serialize}; @@ -553,3 +553,160 @@ impl FromStr for NamespaceId { NamespacePublicKey::from_str(s).map(|x| x.into()) } } + +mod willow_impls { + use crate::util::increment_by_one; + + use super::*; + + impl willow_data_model::SubspaceId for UserId { + fn successor(&self) -> Option { + match increment_by_one(self.as_bytes()) { + Some(bytes) => Some(Self::from_bytes_unchecked(bytes)), + None => None, + } + } + } + + impl willow_data_model::SubspaceId for UserPublicKey { + fn successor(&self) -> Option { + match increment_by_one(self.as_bytes()) { + Some(bytes) => Self::from_bytes(&bytes).ok(), + None => None, + } + } + } + + impl willow_data_model::NamespaceId for NamespaceId {} + impl willow_data_model::NamespaceId for NamespacePublicKey {} + + impl Verifier for UserPublicKey { + fn verify( + &self, + msg: &[u8], + signature: &UserSignature, + ) -> Result<(), ed25519_dalek::ed25519::Error> { + self.0.verify(msg, &signature.0) + } + } + + impl Verifier for UserId { + fn verify( + &self, + msg: &[u8], + signature: &UserSignature, + ) -> Result<(), ed25519_dalek::ed25519::Error> { + // TODO: Cache this in a global LRU cache. 
+ let key = self.into_public_key()?; + key.0.verify(msg, &signature.0) + } + } + + impl Verifier for NamespacePublicKey { + fn verify( + &self, + msg: &[u8], + signature: &NamespaceSignature, + ) -> Result<(), ed25519_dalek::ed25519::Error> { + self.0.verify(msg, &signature.0) + } + } + + impl Verifier for NamespaceId { + fn verify( + &self, + msg: &[u8], + signature: &NamespaceSignature, + ) -> Result<(), ed25519_dalek::ed25519::Error> { + // TODO: Cache this in a global LRU cache. + let key = self.into_public_key()?; + key.0.verify(msg, &signature.0) + } + } +} + +use syncify::syncify; +use syncify::syncify_replace; + +#[syncify(encoding_sync)] +mod encoding { + #[syncify_replace(use ufotofu::sync::BulkConsumer;)] + use ufotofu::local_nb::BulkConsumer; + + #[syncify_replace(use willow_encoding::sync::Encodable;)] + use willow_encoding::Encodable; + + use super::*; + + impl Encodable for NamespacePublicKey { + async fn encode(&self, consumer: &mut Consumer) -> Result<(), Consumer::Error> + where + Consumer: BulkConsumer, + { + consumer + .bulk_consume_full_slice(self.as_bytes()) + .await + .map_err(|err| err.reason) + } + } + + impl Encodable for UserPublicKey { + async fn encode(&self, consumer: &mut Consumer) -> Result<(), Consumer::Error> + where + Consumer: BulkConsumer, + { + consumer + .bulk_consume_full_slice(self.as_bytes()) + .await + .map_err(|err| err.reason) + } + } + + impl Encodable for NamespaceSignature { + async fn encode(&self, consumer: &mut Consumer) -> Result<(), Consumer::Error> + where + Consumer: BulkConsumer, + { + consumer + .bulk_consume_full_slice(&self.to_bytes()) + .await + .map_err(|err| err.reason) + } + } + + impl Encodable for UserSignature { + async fn encode(&self, consumer: &mut Consumer) -> Result<(), Consumer::Error> + where + Consumer: BulkConsumer, + { + consumer + .bulk_consume_full_slice(&self.to_bytes()) + .await + .map_err(|err| err.reason) + } + } + + impl Encodable for UserId { + async fn encode(&self, consumer: &mut 
Consumer) -> Result<(), Consumer::Error> + where + Consumer: BulkConsumer, + { + consumer + .bulk_consume_full_slice(self.as_bytes()) + .await + .map_err(|err| err.reason) + } + } + + impl Encodable for NamespaceId { + async fn encode(&self, consumer: &mut Consumer) -> Result<(), Consumer::Error> + where + Consumer: BulkConsumer, + { + consumer + .bulk_consume_full_slice(self.as_bytes()) + .await + .map_err(|err| err.reason) + } + } +} diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index 83e882e78a..e0a65b4e61 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -1,14 +1,6 @@ -use std::{io::Write, sync::Arc}; +use super::keys; -use serde::{Deserialize, Serialize}; - -use crate::{proto::grouping::NotIncluded, util::codec::Encoder}; - -use super::{ - grouping::{Area, AreaInArea}, - keys::{self, NamespaceSecretKey, UserSecretKey, PUBLIC_KEY_LENGTH, SIGNATURE_LENGTH}, - willow::{AuthorisedEntry, Entry, Unauthorised}, -}; +use willow_data_model::AuthorisationToken; pub type UserPublicKey = keys::UserPublicKey; pub type NamespacePublicKey = keys::NamespacePublicKey; @@ -17,720 +9,783 @@ pub type NamespaceId = keys::NamespaceId; pub type UserSignature = keys::UserSignature; pub type NamespaceSignature = keys::NamespaceSignature; +use super::data_model::{Entry, MAX_COMPONENT_COUNT, MAX_COMPONENT_LENGTH, MAX_PATH_LENGTH}; + #[derive(Debug, derive_more::From)] pub enum SecretKey { - User(UserSecretKey), - Namespace(NamespaceSecretKey), -} - -pub fn is_authorised_write(entry: &Entry, token: &MeadowcapAuthorisationToken) -> bool { - let (capability, signature) = token.as_parts(); - - capability.is_valid() - && capability.access_mode() == AccessMode::ReadWrite - && capability.granted_area().includes_entry(entry) - && capability - .receiver() - // TODO: This allocates each time, avoid - .verify(&entry.encode().expect("encoding not to fail"), signature) - .is_ok() -} - -pub fn create_token( - entry: 
&Entry, - capability: McCapability, - secret_key: &UserSecretKey, -) -> MeadowcapAuthorisationToken { - // TODO: This allocates each time, avoid - let signable = entry.encode().expect("encoding not to fail"); - let signature = secret_key.sign(&signable); - MeadowcapAuthorisationToken::from_parts(capability, signature) -} - -pub fn attach_authorisation( - entry: Entry, - capability: McCapability, - secret_key: &UserSecretKey, -) -> Result { - if capability.access_mode() != AccessMode::ReadWrite - || capability.granted_namespace().id() != entry.namespace_id - || !capability.granted_area().includes_entry(&entry) - || capability.receiver() != &secret_key.public_key() - { - return Err(InvalidParams); - } - let token = create_token(&entry, capability, secret_key); - Ok(AuthorisedEntry::from_parts_unchecked(entry, token)) -} - -#[derive(Debug, thiserror::Error)] -#[error("invalid parameters")] -pub struct InvalidParams; - -#[derive(Debug, thiserror::Error)] -#[error("invalid capability")] -pub struct InvalidCapability; - -/// To be used as an AuthorisationToken for Willow. -#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)] -pub struct MeadowcapAuthorisationToken { - /// Certifies that an Entry may be written. - pub capability: McCapability, - /// Proves that the Entry was created by the receiver of the capability. - pub signature: UserSignature, -} - -// TODO: We clone these a bunch where it wouldn't be needed if we could create a reference type to -// which the [`MeadowcapAuthorisationToken`] would deref to, but I couldn't make it work nice -// enough. -// #[derive(Debug, Clone, Eq, PartialEq)] -// pub struct MeadowcapAuthorisationTokenRef<'a> { -// /// Certifies that an Entry may be written. -// pub capability: &'a McCapability, -// /// Proves that the Entry was created by the receiver of the capability. 
-// pub signature: &'a UserSignature, -// } - -impl MeadowcapAuthorisationToken { - pub fn from_parts(capability: McCapability, signature: UserSignature) -> Self { - Self { - capability, - signature, - } - } - pub fn as_parts(&self) -> (&McCapability, &UserSignature) { - (&self.capability, &self.signature) - } - - pub fn into_parts(self) -> (McCapability, UserSignature) { - (self.capability, self.signature) - } -} - -impl From<(McCapability, UserSignature)> for MeadowcapAuthorisationToken { - fn from((capability, signature): (McCapability, UserSignature)) -> Self { - Self::from_parts(capability, signature) - } -} - -#[derive(Debug, Clone, derive_more::Deref, derive_more::Into)] -pub struct ValidatedCapability(McCapability); - -impl ValidatedCapability { - pub fn new(cap: McCapability) -> Result { - if cap.is_valid() { - Ok(Self(cap)) - } else { - Err(InvalidCapability) - } - } - - pub fn is_valid(&self) -> bool { - true - } - - pub fn new_unchecked(cap: McCapability) -> Self { - Self(cap) - } + User(keys::UserSecretKey), + Namespace(keys::NamespaceSecretKey), } -#[derive( - Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash, Ord, PartialOrd, derive_more::From, -)] -pub enum McCapability { - Communal(Arc), - Owned(Arc), -} - -impl McCapability { - pub fn new_owned( - namespace_secret: &NamespaceSecretKey, - user_key: UserPublicKey, - access_mode: AccessMode, - ) -> Self { - McCapability::Owned(Arc::new(OwnedCapability::new( - namespace_secret, - user_key, - access_mode, - ))) - } - - pub fn new_communal( - namespace_key: NamespacePublicKey, - user_key: UserPublicKey, - access_mode: AccessMode, - ) -> Self { - McCapability::Communal(Arc::new(CommunalCapability::new( - namespace_key, - user_key, - access_mode, - ))) - } - pub fn access_mode(&self) -> AccessMode { - match self { - Self::Communal(cap) => cap.access_mode, - Self::Owned(cap) => cap.access_mode, - } - } - pub fn receiver(&self) -> &UserPublicKey { - match self { - Self::Communal(cap) => 
cap.receiver(), - Self::Owned(cap) => cap.receiver(), - } - } - - pub fn granted_namespace(&self) -> &NamespacePublicKey { - match self { - Self::Communal(cap) => cap.granted_namespace(), - Self::Owned(cap) => cap.granted_namespace(), - } - } - - pub fn granted_area(&self) -> Area { - match self { - Self::Communal(cap) => cap.granted_area(), - Self::Owned(cap) => cap.granted_area(), - } - } - - pub fn try_granted_area(&self, area: &Area) -> Result<(), Unauthorised> { - if !self.granted_area().includes_area(area) { - Err(Unauthorised) - } else { - Ok(()) - } - } - - pub fn is_valid(&self) -> bool { - match self { - Self::Communal(cap) => cap.is_valid(), - Self::Owned(cap) => cap.is_valid(), - } - } - // pub fn validate(&self) -> Result<(), InvalidCapability> { - pub fn validate(&self) -> anyhow::Result<()> { - match self { - Self::Communal(cap) => cap.validate(), - Self::Owned(cap) => cap.validate(), - } - } - - pub fn delegations(&self) -> &[Delegation] { - match self { - Self::Communal(cap) => &cap.delegations, - Self::Owned(cap) => &cap.delegations, - } - } - - /// Returns `true` if `self` covers a larger area than `other`, - /// or if covers the same area and has less delegations. - pub fn is_wider_than(&self, other: &Self) -> bool { - (self.granted_area().includes_area(&other.granted_area())) - || (self.granted_area() == other.granted_area() - && self.delegations().len() < other.delegations().len()) - } - - pub fn delegate( - &self, - user_secret: &UserSecretKey, - new_user: UserPublicKey, - new_area: Area, - ) -> anyhow::Result { - let cap = match self { - Self::Communal(cap) => { - Self::Communal(Arc::new(cap.delegate(user_secret, new_user, new_area)?)) - } - Self::Owned(cap) => { - Self::Owned(Arc::new(cap.delegate(user_secret, new_user, new_area)?)) - } - }; - Ok(cap) - } -} - -impl Encoder for McCapability { - // TODO: Use spec-compliant encoding instead of postcard. 
- fn encoded_len(&self) -> usize { - postcard::experimental::serialized_size(&self).unwrap() - } - - // TODO: Use spec-compliant encoding instead of postcard. - fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { - postcard::to_io(&self, out)?; - Ok(()) +pub type McCapability = meadowcap::McCapability< + MAX_COMPONENT_LENGTH, + MAX_COMPONENT_COUNT, + MAX_PATH_LENGTH, + keys::NamespaceId, + keys::NamespaceSignature, + keys::UserId, + keys::UserSignature, +>; + +pub type McAuthorisationToken = meadowcap::McAuthorisationToken< + MAX_COMPONENT_LENGTH, + MAX_COMPONENT_COUNT, + MAX_PATH_LENGTH, + keys::NamespaceId, + keys::NamespaceSignature, + keys::UserId, + keys::UserSignature, +>; + +impl meadowcap::IsCommunal for NamespaceId { + fn is_communal(&self) -> bool { + self.as_bytes()[31] == 0 } } -impl Encoder for McSubspaceCapability { - // TODO: Use spec-compliant encoding instead of postcard. - fn encoded_len(&self) -> usize { - postcard::experimental::serialized_size(&self).unwrap() - } - - // TODO: Use spec-compliant encoding instead of postcard. - fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { - postcard::to_io(&self, out)?; - Ok(()) - } +pub fn is_authorised_write(entry: &Entry, token: &McAuthorisationToken) -> bool { + token.is_authorised_write(entry) } -#[derive(Debug, Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Hash, Ord, PartialOrd)] +#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Ord, PartialOrd)] pub enum AccessMode { ReadOnly, ReadWrite, } -/// A capability that authorizes reads or writes in communal namespaces. -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)] -pub struct CommunalCapability { - /// The kind of access this grants. - access_mode: AccessMode, - /// The namespace in which this grants access. - namespace_key: NamespacePublicKey, - /// The subspace for which and to whom this grants access. - /// - /// Remember that we assume SubspaceId and UserPublicKey to be the same types. 
- user_key: UserPublicKey, - /// Successive authorisations of new UserPublicKeys, each restricted to a particular Area. - delegations: Vec, -} +// use std::{io::Write, sync::Arc}; -impl CommunalCapability { - pub fn new( - namespace_key: NamespacePublicKey, - user_key: UserPublicKey, - access_mode: AccessMode, - ) -> Self { - Self { - access_mode, - namespace_key, - user_key, - delegations: Default::default(), - } - } - pub fn receiver(&self) -> &UserPublicKey { - match self.delegations.last() { - None => &self.user_key, - Some(Delegation(_, user_key, _)) => user_key, - } - } +// use serde::{Deserialize, Serialize}; - pub fn granted_namespace(&self) -> &NamespacePublicKey { - &self.namespace_key - } +// use crate::{proto::grouping::NotIncluded, util::codec::Encoder}; - pub fn granted_area(&self) -> Area { - match self.delegations.last() { - None => Area::subspace(self.user_key.into()), - Some(Delegation(area, _, _)) => area.clone(), - } - } +// use super::{ +// grouping::{Area, AreaInArea}, +// keys::{self, NamespaceSecretKey, UserSecretKey, PUBLIC_KEY_LENGTH, SIGNATURE_LENGTH}, +// willow::{AuthorisedEntry, Entry, Unauthorised}, +// }; - pub fn is_valid(&self) -> bool { - self.validate().is_ok() - } +// pub type UserPublicKey = keys::UserPublicKey; +// pub type NamespacePublicKey = keys::NamespacePublicKey; +// pub type UserId = keys::UserId; +// pub type NamespaceId = keys::NamespaceId; +// pub type UserSignature = keys::UserSignature; +// pub type NamespaceSignature = keys::NamespaceSignature; - pub fn validate(&self) -> anyhow::Result<()> { - if self.delegations.is_empty() { - // communal capabilities without delegations are always valid - Ok(()) - } else { - let mut prev = None; - let mut prev_receiver = &self.user_key; - for delegation in self.delegations.iter() { - let Delegation(new_area, new_user, new_signature) = &delegation; - let signable = self.handover(prev, new_area, new_user)?; - prev_receiver.verify(&signable, new_signature)?; - prev = 
Some((new_area, new_signature)); - prev_receiver = new_user; - } - Ok(()) - } - } +// #[derive(Debug, derive_more::From)] +// pub enum SecretKey { +// User(UserSecretKey), +// Namespace(NamespaceSecretKey), +// } - pub fn delegate( - &self, - user_secret: &UserSecretKey, - new_user: UserPublicKey, - new_area: Area, - ) -> anyhow::Result { - if user_secret.public_key() != *self.receiver() { - anyhow::bail!("Secret key does not match receiver of current capability"); - } - let prev = self - .delegations - .last() - .map(|Delegation(area, _user_key, sig)| (area, sig)); - let handover = self.handover(prev, &new_area, &new_user)?; - let signature = user_secret.sign(&handover); - let delegation = Delegation(new_area, new_user, signature); - let mut cap = self.clone(); - cap.delegations.push(delegation); - Ok(cap) - } +// pub fn is_authorised_write(entry: &Entry, token: &MeadowcapAuthorisationToken) -> bool { +// let (capability, signature) = token.as_parts(); + +// capability.is_valid() +// && capability.access_mode() == AccessMode::ReadWrite +// && capability.granted_area().includes_entry(entry) +// && capability +// .receiver() +// // TODO: This allocates each time, avoid +// .verify(&entry.encode().expect("encoding not to fail"), signature) +// .is_ok() +// } - fn handover( - &self, - prev: Option<(&Area, &UserSignature)>, - new_area: &Area, - new_user: &UserPublicKey, - ) -> anyhow::Result> { - match prev { - None => self.initial_handover(new_area, new_user), - Some((prev_area, prev_signature)) => Handover::new( - prev_area, - PrevSignature::User(prev_signature), - new_area, - new_user, - )? 
- .encode(), - } - } +// pub fn create_token( +// entry: &Entry, +// capability: McCapability, +// secret_key: &UserSecretKey, +// ) -> MeadowcapAuthorisationToken { +// // TODO: This allocates each time, avoid +// let signable = entry.encode().expect("encoding not to fail"); +// let signature = secret_key.sign(&signable); +// MeadowcapAuthorisationToken::from_parts(capability, signature) +// } - fn initial_handover( - &self, - new_area: &Area, - new_user: &UserPublicKey, - ) -> anyhow::Result> { - let prev_area = Area::subspace(self.user_key.into()); - let area_in_area = AreaInArea::new(new_area, &prev_area)?; - let len = - 1 + NamespacePublicKey::LENGTH + area_in_area.encoded_len() + UserPublicKey::LENGTH; - let mut out = std::io::Cursor::new(vec![0u8; len]); - let init = match self.access_mode { - AccessMode::ReadOnly => 0x00, - AccessMode::ReadWrite => 0x01, - }; - out.write_all(&[init])?; - out.write_all(&self.namespace_key.to_bytes())?; - area_in_area.encode_into(&mut out)?; - out.write_all(&new_user.to_bytes())?; - Ok(out.into_inner()) - } -} +// pub fn attach_authorisation( +// entry: Entry, +// capability: McCapability, +// secret_key: &UserSecretKey, +// ) -> Result { +// if capability.access_mode() != AccessMode::ReadWrite +// || capability.granted_namespace().id() != entry.namespace_id +// || !capability.granted_area().includes_entry(&entry) +// || capability.receiver() != &secret_key.public_key() +// { +// return Err(InvalidParams); +// } +// let token = create_token(&entry, capability, secret_key); +// Ok(AuthorisedEntry::from_parts_unchecked(entry, token)) +// } -#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, Hash, Ord, PartialOrd)] -pub struct Delegation(Area, UserPublicKey, UserSignature); - -/// A capability that authorizes reads or writes in owned namespaces. -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)] -pub struct OwnedCapability { - /// The kind of access this grants. 
- access_mode: AccessMode, - /// The namespace for which this grants access. - namespace_key: NamespacePublicKey, - /// The user to whom this grants access; granting access for the full namespace_key, not just to a subspace. - user_key: UserPublicKey, - /// Authorisation of the user_key by the namespace_key., - initial_authorisation: NamespaceSignature, - /// Successive authorisations of new UserPublicKeys, each restricted to a particular Area. - delegations: Vec, -} +// #[derive(Debug, thiserror::Error)] +// #[error("invalid parameters")] +// pub struct InvalidParams; -impl OwnedCapability { - pub fn new( - namespace_secret_key: &NamespaceSecretKey, - user_key: UserPublicKey, - access_mode: AccessMode, - ) -> Self { - let namespace_key = namespace_secret_key.public_key(); - let handover = Self::initial_handover(access_mode, &user_key); - let initial_authorisation = namespace_secret_key.sign(&handover); - Self { - access_mode, - namespace_key, - user_key, - initial_authorisation, - delegations: Default::default(), - } - } +// #[derive(Debug, thiserror::Error)] +// #[error("invalid capability")] +// pub struct InvalidCapability; - pub fn receiver(&self) -> &UserPublicKey { - match self.delegations.last() { - None => &self.user_key, - Some(Delegation(_, user_key, _)) => user_key, - } - } +// /// To be used as an AuthorisationToken for Willow. +// #[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)] +// pub struct MeadowcapAuthorisationToken { +// /// Certifies that an Entry may be written. +// pub capability: McCapability, +// /// Proves that the Entry was created by the receiver of the capability. +// pub signature: UserSignature, +// } - pub fn granted_namespace(&self) -> &NamespacePublicKey { - &self.namespace_key - } +// // TODO: We clone these a bunch where it wouldn't be needed if we could create a reference type to +// // which the [`MeadowcapAuthorisationToken`] would deref to, but I couldn't make it work nice +// // enough. 
+// // #[derive(Debug, Clone, Eq, PartialEq)] +// // pub struct MeadowcapAuthorisationTokenRef<'a> { +// // /// Certifies that an Entry may be written. +// // pub capability: &'a McCapability, +// // /// Proves that the Entry was created by the receiver of the capability. +// // pub signature: &'a UserSignature, +// // } + +// impl MeadowcapAuthorisationToken { +// pub fn from_parts(capability: McCapability, signature: UserSignature) -> Self { +// Self { +// capability, +// signature, +// } +// } +// pub fn as_parts(&self) -> (&McCapability, &UserSignature) { +// (&self.capability, &self.signature) +// } + +// pub fn into_parts(self) -> (McCapability, UserSignature) { +// (self.capability, self.signature) +// } +// } - pub fn granted_area(&self) -> Area { - match self.delegations.last() { - None => Area::full(), - Some(Delegation(area, _, _)) => area.clone(), - } - } +// impl From<(McCapability, UserSignature)> for MeadowcapAuthorisationToken { +// fn from((capability, signature): (McCapability, UserSignature)) -> Self { +// Self::from_parts(capability, signature) +// } +// } - pub fn is_valid(&self) -> bool { - self.validate().is_ok() - } +// #[derive(Debug, Clone, derive_more::Deref, derive_more::Into)] +// pub struct ValidatedCapability(McCapability); + +// impl ValidatedCapability { +// pub fn new(cap: McCapability) -> Result { +// if cap.is_valid() { +// Ok(Self(cap)) +// } else { +// Err(InvalidCapability) +// } +// } + +// pub fn is_valid(&self) -> bool { +// true +// } + +// pub fn new_unchecked(cap: McCapability) -> Self { +// Self(cap) +// } +// } - pub fn validate(&self) -> anyhow::Result<()> { - // verify root authorisation - let handover = Self::initial_handover(self.access_mode, &self.user_key); - self.namespace_key - .verify(&handover, &self.initial_authorisation)?; - - // no delegations: done - if self.delegations.is_empty() { - return Ok(()); - } - - let initial_area = Area::full(); - let mut prev = ( - &initial_area, - &self.user_key, - 
PrevSignature::Namespace(&self.initial_authorisation), - ); - for delegation in self.delegations.iter() { - let (prev_area, prev_user, prev_signature) = prev; - let Delegation(new_area, new_user, new_signature) = delegation; - let handover = - Handover::new(prev_area, prev_signature, new_area, new_user)?.encode()?; - prev_user.verify(&handover, new_signature)?; - prev = (new_area, new_user, PrevSignature::User(new_signature)); - } - Ok(()) - } +// #[derive( +// Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash, Ord, PartialOrd, derive_more::From, +// )] +// pub enum McCapability { +// Communal(Arc), +// Owned(Arc), +// } - fn initial_handover( - access_mode: AccessMode, - user_key: &UserPublicKey, - ) -> [u8; PUBLIC_KEY_LENGTH + 1] { - let mut signable = [0u8; PUBLIC_KEY_LENGTH + 1]; - // https://willowprotocol.org/specs/meadowcap/index.html#owned_cap_valid - // An OwnedCapability with zero delegations is valid if initial_authorisation - // is a NamespaceSignature issued by the namespace_key over - // either the byte 0x02 (if access_mode is read) - // or the byte 0x03 (if access_mode is write), - // followed by the user_key (encoded via encode_user_pk). 
- signable[0] = match access_mode { - AccessMode::ReadOnly => 0x02, - AccessMode::ReadWrite => 0x03, - }; - signable[1..].copy_from_slice(user_key.as_bytes()); - signable - } +// impl McCapability { +// pub fn new_owned( +// namespace_secret: &NamespaceSecretKey, +// user_key: UserPublicKey, +// access_mode: AccessMode, +// ) -> Self { +// McCapability::Owned(Arc::new(OwnedCapability::new( +// namespace_secret, +// user_key, +// access_mode, +// ))) +// } + +// pub fn new_communal( +// namespace_key: NamespacePublicKey, +// user_key: UserPublicKey, +// access_mode: AccessMode, +// ) -> Self { +// McCapability::Communal(Arc::new(CommunalCapability::new( +// namespace_key, +// user_key, +// access_mode, +// ))) +// } +// pub fn access_mode(&self) -> AccessMode { +// match self { +// Self::Communal(cap) => cap.access_mode, +// Self::Owned(cap) => cap.access_mode, +// } +// } +// pub fn receiver(&self) -> &UserPublicKey { +// match self { +// Self::Communal(cap) => cap.receiver(), +// Self::Owned(cap) => cap.receiver(), +// } +// } + +// pub fn granted_namespace(&self) -> &NamespacePublicKey { +// match self { +// Self::Communal(cap) => cap.granted_namespace(), +// Self::Owned(cap) => cap.granted_namespace(), +// } +// } + +// pub fn granted_area(&self) -> Area { +// match self { +// Self::Communal(cap) => cap.granted_area(), +// Self::Owned(cap) => cap.granted_area(), +// } +// } + +// pub fn try_granted_area(&self, area: &Area) -> Result<(), Unauthorised> { +// if !self.granted_area().includes_area(area) { +// Err(Unauthorised) +// } else { +// Ok(()) +// } +// } + +// pub fn is_valid(&self) -> bool { +// match self { +// Self::Communal(cap) => cap.is_valid(), +// Self::Owned(cap) => cap.is_valid(), +// } +// } +// // pub fn validate(&self) -> Result<(), InvalidCapability> { +// pub fn validate(&self) -> anyhow::Result<()> { +// match self { +// Self::Communal(cap) => cap.validate(), +// Self::Owned(cap) => cap.validate(), +// } +// } + +// pub fn delegations(&self) 
-> &[Delegation] { +// match self { +// Self::Communal(cap) => &cap.delegations, +// Self::Owned(cap) => &cap.delegations, +// } +// } + +// /// Returns `true` if `self` covers a larger area than `other`, +// /// or if covers the same area and has less delegations. +// pub fn is_wider_than(&self, other: &Self) -> bool { +// (self.granted_area().includes_area(&other.granted_area())) +// || (self.granted_area() == other.granted_area() +// && self.delegations().len() < other.delegations().len()) +// } + +// pub fn delegate( +// &self, +// user_secret: &UserSecretKey, +// new_user: UserPublicKey, +// new_area: Area, +// ) -> anyhow::Result { +// let cap = match self { +// Self::Communal(cap) => { +// Self::Communal(Arc::new(cap.delegate(user_secret, new_user, new_area)?)) +// } +// Self::Owned(cap) => { +// Self::Owned(Arc::new(cap.delegate(user_secret, new_user, new_area)?)) +// } +// }; +// Ok(cap) +// } +// } - pub fn delegate( - &self, - secret_key: &UserSecretKey, - new_user: UserPublicKey, - new_area: Area, - ) -> anyhow::Result { - if secret_key.public_key() != *self.receiver() { - anyhow::bail!("Secret key does not match receiver of current capability"); - } - let prev_signature = match self.delegations.last() { - None => PrevSignature::Namespace(&self.initial_authorisation), - Some(Delegation(_, _, prev_signature)) => PrevSignature::User(prev_signature), - }; - let prev_area = self.granted_area(); - let handover = Handover::new(&prev_area, prev_signature, &new_area, &new_user)?; - let signable = handover.encode()?; - let signature = secret_key.sign(&signable); - let delegation = Delegation(new_area, new_user, signature); - let mut cap = self.clone(); - cap.delegations.push(delegation); - Ok(cap) - } -} +// impl Encoder for McCapability { +// // TODO: Use spec-compliant encoding instead of postcard. 
+// fn encoded_len(&self) -> usize { +// postcard::experimental::serialized_size(&self).unwrap() +// } + +// // TODO: Use spec-compliant encoding instead of postcard. +// fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { +// postcard::to_io(&self, out)?; +// Ok(()) +// } +// } -#[derive(Debug)] -enum PrevSignature<'a> { - User(&'a UserSignature), - Namespace(&'a NamespaceSignature), -} +// impl Encoder for McSubspaceCapability { +// // TODO: Use spec-compliant encoding instead of postcard. +// fn encoded_len(&self) -> usize { +// postcard::experimental::serialized_size(&self).unwrap() +// } + +// // TODO: Use spec-compliant encoding instead of postcard. +// fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { +// postcard::to_io(&self, out)?; +// Ok(()) +// } +// } -impl<'a> PrevSignature<'a> { - fn to_bytes(&self) -> [u8; SIGNATURE_LENGTH] { - match self { - Self::User(sig) => sig.to_bytes(), - Self::Namespace(sig) => sig.to_bytes(), - } - } -} +// #[derive(Debug, Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Hash, Ord, PartialOrd)] +// pub enum AccessMode { +// ReadOnly, +// ReadWrite, +// } -#[derive(Debug)] -struct Handover<'a> { - prev_signature: PrevSignature<'a>, - new_user: &'a UserPublicKey, - area_in_area: AreaInArea<'a>, -} +// /// A capability that authorizes reads or writes in communal namespaces. +// #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)] +// pub struct CommunalCapability { +// /// The kind of access this grants. +// access_mode: AccessMode, +// /// The namespace in which this grants access. +// namespace_key: NamespacePublicKey, +// /// The subspace for which and to whom this grants access. +// /// +// /// Remember that we assume SubspaceId and UserPublicKey to be the same types. +// user_key: UserPublicKey, +// /// Successive authorisations of new UserPublicKeys, each restricted to a particular Area. 
+// delegations: Vec, +// } -impl<'a> Handover<'a> { - fn new( - prev_area: &'a Area, - prev_signature: PrevSignature<'a>, - new_area: &'a Area, - new_user: &'a UserPublicKey, - ) -> Result { - let area_in_area = AreaInArea::new(new_area, prev_area)?; - Ok(Self { - area_in_area, - prev_signature, - new_user, - }) - } -} +// impl CommunalCapability { +// pub fn new( +// namespace_key: NamespacePublicKey, +// user_key: UserPublicKey, +// access_mode: AccessMode, +// ) -> Self { +// Self { +// access_mode, +// namespace_key, +// user_key, +// delegations: Default::default(), +// } +// } +// pub fn receiver(&self) -> &UserPublicKey { +// match self.delegations.last() { +// None => &self.user_key, +// Some(Delegation(_, user_key, _)) => user_key, +// } +// } + +// pub fn granted_namespace(&self) -> &NamespacePublicKey { +// &self.namespace_key +// } + +// pub fn granted_area(&self) -> Area { +// match self.delegations.last() { +// None => Area::subspace(self.user_key.into()), +// Some(Delegation(area, _, _)) => area.clone(), +// } +// } + +// pub fn is_valid(&self) -> bool { +// self.validate().is_ok() +// } + +// pub fn validate(&self) -> anyhow::Result<()> { +// if self.delegations.is_empty() { +// // communal capabilities without delegations are always valid +// Ok(()) +// } else { +// let mut prev = None; +// let mut prev_receiver = &self.user_key; +// for delegation in self.delegations.iter() { +// let Delegation(new_area, new_user, new_signature) = &delegation; +// let signable = self.handover(prev, new_area, new_user)?; +// prev_receiver.verify(&signable, new_signature)?; +// prev = Some((new_area, new_signature)); +// prev_receiver = new_user; +// } +// Ok(()) +// } +// } + +// pub fn delegate( +// &self, +// user_secret: &UserSecretKey, +// new_user: UserPublicKey, +// new_area: Area, +// ) -> anyhow::Result { +// if user_secret.public_key() != *self.receiver() { +// anyhow::bail!("Secret key does not match receiver of current capability"); +// } +// let prev = 
self +// .delegations +// .last() +// .map(|Delegation(area, _user_key, sig)| (area, sig)); +// let handover = self.handover(prev, &new_area, &new_user)?; +// let signature = user_secret.sign(&handover); +// let delegation = Delegation(new_area, new_user, signature); +// let mut cap = self.clone(); +// cap.delegations.push(delegation); +// Ok(cap) +// } + +// fn handover( +// &self, +// prev: Option<(&Area, &UserSignature)>, +// new_area: &Area, +// new_user: &UserPublicKey, +// ) -> anyhow::Result> { +// match prev { +// None => self.initial_handover(new_area, new_user), +// Some((prev_area, prev_signature)) => Handover::new( +// prev_area, +// PrevSignature::User(prev_signature), +// new_area, +// new_user, +// )? +// .encode(), +// } +// } + +// fn initial_handover( +// &self, +// new_area: &Area, +// new_user: &UserPublicKey, +// ) -> anyhow::Result> { +// let prev_area = Area::subspace(self.user_key.into()); +// let area_in_area = AreaInArea::new(new_area, &prev_area)?; +// let len = +// 1 + NamespacePublicKey::LENGTH + area_in_area.encoded_len() + UserPublicKey::LENGTH; +// let mut out = std::io::Cursor::new(vec![0u8; len]); +// let init = match self.access_mode { +// AccessMode::ReadOnly => 0x00, +// AccessMode::ReadWrite => 0x01, +// }; +// out.write_all(&[init])?; +// out.write_all(&self.namespace_key.to_bytes())?; +// area_in_area.encode_into(&mut out)?; +// out.write_all(&new_user.to_bytes())?; +// Ok(out.into_inner()) +// } +// } -impl<'a> Encoder for Handover<'a> { - fn encoded_len(&self) -> usize { - self.area_in_area.encoded_len() + NamespaceSignature::LENGTH + UserId::LENGTH - } - fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { - self.area_in_area.encode_into(out)?; - out.write_all(&self.prev_signature.to_bytes())?; - out.write_all(&self.new_user.to_bytes())?; - Ok(()) - } -} +// #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, Hash, Ord, PartialOrd)] +// pub struct Delegation(Area, UserPublicKey, UserSignature); + +// /// A 
capability that authorizes reads or writes in owned namespaces. +// #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)] +// pub struct OwnedCapability { +// /// The kind of access this grants. +// access_mode: AccessMode, +// /// The namespace for which this grants access. +// namespace_key: NamespacePublicKey, +// /// The user to whom this grants access; granting access for the full namespace_key, not just to a subspace. +// user_key: UserPublicKey, +// /// Authorisation of the user_key by the namespace_key., +// initial_authorisation: NamespaceSignature, +// /// Successive authorisations of new UserPublicKeys, each restricted to a particular Area. +// delegations: Vec, +// } -#[derive( - Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash, derive_more::From, Ord, PartialOrd, -)] -/// A capability that certifies read access to arbitrary SubspaceIds at some unspecified Path. -pub struct McSubspaceCapability { - /// The namespace for which this grants access. 
- pub namespace_key: NamespacePublicKey, +// impl OwnedCapability { +// pub fn new( +// namespace_secret_key: &NamespaceSecretKey, +// user_key: UserPublicKey, +// access_mode: AccessMode, +// ) -> Self { +// let namespace_key = namespace_secret_key.public_key(); +// let handover = Self::initial_handover(access_mode, &user_key); +// let initial_authorisation = namespace_secret_key.sign(&handover); +// Self { +// access_mode, +// namespace_key, +// user_key, +// initial_authorisation, +// delegations: Default::default(), +// } +// } + +// pub fn receiver(&self) -> &UserPublicKey { +// match self.delegations.last() { +// None => &self.user_key, +// Some(Delegation(_, user_key, _)) => user_key, +// } +// } + +// pub fn granted_namespace(&self) -> &NamespacePublicKey { +// &self.namespace_key +// } + +// pub fn granted_area(&self) -> Area { +// match self.delegations.last() { +// None => Area::full(), +// Some(Delegation(area, _, _)) => area.clone(), +// } +// } + +// pub fn is_valid(&self) -> bool { +// self.validate().is_ok() +// } + +// pub fn validate(&self) -> anyhow::Result<()> { +// // verify root authorisation +// let handover = Self::initial_handover(self.access_mode, &self.user_key); +// self.namespace_key +// .verify(&handover, &self.initial_authorisation)?; + +// // no delegations: done +// if self.delegations.is_empty() { +// return Ok(()); +// } + +// let initial_area = Area::full(); +// let mut prev = ( +// &initial_area, +// &self.user_key, +// PrevSignature::Namespace(&self.initial_authorisation), +// ); +// for delegation in self.delegations.iter() { +// let (prev_area, prev_user, prev_signature) = prev; +// let Delegation(new_area, new_user, new_signature) = delegation; +// let handover = +// Handover::new(prev_area, prev_signature, new_area, new_user)?.encode()?; +// prev_user.verify(&handover, new_signature)?; +// prev = (new_area, new_user, PrevSignature::User(new_signature)); +// } +// Ok(()) +// } + +// fn initial_handover( +// access_mode: 
AccessMode, +// user_key: &UserPublicKey, +// ) -> [u8; PUBLIC_KEY_LENGTH + 1] { +// let mut signable = [0u8; PUBLIC_KEY_LENGTH + 1]; +// // https://willowprotocol.org/specs/meadowcap/index.html#owned_cap_valid +// // An OwnedCapability with zero delegations is valid if initial_authorisation +// // is a NamespaceSignature issued by the namespace_key over +// // either the byte 0x02 (if access_mode is read) +// // or the byte 0x03 (if access_mode is write), +// // followed by the user_key (encoded via encode_user_pk). +// signable[0] = match access_mode { +// AccessMode::ReadOnly => 0x02, +// AccessMode::ReadWrite => 0x03, +// }; +// signable[1..].copy_from_slice(user_key.as_bytes()); +// signable +// } + +// pub fn delegate( +// &self, +// secret_key: &UserSecretKey, +// new_user: UserPublicKey, +// new_area: Area, +// ) -> anyhow::Result { +// if secret_key.public_key() != *self.receiver() { +// anyhow::bail!("Secret key does not match receiver of current capability"); +// } +// let prev_signature = match self.delegations.last() { +// None => PrevSignature::Namespace(&self.initial_authorisation), +// Some(Delegation(_, _, prev_signature)) => PrevSignature::User(prev_signature), +// }; +// let prev_area = self.granted_area(); +// let handover = Handover::new(&prev_area, prev_signature, &new_area, &new_user)?; +// let signable = handover.encode()?; +// let signature = secret_key.sign(&signable); +// let delegation = Delegation(new_area, new_user, signature); +// let mut cap = self.clone(); +// cap.delegations.push(delegation); +// Ok(cap) +// } +// } - /// The user to whom this grants access. - pub user_key: UserPublicKey, +// #[derive(Debug)] +// enum PrevSignature<'a> { +// User(&'a UserSignature), +// Namespace(&'a NamespaceSignature), +// } - /// Authorisation of the user_key by the namespace_key. 
- pub initial_authorisation: NamespaceSignature, +// impl<'a> PrevSignature<'a> { +// fn to_bytes(&self) -> [u8; SIGNATURE_LENGTH] { +// match self { +// Self::User(sig) => sig.to_bytes(), +// Self::Namespace(sig) => sig.to_bytes(), +// } +// } +// } - /// Successive authorisations of new UserPublicKeys. - pub delegations: Vec<(UserPublicKey, UserSignature)>, -} +// #[derive(Debug)] +// struct Handover<'a> { +// prev_signature: PrevSignature<'a>, +// new_user: &'a UserPublicKey, +// area_in_area: AreaInArea<'a>, +// } -impl McSubspaceCapability { - pub fn new(namespace_secret_key: &NamespaceSecretKey, user_key: UserPublicKey) -> Self { - let namespace_key = namespace_secret_key.public_key(); - let handover = Self::initial_handover(&user_key); - let initial_authorisation = namespace_secret_key.sign(&handover); - Self { - namespace_key, - user_key, - initial_authorisation, - delegations: Default::default(), - } - } - pub fn receiver(&self) -> &UserPublicKey { - &self.user_key - } +// impl<'a> Handover<'a> { +// fn new( +// prev_area: &'a Area, +// prev_signature: PrevSignature<'a>, +// new_area: &'a Area, +// new_user: &'a UserPublicKey, +// ) -> Result { +// let area_in_area = AreaInArea::new(new_area, prev_area)?; +// Ok(Self { +// area_in_area, +// prev_signature, +// new_user, +// }) +// } +// } - pub fn granted_namespace(&self) -> &NamespacePublicKey { - &self.namespace_key - } +// impl<'a> Encoder for Handover<'a> { +// fn encoded_len(&self) -> usize { +// self.area_in_area.encoded_len() + NamespaceSignature::LENGTH + UserId::LENGTH +// } +// fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { +// self.area_in_area.encode_into(out)?; +// out.write_all(&self.prev_signature.to_bytes())?; +// out.write_all(&self.new_user.to_bytes())?; +// Ok(()) +// } +// } - pub fn validate(&self) -> anyhow::Result<()> { - let signable = Self::initial_handover(&self.user_key); - self.namespace_key - .verify(&signable, &self.initial_authorisation)?; - - if 
self.delegations.is_empty() { - return Ok(()); - } - - let mut prev = ( - &self.user_key, - PrevSignature::Namespace(&self.initial_authorisation), - ); - for delegation in &self.delegations { - let (prev_user, prev_signature) = prev; - let (new_user, new_signature) = delegation; - let handover = Self::handover(prev_signature, new_user); - prev_user.verify(&handover, new_signature)?; - prev = (new_user, PrevSignature::User(new_signature)); - } - Ok(()) - } +// #[derive( +// Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash, derive_more::From, Ord, PartialOrd, +// )] +// /// A capability that certifies read access to arbitrary SubspaceIds at some unspecified Path. +// pub struct McSubspaceCapability { +// /// The namespace for which this grants access. +// pub namespace_key: NamespacePublicKey, - pub fn is_valid(&self) -> bool { - self.validate().is_ok() - } +// /// The user to whom this grants access. +// pub user_key: UserPublicKey, - pub fn delegate( - &self, - secret_key: &UserSecretKey, - new_user: UserPublicKey, - ) -> anyhow::Result { - if secret_key.public_key() != *self.receiver() { - anyhow::bail!("Secret key does not match receiver of current capability"); - } - let prev_signature = match self.delegations.last() { - None => PrevSignature::Namespace(&self.initial_authorisation), - Some((_, prev_signature)) => PrevSignature::User(prev_signature), - }; - let handover = Self::handover(prev_signature, &new_user); - let signature = secret_key.sign(&handover); - let delegation = (new_user, signature); - let mut cap = self.clone(); - cap.delegations.push(delegation); - Ok(cap) - } +// /// Authorisation of the user_key by the namespace_key. 
+// pub initial_authorisation: NamespaceSignature, - fn handover( - prev_signature: PrevSignature, - new_user: &UserPublicKey, - ) -> [u8; PUBLIC_KEY_LENGTH + SIGNATURE_LENGTH] { - let mut out = [0u8; PUBLIC_KEY_LENGTH + SIGNATURE_LENGTH]; - out[..SIGNATURE_LENGTH].copy_from_slice(&prev_signature.to_bytes()); - out[SIGNATURE_LENGTH..].copy_from_slice(new_user.as_bytes()); - out - } +// /// Successive authorisations of new UserPublicKeys. +// pub delegations: Vec<(UserPublicKey, UserSignature)>, +// } - fn initial_handover(user_key: &UserPublicKey) -> [u8; PUBLIC_KEY_LENGTH + 1] { - let mut signable = [0u8; PUBLIC_KEY_LENGTH + 1]; - // A McSubspaceCapability with zero delegations is valid if initial_authorisation - // is a NamespaceSignature issued by the namespace_key over the byte 0x02, - // followed by the user_key (encoded via encode_user_pk). - // via https://willowprotocol.org/specs/pai/index.html#subspace_cap_valid - signable[0] = 0x02; - signable[1..].copy_from_slice(user_key.as_bytes()); - signable - } -} +// impl McSubspaceCapability { +// pub fn new(namespace_secret_key: &NamespaceSecretKey, user_key: UserPublicKey) -> Self { +// let namespace_key = namespace_secret_key.public_key(); +// let handover = Self::initial_handover(&user_key); +// let initial_authorisation = namespace_secret_key.sign(&handover); +// Self { +// namespace_key, +// user_key, +// initial_authorisation, +// delegations: Default::default(), +// } +// } +// pub fn receiver(&self) -> &UserPublicKey { +// &self.user_key +// } + +// pub fn granted_namespace(&self) -> &NamespacePublicKey { +// &self.namespace_key +// } + +// pub fn validate(&self) -> anyhow::Result<()> { +// let signable = Self::initial_handover(&self.user_key); +// self.namespace_key +// .verify(&signable, &self.initial_authorisation)?; + +// if self.delegations.is_empty() { +// return Ok(()); +// } + +// let mut prev = ( +// &self.user_key, +// PrevSignature::Namespace(&self.initial_authorisation), +// ); +// for 
delegation in &self.delegations { +// let (prev_user, prev_signature) = prev; +// let (new_user, new_signature) = delegation; +// let handover = Self::handover(prev_signature, new_user); +// prev_user.verify(&handover, new_signature)?; +// prev = (new_user, PrevSignature::User(new_signature)); +// } +// Ok(()) +// } + +// pub fn is_valid(&self) -> bool { +// self.validate().is_ok() +// } + +// pub fn delegate( +// &self, +// secret_key: &UserSecretKey, +// new_user: UserPublicKey, +// ) -> anyhow::Result { +// if secret_key.public_key() != *self.receiver() { +// anyhow::bail!("Secret key does not match receiver of current capability"); +// } +// let prev_signature = match self.delegations.last() { +// None => PrevSignature::Namespace(&self.initial_authorisation), +// Some((_, prev_signature)) => PrevSignature::User(prev_signature), +// }; +// let handover = Self::handover(prev_signature, &new_user); +// let signature = secret_key.sign(&handover); +// let delegation = (new_user, signature); +// let mut cap = self.clone(); +// cap.delegations.push(delegation); +// Ok(cap) +// } + +// fn handover( +// prev_signature: PrevSignature, +// new_user: &UserPublicKey, +// ) -> [u8; PUBLIC_KEY_LENGTH + SIGNATURE_LENGTH] { +// let mut out = [0u8; PUBLIC_KEY_LENGTH + SIGNATURE_LENGTH]; +// out[..SIGNATURE_LENGTH].copy_from_slice(&prev_signature.to_bytes()); +// out[SIGNATURE_LENGTH..].copy_from_slice(new_user.as_bytes()); +// out +// } + +// fn initial_handover(user_key: &UserPublicKey) -> [u8; PUBLIC_KEY_LENGTH + 1] { +// let mut signable = [0u8; PUBLIC_KEY_LENGTH + 1]; +// // A McSubspaceCapability with zero delegations is valid if initial_authorisation +// // is a NamespaceSignature issued by the namespace_key over the byte 0x02, +// // followed by the user_key (encoded via encode_user_pk). 
+// // via https://willowprotocol.org/specs/pai/index.html#subspace_cap_valid +// signable[0] = 0x02; +// signable[1..].copy_from_slice(user_key.as_bytes()); +// signable +// } +// } -#[cfg(test)] -mod tests { - use rand_core::SeedableRng; - - use crate::proto::{ - grouping::Area, - keys::{NamespaceKind, NamespaceSecretKey, UserSecretKey}, - }; - - use super::{AccessMode, McCapability}; - - #[test] - fn delegate_owned() { - let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(1); - let namespace_secret = NamespaceSecretKey::generate(&mut rng, NamespaceKind::Owned); - let alfie_secret = UserSecretKey::generate(&mut rng); - let betty_secret = UserSecretKey::generate(&mut rng); - let alfie_public = alfie_secret.public_key(); - let betty_public = betty_secret.public_key(); - let cap = McCapability::new_owned(&namespace_secret, alfie_public, AccessMode::ReadWrite); - cap.validate().expect("cap to be valid"); - let cap_betty = cap - .delegate(&alfie_secret, betty_public, Area::full()) - .expect("not to fail"); - cap_betty.validate().expect("cap to be valid"); - let conny_secret = UserSecretKey::generate(&mut rng); - let conny_public = conny_secret.public_key(); - let cap_conny = cap_betty - .delegate( - &betty_secret, - conny_public, - Area::subspace(conny_public.id()), - ) - .expect("not to fail"); - cap_conny.validate().expect("cap to be valid"); - assert_eq!(cap_conny.granted_area(), Area::subspace(conny_public.id())); - assert_eq!(cap_conny.receiver(), &conny_public); - } -} +// #[cfg(test)] +// mod tests { +// use rand_core::SeedableRng; + +// use crate::proto::{ +// grouping::Area, +// keys::{NamespaceKind, NamespaceSecretKey, UserSecretKey}, +// }; + +// use super::{AccessMode, McCapability}; + +// #[test] +// fn delegate_owned() { +// let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(1); +// let namespace_secret = NamespaceSecretKey::generate(&mut rng, NamespaceKind::Owned); +// let alfie_secret = UserSecretKey::generate(&mut rng); +// let betty_secret = 
UserSecretKey::generate(&mut rng); +// let alfie_public = alfie_secret.public_key(); +// let betty_public = betty_secret.public_key(); +// let cap = McCapability::new_owned(&namespace_secret, alfie_public, AccessMode::ReadWrite); +// cap.validate().expect("cap to be valid"); +// let cap_betty = cap +// .delegate(&alfie_secret, betty_public, Area::full()) +// .expect("not to fail"); +// cap_betty.validate().expect("cap to be valid"); +// let conny_secret = UserSecretKey::generate(&mut rng); +// let conny_public = conny_secret.public_key(); +// let cap_conny = cap_betty +// .delegate( +// &betty_secret, +// conny_public, +// Area::subspace(conny_public.id()), +// ) +// .expect("not to fail"); +// cap_conny.validate().expect("cap to be valid"); +// assert_eq!(cap_conny.granted_area(), Area::subspace(conny_public.id())); +// assert_eq!(cap_conny.receiver(), &conny_public); +// } +// } diff --git a/iroh-willow/src/proto/pai.rs b/iroh-willow/src/proto/pai.rs index 361da84590..600f4704b4 100644 --- a/iroh-willow/src/proto/pai.rs +++ b/iroh-willow/src/proto/pai.rs @@ -8,16 +8,16 @@ //! 
[RFC 9380]: https://www.rfc-editor.org/rfc/rfc9380 use curve25519_dalek::{ristretto::CompressedRistretto, RistrettoPoint, Scalar}; +use ufotofu::sync::consumer::IntoVec; +use willow_encoding::sync::Encodable; -use crate::{ - proto::{ - grouping::SubspaceArea, - sync::ReadCapability, - willow::{NamespaceId, Path, SubspaceId}, - }, - util::codec::Encoder, +use crate::proto::{ + data_model::{NamespaceId, Path, SubspaceId}, + grouping::AreaSubspace, }; +type ReadCapability = super::meadowcap::McCapability; + #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct PsiGroup(RistrettoPoint); @@ -45,7 +45,13 @@ pub struct PaiScheme; impl PaiScheme { pub fn hash_into_group(fragment: &Fragment) -> PsiGroup { - let encoded = fragment.encode().expect("encoding not to fail"); + let encoded = { + let mut consumer = IntoVec::::new(); + fragment + .encode(&mut consumer) + .expect("encoding not to fail"); + consumer.into_vec() + }; let point = RistrettoPoint::hash_from_bytes::(&encoded); PsiGroup(point) } @@ -64,13 +70,13 @@ impl PaiScheme { pub fn get_fragment_kit(cap: &ReadCapability) -> FragmentKit { let granted_area = cap.granted_area(); - let granted_namespace = cap.granted_namespace().id(); - let granted_path = granted_area.path.clone(); + let granted_namespace = cap.granted_namespace(); + let granted_path = granted_area.path().clone(); - match granted_area.subspace { - SubspaceArea::Any => FragmentKit::Complete(granted_namespace, granted_path), - SubspaceArea::Id(granted_subspace) => { - FragmentKit::Selective(granted_namespace, granted_subspace, granted_path) + match granted_area.subspace() { + AreaSubspace::Any => FragmentKit::Complete(*granted_namespace, granted_path), + AreaSubspace::Id(granted_subspace) => { + FragmentKit::Selective(*granted_namespace, *granted_subspace, granted_path) } } } @@ -83,38 +89,13 @@ pub enum Fragment { } impl Fragment { - pub fn into_parts(self) -> (NamespaceId, SubspaceArea, Path) { - match self { - Fragment::Pair((namespace_id, path)) => 
(namespace_id, SubspaceArea::Any, path), - Fragment::Triple((namespace_id, subspace_id, path)) => { - (namespace_id, SubspaceArea::Id(subspace_id), path) - } - } - } -} - -impl Encoder for Fragment { - fn encoded_len(&self) -> usize { - match self { - Fragment::Pair((_, path)) => NamespaceId::LENGTH + path.encoded_len(), - Fragment::Triple((_, _, path)) => { - NamespaceId::LENGTH + SubspaceId::LENGTH + path.encoded_len() - } - } - } - fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { + pub fn into_parts(self) -> (NamespaceId, AreaSubspace, Path) { match self { - Fragment::Pair((namespace_id, path)) => { - out.write_all(namespace_id.as_bytes())?; - path.encode_into(out)?; - } + Fragment::Pair((namespace_id, path)) => (namespace_id, AreaSubspace::Any, path), Fragment::Triple((namespace_id, subspace_id, path)) => { - out.write_all(namespace_id.as_bytes())?; - out.write_all(subspace_id.as_bytes())?; - path.encode_into(out)?; + (namespace_id, AreaSubspace::Id(subspace_id), path) } } - Ok(()) } } @@ -161,14 +142,12 @@ impl FragmentKit { FragmentSet::Complete(pairs) } FragmentKit::Selective(namespace_id, subspace_id, path) => { - let all_prefixes = path.all_prefixes(); - let primary = all_prefixes - .iter() - .cloned() + let primary = path + .all_prefixes() .map(|prefix| (namespace_id, subspace_id, prefix)) .collect(); - let secondary = all_prefixes - .into_iter() + let secondary = path + .all_prefixes() .map(|prefix| (namespace_id, prefix)) .collect(); FragmentSet::Selective { primary, secondary } @@ -176,3 +155,37 @@ impl FragmentKit { } } } + +use syncify::syncify; +use syncify::syncify_replace; + +#[syncify(encoding_sync)] +mod encoding { + #[syncify_replace(use ufotofu::sync::BulkConsumer;)] + use ufotofu::local_nb::BulkConsumer; + + #[syncify_replace(use willow_encoding::sync::Encodable;)] + use willow_encoding::Encodable; + + use super::*; + + impl Encodable for Fragment { + async fn encode(&self, consumer: &mut Consumer) -> Result<(), Consumer::Error> + 
where + Consumer: BulkConsumer, + { + match self { + Fragment::Pair((namespace_id, path)) => { + namespace_id.encode(consumer).await?; + path.encode(consumer).await?; + } + Fragment::Triple((namespace_id, subspace_id, path)) => { + namespace_id.encode(consumer).await?; + subspace_id.encode(consumer).await?; + path.encode(consumer).await?; + } + } + Ok(()) + } + } +} diff --git a/iroh-willow/src/proto/willow.rs b/iroh-willow/src/proto/willow.rs deleted file mode 100644 index 29f2e729c6..0000000000 --- a/iroh-willow/src/proto/willow.rs +++ /dev/null @@ -1,484 +0,0 @@ -use std::{cmp::Ordering, sync::Arc}; - -use bytes::Bytes; -use iroh_base::hash::Hash; -use serde::{Deserialize, Serialize}; - -use crate::util::time::system_time_now; - -use super::{ - keys::{self, UserSecretKey}, - meadowcap::{self, attach_authorisation, is_authorised_write, InvalidParams, McCapability}, - sync::{DynamicToken, StaticToken}, -}; - -/// A type for identifying namespaces. -pub type NamespaceId = keys::NamespaceId; - -/// A type for identifying subspaces. -pub type SubspaceId = keys::UserId; - -/// The capability type needed to authorize writes. -pub type WriteCapability = McCapability; - -/// A Timestamp is a 64-bit unsigned integer, that is, a natural number between zero (inclusive) and 2^64 - 1 (exclusive). -/// Timestamps are to be interpreted as a time in microseconds since the Unix epoch. -pub type Timestamp = u64; - -/// A totally ordered type for content-addressing the data that Willow stores. -pub type PayloadDigest = Hash; - -/// The type of components of a [`Path`]. -pub type Component = Bytes; - -// A for proving write permission. -pub type AuthorisationToken = meadowcap::MeadowcapAuthorisationToken; - -/// A natural number for limiting the length of path components. -pub const MAX_COMPONENT_LENGTH: usize = 4096; - -/// A natural number for limiting the number of path components. 
-pub const MAX_COMPONENT_COUNT: usize = 1024; - -/// A natural number max_path_length for limiting the overall size of paths. -pub const MAX_PATH_LENGTH: usize = 4096; - -/// The byte length of a [`PayloadDigest`]. -pub const DIGEST_LENGTH: usize = 32; - -/// Error returned for entries that are not authorised. -/// -/// See [`is_authorised_write`] for details. -#[derive(Debug, thiserror::Error)] -#[error("Entry is not authorised")] -pub struct Unauthorised; - -/// Error returned for invalid paths. -#[derive(Debug, thiserror::Error)] -#[error("Entry is not authorised")] -pub enum InvalidPath { - #[error("Component with index {0} exceeds the maximum component length")] - ComponentTooLong(usize), - #[error("The path exceeds the maximum component length")] - PathTooLong, - #[error("The path exceeds the maximum component count")] - TooManyComponents, -} - -/// A [`Path`] is a sequence of at most [`MAX_COMPONENT_COUNT`] many bytestrings, -/// each of at most [`MAX_COMPONENT_LENGTH`] bytes, and whose total number of bytes -/// is at most [`MAX_PATH_LENGTH`]. -/// -/// The bytestrings that make up a [`Path`] are called its [`Component`]s. 
-#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash)] -pub struct Path(Arc<[Component]>); - -impl Default for Path { - fn default() -> Self { - Path::empty() - } -} -impl Path { - pub fn new(components: &[&[u8]]) -> Result { - Self::validate(components)?; - let components: Vec = components - .iter() - .map(|c| Bytes::copy_from_slice(c)) - .collect(); - Ok(Self::new_unchecked(components)) - } - - pub fn new_unchecked(components: Vec) -> Self { - let path: Arc<[Component]> = components.into(); - Path(path) - } - - pub fn from_components(components: &[Component]) -> Self { - let path: Arc<[Component]> = components.to_vec().into(); - Self(path) - } - - pub fn validate(components: &[&[u8]]) -> Result<(), InvalidPath> { - if components.len() > MAX_COMPONENT_COUNT { - return Err(InvalidPath::TooManyComponents); - } - let mut total_len = 0; - for (i, component) in components.iter().enumerate() { - let len = component.len(); - if len > MAX_COMPONENT_LENGTH { - return Err(InvalidPath::ComponentTooLong(i)); - } - total_len += len; - } - if total_len > MAX_PATH_LENGTH { - return Err(InvalidPath::PathTooLong); - } - Ok(()) - } - - /// A `Path` `s` is a prefix of a `Path` `t` if the first [`Component`]s of `t` are exactly the `Component`s of `s`. - pub fn is_prefix_of(&self, other: &Path) -> bool { - other.0.starts_with(&self.0) - } - - /// Create an empty path. 
- pub fn empty() -> Self { - Self(Arc::new([])) - } - - pub fn intersection(&self, other: &Path) -> Option { - if self.is_prefix_of(other) { - Some(other.clone()) - } else if other.is_prefix_of(self) { - Some(self.clone()) - } else { - None - } - // if self.is_prefix_of(other) { - // Some(self.clone()) - // } else if other.is_prefix_of(self) { - // Some(other.clone()) - // } else { - // None - // } - } - - pub fn common_prefix(&self, other: &Path) -> &[Component] { - &self[..self.common_prefix_len(other)] - } - - pub fn common_prefix_len(&self, other: &Path) -> usize { - self.iter() - .zip(other.iter()) - .take_while(|(a, b)| a == b) - .count() - } - - pub fn remove_prefix(&self, count: usize) -> Path { - let start = count.min(self.len()); - Self::new_unchecked(self[start..].to_vec()) - } - - pub fn component_count(&self) -> usize { - self.0.len() - } - - pub fn components(&self) -> &[Component] { - &self.0 - } - - pub fn all_prefixes(&self) -> Vec { - let mut out = vec![Path::empty()]; - let components = self.components(); - if components.is_empty() { - return out; - } - for i in 1..=components.len() { - let prefix = Path::from_components(&components[..i]); - out.push(prefix); - } - out - } -} - -impl std::ops::Deref for Path { - type Target = [Component]; - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl PartialOrd for Path { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for Path { - fn cmp(&self, other: &Self) -> Ordering { - for (i, component) in self.iter().enumerate() { - match other.get(i) { - Some(other_component) => match component.cmp(other_component) { - Ordering::Equal => continue, - ordering => return ordering, - }, - None => return Ordering::Greater, - } - } - Ordering::Equal - } -} - -/// The metadata for storing a Payload. -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Hash)] -pub struct Entry { - /// The identifier of the namespace to which the Entry belongs. 
- pub namespace_id: NamespaceId, - /// The identifier of the subspace to which the Entry belongs. - pub subspace_id: SubspaceId, - /// The Path to which the Entry was written. - pub path: Path, - /// The claimed creation time of the Entry. - /// - /// Wall-clock timestamps may come as a surprise. We are cognisant of their limitations, - /// and use them anyway. To learn why, please see Timestamps, really? - pub timestamp: Timestamp, - /// The length of the Payload in bytes. - pub payload_length: u64, - /// The result of applying hash_payload to the Payload. - pub payload_digest: PayloadDigest, -} - -impl Entry { - pub fn new( - namespace_id: NamespaceId, - subspace_id: SubspaceId, - path: Path, - timestamp: u64, - payload_digest: PayloadDigest, - payload_length: u64, - ) -> Self { - Self { - namespace_id, - subspace_id, - path, - timestamp, - payload_length, - payload_digest, - } - } - pub fn new_current( - namespace_id: NamespaceId, - subspace_id: SubspaceId, - path: Path, - payload_digest: PayloadDigest, - payload_length: u64, - ) -> Self { - let timestamp = system_time_now(); - Self::new( - namespace_id, - subspace_id, - path, - timestamp, - payload_digest, - payload_length, - ) - } - pub fn is_newer_than(&self, other: &Entry) -> bool { - other.timestamp < self.timestamp - || (other.timestamp == self.timestamp && other.payload_digest < self.payload_digest) - || (other.timestamp == self.timestamp - && other.payload_digest == self.payload_digest - && other.payload_length < self.payload_length) - } - - pub fn as_set_sort_tuple(&self) -> (&NamespaceId, &SubspaceId, &Path) { - (&self.namespace_id, &self.subspace_id, &self.path) - } - - pub fn attach_authorisation( - self, - capability: WriteCapability, - secret_key: &UserSecretKey, - ) -> Result { - attach_authorisation(self, capability, secret_key) - } -} - -impl PartialOrd for Entry { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for Entry { - fn cmp(&self, other: 
&Self) -> Ordering { - self.namespace_id - .cmp(&other.namespace_id) - .then(self.subspace_id.cmp(&other.subspace_id)) - .then(self.path.cmp(&other.path)) - .then(self.timestamp.cmp(&other.timestamp)) - .then(self.payload_digest.cmp(&other.payload_digest)) - .then(self.payload_length.cmp(&other.payload_length)) - } -} - -/// A PossiblyAuthorisedEntry is a pair of an Entry and an AuthorisationToken. -#[derive(Debug, Serialize, Deserialize)] -pub struct PossiblyAuthorisedEntry(Entry, AuthorisationToken); - -impl PossiblyAuthorisedEntry { - pub fn new(entry: Entry, authorisation_token: AuthorisationToken) -> Self { - Self(entry, authorisation_token) - } - pub fn is_authorised(&self) -> bool { - is_authorised_write(&self.0, &self.1) - } - - pub fn authorise(self) -> Result { - match self.is_authorised() { - true => Ok(AuthorisedEntry(self.0, self.1)), - false => Err(Unauthorised), - } - } - - pub fn into_parts(self) -> (Entry, AuthorisationToken) { - (self.0, self.1) - } -} - -impl TryFrom for AuthorisedEntry { - type Error = Unauthorised; - fn try_from(value: PossiblyAuthorisedEntry) -> Result { - value.authorise() - } -} - -/// An AuthorisedEntry is a PossiblyAuthorisedEntry for which is_authorised_write returns true. -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct AuthorisedEntry(Entry, AuthorisationToken); - -impl AuthorisedEntry { - pub fn try_from_parts( - entry: Entry, - static_token: StaticToken, - dynamic_token: DynamicToken, - ) -> Result { - let authorisation_token = AuthorisationToken::from_parts(static_token, dynamic_token); - PossiblyAuthorisedEntry::new(entry, authorisation_token).authorise() - } - - pub fn entry(&self) -> &Entry { - &self.0 - } - - pub fn into_entry(self) -> Entry { - self.0 - } - - pub fn is_authorised(&self) -> bool { - true - } - - /// Use only if you can assure that the authorisation was previously checked! 
- pub fn from_parts_unchecked(entry: Entry, authorisation_token: AuthorisationToken) -> Self { - Self(entry, authorisation_token) - } - - pub fn into_parts(self) -> (Entry, AuthorisationToken) { - (self.0, self.1) - } - - pub fn namespace_id(&self) -> NamespaceId { - self.1.capability.granted_namespace().into() - } -} - -// TODO: zerocopy support for path -// #[allow(missing_debug_implementations)] -// #[derive(KnownLayout, FromBytes, NoCell, Unaligned, IntoBytes)] -// #[repr(C, packed)] -// pub struct ComponentRef([u8]); -// -// #[allow(missing_debug_implementations)] -// #[derive(KnownLayout, FromBytes, NoCell, Unaligned, IntoBytes)] -// #[repr(C, packed)] -// pub struct PathRef([ComponentRef]); -// pub struct PathRef<'a>(&'a [&'a [u8]]); -// impl<'a> AsRef> for Path { -// fn as_ref(&'a self) -> &'a PathRef<'a> { -// todo!() -// } -// } - -pub mod encodings { - //! Encoding for Willow entries - //! - //! TODO: Verify that these are correct accoring to the spec! These encodings are the message - //! bytes for authorisation signatures, so we better not need to change them again. - - use std::io::Write; - - use bytes::Bytes; - - use crate::{ - proto::willow::{NamespaceId, SubspaceId}, - util::codec::Encoder, - }; - - use super::{Entry, Path, DIGEST_LENGTH}; - - /// `PATH_LENGTH_POWER` is the least natural number such that `256 ^ PATH_LENGTH_POWER ≥ MAX_COMPONENT_LENGTH`. - /// We can represent the length of any Component in path_length_power bytes. - /// UPathLengthPower denotes the type of numbers between zero (inclusive) and 256path_length_power (exclusive). - /// - /// The value `2` means that we can encode paths up to 64KiB long. 
- pub const PATH_LENGTH_POWER: usize = 2; - pub const PATH_COUNT_POWER: usize = PATH_LENGTH_POWER; - pub type UPathLengthPower = u16; - pub type UPathCountPower = u16; - - impl Encoder for Path { - fn encoded_len(&self) -> usize { - let lengths_len = PATH_COUNT_POWER + self.len() * PATH_LENGTH_POWER; - let data_len = self.iter().map(Bytes::len).sum::(); - lengths_len + data_len - } - - /// Encode in the format for signatures into a mutable vector. - fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { - let component_count = self.len() as UPathCountPower; - out.write_all(&component_count.to_be_bytes())?; - for component in self.iter() { - let len = component.len() as UPathLengthPower; - out.write_all(&len.to_be_bytes())?; - out.write_all(component)?; - } - Ok(()) - } - } - - impl Encoder for Entry { - fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { - out.write_all(self.namespace_id.as_bytes())?; - out.write_all(self.subspace_id.as_bytes())?; - self.path.encode_into(out)?; - out.write_all(&self.timestamp.to_be_bytes())?; - out.write_all(&self.payload_length.to_be_bytes())?; - out.write_all(self.payload_digest.as_bytes())?; - Ok(()) - } - - fn encoded_len(&self) -> usize { - let path_len = self.path.encoded_len(); - NamespaceId::LENGTH + SubspaceId::LENGTH + path_len + 8 + 8 + DIGEST_LENGTH - } - } - - #[derive(Debug, Clone)] - pub struct RelativePath<'a> { - pub path: &'a Path, - pub reference: &'a Path, - } - impl<'a> RelativePath<'a> { - pub fn new(path: &'a Path, reference: &'a Path) -> Self { - Self { path, reference } - } - } - - impl<'a> Encoder for RelativePath<'a> { - fn encoded_len(&self) -> usize { - let common_prefix_len = self.path.common_prefix_len(self.reference) as UPathCountPower; - let remaining_path = self.path.remove_prefix(common_prefix_len as usize); - PATH_COUNT_POWER + remaining_path.encoded_len() - } - - fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { - let common_prefix_len = 
self.path.common_prefix_len(self.reference) as UPathCountPower; - out.write_all(&common_prefix_len.to_be_bytes())?; - let remaining_path = self.path.remove_prefix(common_prefix_len as usize); - remaining_path.encode_into(out)?; - Ok(()) - } - } -} diff --git a/iroh-willow/src/session/challenge.rs b/iroh-willow/src/session/challenge.rs new file mode 100644 index 0000000000..4d95da33ec --- /dev/null +++ b/iroh-willow/src/session/challenge.rs @@ -0,0 +1,108 @@ +/// Data from the initial transmission +/// +/// This happens before the session is initialized. +#[derive(Debug)] +pub struct InitialTransmission { + /// The [`AccessChallenge`] nonce, whose hash we sent to the remote. + pub our_nonce: AccessChallenge, + /// The [`ChallengeHash`] we received from the remote. + pub received_commitment: ChallengeHash, + /// The maximum payload size we received from the remote. + pub their_max_payload_size: u64, +} + +#[derive(Debug)] +pub enum ChallengeState { + Committed { + our_nonce: AccessChallenge, + received_commitment: ChallengeHash, + }, + Revealed { + ours: AccessChallengeBytes, + theirs: AccessChallengeBytes, + }, +} + +impl ChallengeState { + pub fn reveal(&mut self, our_role: Role, their_nonce: AccessChallenge) -> Result<(), Error> { + match self { + Self::Committed { + our_nonce, + received_commitment, + } => { + if their_nonce.hash() != *received_commitment { + return Err(Error::BrokenCommittement); + } + let ours = match our_role { + Role::Alfie => bitwise_xor(our_nonce.to_bytes(), their_nonce.to_bytes()), + Role::Betty => { + bitwise_xor_complement(our_nonce.to_bytes(), their_nonce.to_bytes()) + } + }; + let theirs = bitwise_complement(ours); + *self = Self::Revealed { ours, theirs }; + Ok(()) + } + _ => Err(Error::InvalidMessageInCurrentState), + } + } + + pub fn is_revealed(&self) -> bool { + matches!(self, Self::Revealed { .. 
}) + } + + pub fn sign(&self, secret_key: &UserSecretKey) -> Result { + let signable = self.signable()?; + let signature = secret_key.sign(&signable); + Ok(signature) + } + + pub fn signable(&self) -> Result<[u8; 32], Error> { + let challenge = self.get_ours()?; + Ok(*challenge) + } + + pub fn verify(&self, user_key: &UserPublicKey, signature: &UserSignature) -> Result<(), Error> { + let their_challenge = self.get_theirs()?; + user_key.verify(their_challenge, signature)?; + Ok(()) + } + + fn get_ours(&self) -> Result<&AccessChallengeBytes, Error> { + match self { + Self::Revealed { ours, .. } => Ok(ours), + _ => Err(Error::InvalidMessageInCurrentState), + } + } + + fn get_theirs(&self) -> Result<&AccessChallengeBytes, Error> { + match self { + Self::Revealed { theirs, .. } => Ok(theirs), + _ => Err(Error::InvalidMessageInCurrentState), + } + } +} + +fn bitwise_xor(a: [u8; N], b: [u8; N]) -> [u8; N] { + let mut res = [0u8; N]; + for (i, (x1, x2)) in a.iter().zip(b.iter()).enumerate() { + res[i] = x1 ^ x2; + } + res +} + +fn bitwise_complement(a: [u8; N]) -> [u8; N] { + let mut res = [0u8; N]; + for (i, x) in a.iter().enumerate() { + res[i] = !x; + } + res +} + +fn bitwise_xor_complement(a: [u8; N], b: [u8; N]) -> [u8; N] { + let mut res = [0u8; N]; + for (i, (x1, x2)) in a.iter().zip(b.iter()).enumerate() { + res[i] = !(x1 ^ x2); + } + res +} diff --git a/iroh-willow/src/session/error.rs b/iroh-willow/src/session/error.rs index 393167f209..9588e616ba 100644 --- a/iroh-willow/src/session/error.rs +++ b/iroh-willow/src/session/error.rs @@ -11,6 +11,8 @@ use crate::{ util::channel::{ReadError, WriteError}, }; +// This is a catch-all error type for the session module. 
+// TODO: Split this into multiple error types #[derive(Debug, thiserror::Error)] pub enum Error { #[error("local store failed: {0}")] diff --git a/iroh-willow/src/util.rs b/iroh-willow/src/util.rs index 4966aa74fe..948766804f 100644 --- a/iroh-willow/src/util.rs +++ b/iroh-willow/src/util.rs @@ -6,3 +6,19 @@ pub mod gen_stream; pub mod queue; pub mod stream; pub mod time; + +/// Increment a fixed-length byte string by one, by incrementing the last byte that is not 255 by one. +/// +/// Returns None if all bytes are 255. +pub fn increment_by_one(value: &[u8; N]) -> Option<[u8; N]> { + let mut out = *value; + for char in out.iter_mut().rev() { + if *char != 255 { + *char += 1; + return Some(out); + } else { + *char = 0; + } + } + None +} diff --git a/iroh-willow/tests/basic.rs b/iroh-willow/tests/basic.rs index ef95b8bcea..f0679deed2 100644 --- a/iroh-willow/tests/basic.rs +++ b/iroh-willow/tests/basic.rs @@ -1,427 +1,427 @@ -use std::time::Duration; - -use anyhow::Result; -use futures_concurrency::future::TryJoin; -use futures_lite::StreamExt; - -use iroh_willow::{ - proto::{grouping::Area, willow::Path}, - session::{ - intents::{Completion, EventKind}, - Interests, SessionInit, SessionMode, - }, -}; - -use self::util::{create_rng, insert, setup_and_delegate, spawn_two, Peer}; - -#[tokio::test(flavor = "multi_thread")] -async fn peer_manager_two_intents() -> Result<()> { - iroh_test::logging::setup_multithreaded(); - let mut rng = create_rng("peer_manager_two_intents"); - - let [alfie, betty] = spawn_two(&mut rng).await?; - let (namespace, _alfie_user, betty_user) = setup_and_delegate(&alfie, &betty).await?; - let betty_node_id = betty.node_id(); - - insert(&betty, namespace, betty_user, &[b"foo", b"1"], "foo 1").await?; - insert(&betty, namespace, betty_user, &[b"bar", b"2"], "bar 2").await?; - insert(&betty, namespace, betty_user, &[b"bar", b"3"], "bar 3").await?; - - let task_foo_path = tokio::task::spawn({ - let alfie = alfie.clone(); - async move { - let path 
= Path::new(&[b"foo"]).unwrap(); - - let init = SessionInit::new( - Interests::builder().add_area(namespace, [Area::path(path.clone())]), - SessionMode::ReconcileOnce, - ); - let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); - - assert_eq!( - intent.next().await.unwrap(), - EventKind::CapabilityIntersection { - namespace, - area: Area::full(), - } - ); - - assert_eq!( - intent.next().await.unwrap(), - EventKind::InterestIntersection { - namespace, - area: Area::path(path.clone()).into() - } - ); - - assert_eq!( - intent.next().await.unwrap(), - EventKind::Reconciled { - namespace, - area: Area::path(path.clone()).into() - } - ); - - assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); - - assert!(intent.next().await.is_none()); - } - }); - - let task_bar_path = tokio::task::spawn({ - let alfie = alfie.clone(); - async move { - let path = Path::new(&[b"bar"]).unwrap(); - - let interests = Interests::builder().add_area(namespace, [Area::path(path.clone())]); - let init = SessionInit::new(interests, SessionMode::ReconcileOnce); - - let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); - - assert_eq!( - intent.next().await.unwrap(), - EventKind::CapabilityIntersection { - namespace, - area: Area::full(), - } - ); - - assert_eq!( - intent.next().await.unwrap(), - EventKind::InterestIntersection { - namespace, - area: Area::path(path.clone()).into() - } - ); - - assert_eq!( - intent.next().await.unwrap(), - EventKind::Reconciled { - namespace, - area: Area::path(path.clone()).into() - } - ); - - assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); - - assert!(intent.next().await.is_none()); - } - }); - - task_foo_path.await.unwrap(); - task_bar_path.await.unwrap(); - - // tokio::time::sleep(std::time::Duration::from_secs(1)).await; - - [alfie, betty].map(Peer::shutdown).try_join().await?; - - Ok(()) -} - -#[tokio::test(flavor = "multi_thread")] -async fn peer_manager_update_intent() -> Result<()> 
{ - iroh_test::logging::setup_multithreaded(); - let mut rng = create_rng("peer_manager_update_intent"); - - let [alfie, betty] = spawn_two(&mut rng).await?; - let (namespace, _alfie_user, betty_user) = setup_and_delegate(&alfie, &betty).await?; - let betty_node_id = betty.node_id(); - - insert(&betty, namespace, betty_user, &[b"foo"], "foo 1").await?; - insert(&betty, namespace, betty_user, &[b"bar"], "bar 1").await?; - - let path = Path::new(&[b"foo"]).unwrap(); - let interests = Interests::builder().add_area(namespace, [Area::path(path.clone())]); - let init = SessionInit::new(interests, SessionMode::Live); - let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); - - assert_eq!( - intent.next().await.unwrap(), - EventKind::CapabilityIntersection { - namespace, - area: Area::full(), - } - ); - assert_eq!( - intent.next().await.unwrap(), - EventKind::InterestIntersection { - namespace, - area: Area::path(path.clone()).into() - } - ); - assert_eq!( - intent.next().await.unwrap(), - EventKind::Reconciled { - namespace, - area: Area::path(path.clone()).into() - } - ); - assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); - - let path = Path::new(&[b"bar"]).unwrap(); - let interests = Interests::builder().add_area(namespace, [Area::path(path.clone())]); - intent.add_interests(interests).await?; - - assert_eq!( - intent.next().await.unwrap(), - EventKind::InterestIntersection { - namespace, - area: Area::path(path.clone()).into() - } - ); - assert_eq!( - intent.next().await.unwrap(), - EventKind::Reconciled { - namespace, - area: Area::path(path.clone()).into() - } - ); - - assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); - - intent.close().await; - - assert!(intent.next().await.is_none(),); - - [alfie, betty].map(Peer::shutdown).try_join().await?; - Ok(()) -} - -/// Test immediate shutdown. -// TODO: This does not really test much. Used it for log reading of graceful connection termination. 
-// Not sure where we should expose whether connections closed gracefully or not? -#[tokio::test(flavor = "multi_thread")] -async fn peer_manager_shutdown_immediate() -> Result<()> { - iroh_test::logging::setup_multithreaded(); - let mut rng = create_rng("peer_manager_shutdown_immediate"); - - let [alfie, betty] = spawn_two(&mut rng).await?; - let (_namespace, _alfie_user, _betty_user) = setup_and_delegate(&alfie, &betty).await?; - let betty_node_id = betty.node_id(); - let mut intent = alfie - .sync_with_peer(betty_node_id, SessionInit::reconcile_once(Interests::all())) - .await?; - let completion = intent.complete().await?; - assert_eq!(completion, Completion::Complete); - [alfie, betty].map(Peer::shutdown).try_join().await?; - Ok(()) -} - -/// Test shutdown after a timeout. -// TODO: This does not really test much. Used it for log reading of graceful connection termination. -// Not sure where we should expose whether connections closed gracefully or not? -#[tokio::test(flavor = "multi_thread")] -async fn peer_manager_shutdown_timeout() -> Result<()> { - iroh_test::logging::setup_multithreaded(); - let mut rng = create_rng("peer_manager_shutdown_timeout"); - - let [alfie, betty] = spawn_two(&mut rng).await?; - let (_namespace, _alfie_user, _betty_user) = setup_and_delegate(&alfie, &betty).await?; - let betty_node_id = betty.node_id(); - let mut intent = alfie - .sync_with_peer(betty_node_id, SessionInit::reconcile_once(Interests::all())) - .await?; - let completion = intent.complete().await?; - assert_eq!(completion, Completion::Complete); - tokio::time::sleep(Duration::from_secs(1)).await; - [alfie, betty].map(Peer::shutdown).try_join().await?; - Ok(()) -} - -#[tokio::test(flavor = "multi_thread")] -async fn peer_manager_twoway_loop() -> Result<()> { - iroh_test::logging::setup_multithreaded(); - let mut rng = create_rng("peer_manager_twoway_loop"); - - let [alfie, betty] = spawn_two(&mut rng).await?; - let (namespace, alfie_user, betty_user) = 
setup_and_delegate(&alfie, &betty).await?; - insert(&alfie, namespace, alfie_user, &[b"foo"], "foo 1").await?; - insert(&betty, namespace, betty_user, &[b"bar"], "bar 1").await?; - let alfie_node_id = alfie.node_id(); - let betty_node_id = betty.node_id(); - for _i in 0..20 { - let alfie = alfie.clone(); - let betty = betty.clone(); - let task_alfie = tokio::task::spawn(async move { - let mut intent = alfie - .sync_with_peer(betty_node_id, SessionInit::reconcile_once(Interests::all())) - .await - .unwrap(); - let completion = intent.complete().await.expect("failed to complete intent"); - assert_eq!(completion, Completion::Complete); - }); - - let task_betty = tokio::task::spawn(async move { - let mut intent = betty - .sync_with_peer(alfie_node_id, SessionInit::reconcile_once(Interests::all())) - .await - .unwrap(); - let completion = intent.complete().await.expect("failed to complete intent"); - assert_eq!(completion, Completion::Complete); - }); - task_alfie.await.unwrap(); - task_betty.await.unwrap(); - } - [alfie, betty].map(Peer::shutdown).try_join().await?; - Ok(()) -} - -mod util { - use std::sync::{Arc, Mutex}; - - use anyhow::Result; - use bytes::Bytes; - use futures_concurrency::future::TryJoin; - use iroh_net::{Endpoint, NodeId}; - use rand::SeedableRng; - use rand_chacha::ChaCha12Rng; - use rand_core::CryptoRngCore; - use tokio::task::JoinHandle; - - use iroh_willow::{ - auth::{CapSelector, DelegateTo, RestrictArea}, - engine::{AcceptOpts, Engine}, - form::EntryForm, - net::ALPN, - proto::{ - keys::{NamespaceId, NamespaceKind, UserId}, - meadowcap::AccessMode, - willow::Path, - }, - }; - - pub fn create_rng(seed: &str) -> ChaCha12Rng { - let seed = iroh_base::hash::Hash::new(seed); - ChaCha12Rng::from_seed(*(seed.as_bytes())) - } - - #[derive(Debug, Clone)] - pub struct Peer { - endpoint: Endpoint, - engine: Engine, - accept_task: Arc>>>>, - } - - impl Peer { - pub async fn spawn( - secret_key: iroh_net::key::SecretKey, - accept_opts: AcceptOpts, - ) -> 
Result { - let endpoint = Endpoint::builder() - .secret_key(secret_key) - .alpns(vec![ALPN.to_vec()]) - .bind(0) - .await?; - let payloads = iroh_blobs::store::mem::Store::default(); - let create_store = move || iroh_willow::store::memory::Store::new(payloads); - let engine = Engine::spawn(endpoint.clone(), create_store, accept_opts); - let accept_task = tokio::task::spawn({ - let engine = engine.clone(); - let endpoint = endpoint.clone(); - async move { - while let Some(mut conn) = endpoint.accept().await { - let Ok(alpn) = conn.alpn().await else { - continue; - }; - if alpn != ALPN { - continue; - } - let Ok(conn) = conn.await else { - continue; - }; - engine.handle_connection(conn).await?; - } - Result::Ok(()) - } - }); - Ok(Self { - endpoint, - engine, - accept_task: Arc::new(Mutex::new(Some(accept_task))), - }) - } - - pub async fn shutdown(self) -> Result<()> { - let accept_task = self.accept_task.lock().unwrap().take(); - if let Some(accept_task) = accept_task { - accept_task.abort(); - match accept_task.await { - Err(err) if err.is_cancelled() => {} - Ok(Ok(())) => {} - Err(err) => Err(err)?, - Ok(Err(err)) => Err(err)?, - } - } - self.engine.shutdown().await?; - self.endpoint.close(0u8.into(), b"").await?; - Ok(()) - } - - pub fn node_id(&self) -> NodeId { - self.endpoint.node_id() - } - } - - impl std::ops::Deref for Peer { - type Target = Engine; - fn deref(&self) -> &Self::Target { - &self.engine - } - } - - pub async fn spawn_two(rng: &mut impl CryptoRngCore) -> Result<[Peer; 2]> { - let peers = [ - iroh_net::key::SecretKey::generate_with_rng(rng), - iroh_net::key::SecretKey::generate_with_rng(rng), - ] - .map(|secret_key| Peer::spawn(secret_key, Default::default())) - .try_join() - .await?; - - peers[0] - .endpoint - .add_node_addr(peers[1].endpoint.node_addr().await?)?; - - peers[1] - .endpoint - .add_node_addr(peers[0].endpoint.node_addr().await?)?; - - Ok(peers) - } - - pub async fn setup_and_delegate( - alfie: &Engine, - betty: &Engine, - ) -> 
Result<(NamespaceId, UserId, UserId)> { - let user_alfie = alfie.create_user().await?; - let user_betty = betty.create_user().await?; - - let namespace_id = alfie - .create_namespace(NamespaceKind::Owned, user_alfie) - .await?; - - let cap_for_betty = alfie - .delegate_caps( - CapSelector::widest(namespace_id), - AccessMode::ReadWrite, - DelegateTo::new(user_betty, RestrictArea::None), - ) - .await?; - - betty.import_caps(cap_for_betty).await?; - Ok((namespace_id, user_alfie, user_betty)) - } - - pub async fn insert( - handle: &Engine, - namespace_id: NamespaceId, - user: UserId, - path: &[&[u8]], - bytes: impl Into, - ) -> Result<()> { - let path = Path::new(path)?; - let entry = EntryForm::new_bytes(namespace_id, path, bytes); - handle.insert(entry, user).await?; - Ok(()) - } -} +// use std::time::Duration; + +// use anyhow::Result; +// use futures_concurrency::future::TryJoin; +// use futures_lite::StreamExt; + +// use iroh_willow::{ +// proto::{grouping::Area, willow::Path}, +// session::{ +// intents::{Completion, EventKind}, +// Interests, SessionInit, SessionMode, +// }, +// }; + +// use self::util::{create_rng, insert, setup_and_delegate, spawn_two, Peer}; + +// #[tokio::test(flavor = "multi_thread")] +// async fn peer_manager_two_intents() -> Result<()> { +// iroh_test::logging::setup_multithreaded(); +// let mut rng = create_rng("peer_manager_two_intents"); + +// let [alfie, betty] = spawn_two(&mut rng).await?; +// let (namespace, _alfie_user, betty_user) = setup_and_delegate(&alfie, &betty).await?; +// let betty_node_id = betty.node_id(); + +// insert(&betty, namespace, betty_user, &[b"foo", b"1"], "foo 1").await?; +// insert(&betty, namespace, betty_user, &[b"bar", b"2"], "bar 2").await?; +// insert(&betty, namespace, betty_user, &[b"bar", b"3"], "bar 3").await?; + +// let task_foo_path = tokio::task::spawn({ +// let alfie = alfie.clone(); +// async move { +// let path = Path::new(&[b"foo"]).unwrap(); + +// let init = SessionInit::new( +// 
Interests::builder().add_area(namespace, [Area::path(path.clone())]), +// SessionMode::ReconcileOnce, +// ); +// let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); + +// assert_eq!( +// intent.next().await.unwrap(), +// EventKind::CapabilityIntersection { +// namespace, +// area: Area::full(), +// } +// ); + +// assert_eq!( +// intent.next().await.unwrap(), +// EventKind::InterestIntersection { +// namespace, +// area: Area::path(path.clone()).into() +// } +// ); + +// assert_eq!( +// intent.next().await.unwrap(), +// EventKind::Reconciled { +// namespace, +// area: Area::path(path.clone()).into() +// } +// ); + +// assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); + +// assert!(intent.next().await.is_none()); +// } +// }); + +// let task_bar_path = tokio::task::spawn({ +// let alfie = alfie.clone(); +// async move { +// let path = Path::new(&[b"bar"]).unwrap(); + +// let interests = Interests::builder().add_area(namespace, [Area::path(path.clone())]); +// let init = SessionInit::new(interests, SessionMode::ReconcileOnce); + +// let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); + +// assert_eq!( +// intent.next().await.unwrap(), +// EventKind::CapabilityIntersection { +// namespace, +// area: Area::full(), +// } +// ); + +// assert_eq!( +// intent.next().await.unwrap(), +// EventKind::InterestIntersection { +// namespace, +// area: Area::path(path.clone()).into() +// } +// ); + +// assert_eq!( +// intent.next().await.unwrap(), +// EventKind::Reconciled { +// namespace, +// area: Area::path(path.clone()).into() +// } +// ); + +// assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); + +// assert!(intent.next().await.is_none()); +// } +// }); + +// task_foo_path.await.unwrap(); +// task_bar_path.await.unwrap(); + +// // tokio::time::sleep(std::time::Duration::from_secs(1)).await; + +// [alfie, betty].map(Peer::shutdown).try_join().await?; + +// Ok(()) +// } + +// #[tokio::test(flavor = 
"multi_thread")] +// async fn peer_manager_update_intent() -> Result<()> { +// iroh_test::logging::setup_multithreaded(); +// let mut rng = create_rng("peer_manager_update_intent"); + +// let [alfie, betty] = spawn_two(&mut rng).await?; +// let (namespace, _alfie_user, betty_user) = setup_and_delegate(&alfie, &betty).await?; +// let betty_node_id = betty.node_id(); + +// insert(&betty, namespace, betty_user, &[b"foo"], "foo 1").await?; +// insert(&betty, namespace, betty_user, &[b"bar"], "bar 1").await?; + +// let path = Path::new(&[b"foo"]).unwrap(); +// let interests = Interests::builder().add_area(namespace, [Area::path(path.clone())]); +// let init = SessionInit::new(interests, SessionMode::Live); +// let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); + +// assert_eq!( +// intent.next().await.unwrap(), +// EventKind::CapabilityIntersection { +// namespace, +// area: Area::full(), +// } +// ); +// assert_eq!( +// intent.next().await.unwrap(), +// EventKind::InterestIntersection { +// namespace, +// area: Area::path(path.clone()).into() +// } +// ); +// assert_eq!( +// intent.next().await.unwrap(), +// EventKind::Reconciled { +// namespace, +// area: Area::path(path.clone()).into() +// } +// ); +// assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); + +// let path = Path::new(&[b"bar"]).unwrap(); +// let interests = Interests::builder().add_area(namespace, [Area::path(path.clone())]); +// intent.add_interests(interests).await?; + +// assert_eq!( +// intent.next().await.unwrap(), +// EventKind::InterestIntersection { +// namespace, +// area: Area::path(path.clone()).into() +// } +// ); +// assert_eq!( +// intent.next().await.unwrap(), +// EventKind::Reconciled { +// namespace, +// area: Area::path(path.clone()).into() +// } +// ); + +// assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); + +// intent.close().await; + +// assert!(intent.next().await.is_none(),); + +// [alfie, 
betty].map(Peer::shutdown).try_join().await?; +// Ok(()) +// } + +// /// Test immediate shutdown. +// // TODO: This does not really test much. Used it for log reading of graceful connection termination. +// // Not sure where we should expose whether connections closed gracefully or not? +// #[tokio::test(flavor = "multi_thread")] +// async fn peer_manager_shutdown_immediate() -> Result<()> { +// iroh_test::logging::setup_multithreaded(); +// let mut rng = create_rng("peer_manager_shutdown_immediate"); + +// let [alfie, betty] = spawn_two(&mut rng).await?; +// let (_namespace, _alfie_user, _betty_user) = setup_and_delegate(&alfie, &betty).await?; +// let betty_node_id = betty.node_id(); +// let mut intent = alfie +// .sync_with_peer(betty_node_id, SessionInit::reconcile_once(Interests::all())) +// .await?; +// let completion = intent.complete().await?; +// assert_eq!(completion, Completion::Complete); +// [alfie, betty].map(Peer::shutdown).try_join().await?; +// Ok(()) +// } + +// /// Test shutdown after a timeout. +// // TODO: This does not really test much. Used it for log reading of graceful connection termination. +// // Not sure where we should expose whether connections closed gracefully or not? 
+// #[tokio::test(flavor = "multi_thread")] +// async fn peer_manager_shutdown_timeout() -> Result<()> { +// iroh_test::logging::setup_multithreaded(); +// let mut rng = create_rng("peer_manager_shutdown_timeout"); + +// let [alfie, betty] = spawn_two(&mut rng).await?; +// let (_namespace, _alfie_user, _betty_user) = setup_and_delegate(&alfie, &betty).await?; +// let betty_node_id = betty.node_id(); +// let mut intent = alfie +// .sync_with_peer(betty_node_id, SessionInit::reconcile_once(Interests::all())) +// .await?; +// let completion = intent.complete().await?; +// assert_eq!(completion, Completion::Complete); +// tokio::time::sleep(Duration::from_secs(1)).await; +// [alfie, betty].map(Peer::shutdown).try_join().await?; +// Ok(()) +// } + +// #[tokio::test(flavor = "multi_thread")] +// async fn peer_manager_twoway_loop() -> Result<()> { +// iroh_test::logging::setup_multithreaded(); +// let mut rng = create_rng("peer_manager_twoway_loop"); + +// let [alfie, betty] = spawn_two(&mut rng).await?; +// let (namespace, alfie_user, betty_user) = setup_and_delegate(&alfie, &betty).await?; +// insert(&alfie, namespace, alfie_user, &[b"foo"], "foo 1").await?; +// insert(&betty, namespace, betty_user, &[b"bar"], "bar 1").await?; +// let alfie_node_id = alfie.node_id(); +// let betty_node_id = betty.node_id(); +// for _i in 0..20 { +// let alfie = alfie.clone(); +// let betty = betty.clone(); +// let task_alfie = tokio::task::spawn(async move { +// let mut intent = alfie +// .sync_with_peer(betty_node_id, SessionInit::reconcile_once(Interests::all())) +// .await +// .unwrap(); +// let completion = intent.complete().await.expect("failed to complete intent"); +// assert_eq!(completion, Completion::Complete); +// }); + +// let task_betty = tokio::task::spawn(async move { +// let mut intent = betty +// .sync_with_peer(alfie_node_id, SessionInit::reconcile_once(Interests::all())) +// .await +// .unwrap(); +// let completion = intent.complete().await.expect("failed to complete 
intent"); +// assert_eq!(completion, Completion::Complete); +// }); +// task_alfie.await.unwrap(); +// task_betty.await.unwrap(); +// } +// [alfie, betty].map(Peer::shutdown).try_join().await?; +// Ok(()) +// } + +// mod util { +// use std::sync::{Arc, Mutex}; + +// use anyhow::Result; +// use bytes::Bytes; +// use futures_concurrency::future::TryJoin; +// use iroh_net::{Endpoint, NodeId}; +// use rand::SeedableRng; +// use rand_chacha::ChaCha12Rng; +// use rand_core::CryptoRngCore; +// use tokio::task::JoinHandle; + +// use iroh_willow::{ +// auth::{CapSelector, DelegateTo, RestrictArea}, +// engine::{AcceptOpts, Engine}, +// form::EntryForm, +// net::ALPN, +// proto::{ +// keys::{NamespaceId, NamespaceKind, UserId}, +// meadowcap::AccessMode, +// willow::Path, +// }, +// }; + +// pub fn create_rng(seed: &str) -> ChaCha12Rng { +// let seed = iroh_base::hash::Hash::new(seed); +// ChaCha12Rng::from_seed(*(seed.as_bytes())) +// } + +// #[derive(Debug, Clone)] +// pub struct Peer { +// endpoint: Endpoint, +// engine: Engine, +// accept_task: Arc>>>>, +// } + +// impl Peer { +// pub async fn spawn( +// secret_key: iroh_net::key::SecretKey, +// accept_opts: AcceptOpts, +// ) -> Result { +// let endpoint = Endpoint::builder() +// .secret_key(secret_key) +// .alpns(vec![ALPN.to_vec()]) +// .bind(0) +// .await?; +// let payloads = iroh_blobs::store::mem::Store::default(); +// let create_store = move || iroh_willow::store::memory::Store::new(payloads); +// let engine = Engine::spawn(endpoint.clone(), create_store, accept_opts); +// let accept_task = tokio::task::spawn({ +// let engine = engine.clone(); +// let endpoint = endpoint.clone(); +// async move { +// while let Some(mut conn) = endpoint.accept().await { +// let Ok(alpn) = conn.alpn().await else { +// continue; +// }; +// if alpn != ALPN { +// continue; +// } +// let Ok(conn) = conn.await else { +// continue; +// }; +// engine.handle_connection(conn).await?; +// } +// Result::Ok(()) +// } +// }); +// Ok(Self { +// 
endpoint, +// engine, +// accept_task: Arc::new(Mutex::new(Some(accept_task))), +// }) +// } + +// pub async fn shutdown(self) -> Result<()> { +// let accept_task = self.accept_task.lock().unwrap().take(); +// if let Some(accept_task) = accept_task { +// accept_task.abort(); +// match accept_task.await { +// Err(err) if err.is_cancelled() => {} +// Ok(Ok(())) => {} +// Err(err) => Err(err)?, +// Ok(Err(err)) => Err(err)?, +// } +// } +// self.engine.shutdown().await?; +// self.endpoint.close(0u8.into(), b"").await?; +// Ok(()) +// } + +// pub fn node_id(&self) -> NodeId { +// self.endpoint.node_id() +// } +// } + +// impl std::ops::Deref for Peer { +// type Target = Engine; +// fn deref(&self) -> &Self::Target { +// &self.engine +// } +// } + +// pub async fn spawn_two(rng: &mut impl CryptoRngCore) -> Result<[Peer; 2]> { +// let peers = [ +// iroh_net::key::SecretKey::generate_with_rng(rng), +// iroh_net::key::SecretKey::generate_with_rng(rng), +// ] +// .map(|secret_key| Peer::spawn(secret_key, Default::default())) +// .try_join() +// .await?; + +// peers[0] +// .endpoint +// .add_node_addr(peers[1].endpoint.node_addr().await?)?; + +// peers[1] +// .endpoint +// .add_node_addr(peers[0].endpoint.node_addr().await?)?; + +// Ok(peers) +// } + +// pub async fn setup_and_delegate( +// alfie: &Engine, +// betty: &Engine, +// ) -> Result<(NamespaceId, UserId, UserId)> { +// let user_alfie = alfie.create_user().await?; +// let user_betty = betty.create_user().await?; + +// let namespace_id = alfie +// .create_namespace(NamespaceKind::Owned, user_alfie) +// .await?; + +// let cap_for_betty = alfie +// .delegate_caps( +// CapSelector::widest(namespace_id), +// AccessMode::ReadWrite, +// DelegateTo::new(user_betty, RestrictArea::None), +// ) +// .await?; + +// betty.import_caps(cap_for_betty).await?; +// Ok((namespace_id, user_alfie, user_betty)) +// } + +// pub async fn insert( +// handle: &Engine, +// namespace_id: NamespaceId, +// user: UserId, +// path: &[&[u8]], +// 
bytes: impl Into, +// ) -> Result<()> { +// let path = Path::new(path)?; +// let entry = EntryForm::new_bytes(namespace_id, path, bytes); +// handle.insert(entry, user).await?; +// Ok(()) +// } +// } From b422c3ff9891361e688236c6b13b4fbc4d227f1b Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Mon, 12 Aug 2024 11:59:52 +0200 Subject: [PATCH 119/198] wip: use willow-rs types --- Cargo.lock | 3 - Cargo.toml | 36 +- iroh-willow/src/proto.rs | 5 +- iroh-willow/src/proto/data_model.rs | 229 +---- iroh-willow/src/proto/grouping.rs | 94 +- iroh-willow/src/proto/keys.rs | 90 +- iroh-willow/src/proto/meadowcap.rs | 178 +++- iroh-willow/src/proto/sync.rs | 1015 +-------------------- iroh-willow/src/proto/wgps.rs | 18 + iroh-willow/src/proto/wgps/channels.rs | 161 ++++ iroh-willow/src/proto/wgps/fingerprint.rs | 48 + iroh-willow/src/proto/wgps/handles.rs | 83 ++ iroh-willow/src/proto/wgps/messages.rs | 531 +++++++++++ 13 files changed, 1265 insertions(+), 1226 deletions(-) create mode 100644 iroh-willow/src/proto/wgps.rs create mode 100644 iroh-willow/src/proto/wgps/channels.rs create mode 100644 iroh-willow/src/proto/wgps/fingerprint.rs create mode 100644 iroh-willow/src/proto/wgps/handles.rs create mode 100644 iroh-willow/src/proto/wgps/messages.rs diff --git a/Cargo.lock b/Cargo.lock index cce3262300..7d9779ed00 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3238,7 +3238,6 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" [[package]] name = "meadowcap" version = "0.1.0" -source = "git+https://github.com/Frando/willow-rs.git?branch=iroh#b98500eebb46bdbbb137f2b049c54f86bf49bcb6" dependencies = [ "either", "signature", @@ -6638,7 +6637,6 @@ checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" [[package]] name = "willow-data-model" version = "0.1.0" -source = "git+https://github.com/Frando/willow-rs.git?branch=iroh#b98500eebb46bdbbb137f2b049c54f86bf49bcb6" dependencies = [ "bytes", "either", @@ -6650,7 
+6648,6 @@ dependencies = [ [[package]] name = "willow-encoding" version = "0.1.0" -source = "git+https://github.com/Frando/willow-rs.git?branch=iroh#b98500eebb46bdbbb137f2b049c54f86bf49bcb6" dependencies = [ "either", "syncify", diff --git a/Cargo.toml b/Cargo.toml index 4ddb52cbd8..2827b99e6c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,17 +1,17 @@ [workspace] members = [ - "iroh", - "iroh-blobs", - "iroh-base", - "iroh-dns-server", - "iroh-gossip", - "iroh-metrics", - "iroh-net", - "iroh-docs", - "iroh-test", - "iroh-net/bench", - "iroh-cli", - "iroh-willow", + "iroh", + "iroh-blobs", + "iroh-base", + "iroh-dns-server", + "iroh-gossip", + "iroh-metrics", + "iroh-net", + "iroh-docs", + "iroh-test", + "iroh-net/bench", + "iroh-cli", + "iroh-willow", ] resolver = "2" @@ -43,9 +43,9 @@ missing_debug_implementations = "warn" unused-async = "warn" [patch.crates-io] -# willow-data-model = { path = "../willow-rs/data-model" } -# willow-encoding = { path = "../willow-rs/encoding" } -# meadowcap = { path = "../willow-rs/meadowcap" } -willow-data-model = { git = "https://github.com/Frando/willow-rs.git", branch = "iroh" } -willow-encoding = { git = "https://github.com/Frando/willow-rs.git", branch = "iroh" } -meadowcap = { git = "https://github.com/Frando/willow-rs.git", branch = "iroh" } +willow-data-model = { path = "../willow-rs/data-model" } +willow-encoding = { path = "../willow-rs/encoding" } +meadowcap = { path = "../willow-rs/meadowcap" } +# willow-data-model = { git = "https://github.com/Frando/willow-rs.git", branch = "iroh" } +# willow-encoding = { git = "https://github.com/Frando/willow-rs.git", branch = "iroh" } +# meadowcap = { git = "https://github.com/Frando/willow-rs.git", branch = "iroh" } diff --git a/iroh-willow/src/proto.rs b/iroh-willow/src/proto.rs index 279fe07d0e..ab1a399d1d 100644 --- a/iroh-willow/src/proto.rs +++ b/iroh-willow/src/proto.rs @@ -1,8 +1,9 @@ pub mod challenge; +pub mod data_model; pub mod grouping; pub mod keys; pub mod 
meadowcap; pub mod pai; -// pub mod sync; -pub mod data_model; +pub mod sync; +pub mod wgps; pub use self::data_model as willow; diff --git a/iroh-willow/src/proto/data_model.rs b/iroh-willow/src/proto/data_model.rs index be18ac2862..10144fcf5f 100644 --- a/iroh-willow/src/proto/data_model.rs +++ b/iroh-willow/src/proto/data_model.rs @@ -1,5 +1,7 @@ use iroh_base::hash::Hash; +use ufotofu::sync::{consumer::IntoVec, producer::FromSlice}; use willow_data_model::InvalidPathError; +use willow_encoding::sync::{Decodable, Encodable}; use super::{ keys, @@ -132,6 +134,20 @@ pub struct Entry( >, ); +impl Entry { + pub fn encode(&self) -> Vec { + let mut consumer = IntoVec::::new(); + self.0.encode(&mut consumer).expect("encoding not to fail"); + consumer.into_vec() + } + + pub fn decode(bytes: &[u8]) -> anyhow::Result { + let mut producer = FromSlice::::new(bytes); + let entry = willow_data_model::Entry::decode(&mut producer)?; + Ok(Self(entry)) + } +} + #[derive(Debug, Clone, derive_more::From, derive_more::Into, derive_more::Deref)] pub struct AuthorisedEntry( willow_data_model::AuthorisedEntry< @@ -208,182 +224,37 @@ mod encoding { } } -// /// A PossiblyAuthorisedEntry is a pair of an Entry and an AuthorisationToken. 
-// #[derive(Debug, Serialize, Deserialize)] -// pub struct PossiblyAuthorisedEntry(Entry, AuthorisationToken); - -// impl PossiblyAuthorisedEntry { -// pub fn new(entry: Entry, authorisation_token: AuthorisationToken) -> Self { -// Self(entry, authorisation_token) -// } -// pub fn is_authorised(&self) -> bool { -// is_authorised_write(&self.0, &self.1) -// } - -// pub fn authorise(self) -> Result { -// match self.is_authorised() { -// true => Ok(AuthorisedEntry(self.0, self.1)), -// false => Err(Unauthorised), -// } -// } - -// pub fn into_parts(self) -> (Entry, AuthorisationToken) { -// (self.0, self.1) -// } -// } - -// impl TryFrom for AuthorisedEntry { -// type Error = Unauthorised; -// fn try_from(value: PossiblyAuthorisedEntry) -> Result { -// value.authorise() -// } -// } - -// /// An AuthorisedEntry is a PossiblyAuthorisedEntry for which is_authorised_write returns true. -// #[derive(Debug, Serialize, Deserialize, Clone)] -// pub struct AuthorisedEntry(Entry, AuthorisationToken); - -// impl AuthorisedEntry { -// pub fn try_from_parts( -// entry: Entry, -// static_token: StaticToken, -// dynamic_token: DynamicToken, -// ) -> Result { -// let authorisation_token = AuthorisationToken::from_parts(static_token, dynamic_token); -// PossiblyAuthorisedEntry::new(entry, authorisation_token).authorise() -// } - -// pub fn entry(&self) -> &Entry { -// &self.0 -// } - -// pub fn into_entry(self) -> Entry { -// self.0 -// } - -// pub fn is_authorised(&self) -> bool { -// true -// } - -// /// Use only if you can assure that the authorisation was previously checked! 
-// pub fn from_parts_unchecked(entry: Entry, authorisation_token: AuthorisationToken) -> Self { -// Self(entry, authorisation_token) -// } - -// pub fn into_parts(self) -> (Entry, AuthorisationToken) { -// (self.0, self.1) -// } - -// pub fn namespace_id(&self) -> NamespaceId { -// self.1.capability.granted_namespace().into() -// } -// } - -// // TODO: zerocopy support for path -// // #[allow(missing_debug_implementations)] -// // #[derive(KnownLayout, FromBytes, NoCell, Unaligned, IntoBytes)] -// // #[repr(C, packed)] -// // pub struct ComponentRef([u8]); -// // -// // #[allow(missing_debug_implementations)] -// // #[derive(KnownLayout, FromBytes, NoCell, Unaligned, IntoBytes)] -// // #[repr(C, packed)] -// // pub struct PathRef([ComponentRef]); -// // pub struct PathRef<'a>(&'a [&'a [u8]]); -// // impl<'a> AsRef> for Path { -// // fn as_ref(&'a self) -> &'a PathRef<'a> { -// // todo!() -// // } -// // } - -// pub mod encodings { -// //! Encoding for Willow entries -// //! -// //! TODO: Verify that these are correct accoring to the spec! These encodings are the message -// //! bytes for authorisation signatures, so we better not need to change them again. - -// use std::io::Write; - -// use bytes::Bytes; - -// use crate::{ -// proto::willow::{NamespaceId, SubspaceId}, -// util::codec::Encoder, -// }; - -// use super::{Entry, Path, DIGEST_LENGTH}; - -// /// `PATH_LENGTH_POWER` is the least natural number such that `256 ^ PATH_LENGTH_POWER ≥ MAX_COMPONENT_LENGTH`. -// /// We can represent the length of any Component in path_length_power bytes. -// /// UPathLengthPower denotes the type of numbers between zero (inclusive) and 256path_length_power (exclusive). -// /// -// /// The value `2` means that we can encode paths up to 64KiB long. 
-// pub const PATH_LENGTH_POWER: usize = 2; -// pub const PATH_COUNT_POWER: usize = PATH_LENGTH_POWER; -// pub type UPathLengthPower = u16; -// pub type UPathCountPower = u16; - -// impl Encoder for Path { -// fn encoded_len(&self) -> usize { -// let lengths_len = PATH_COUNT_POWER + self.len() * PATH_LENGTH_POWER; -// let data_len = self.iter().map(Bytes::len).sum::(); -// lengths_len + data_len -// } - -// /// Encode in the format for signatures into a mutable vector. -// fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { -// let component_count = self.len() as UPathCountPower; -// out.write_all(&component_count.to_be_bytes())?; -// for component in self.iter() { -// let len = component.len() as UPathLengthPower; -// out.write_all(&len.to_be_bytes())?; -// out.write_all(component)?; -// } -// Ok(()) -// } -// } - -// impl Encoder for entry { -// fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { -// out.write_all(self.namespace_id.as_bytes())?; -// out.write_all(self.subspace_id.as_bytes())?; -// self.path.encode_into(out)?; -// out.write_all(&self.timestamp.to_be_bytes())?; -// out.write_all(&self.payload_length.to_be_bytes())?; -// out.write_all(self.payload_digest.as_bytes())?; -// Ok(()) -// } - -// fn encoded_len(&self) -> usize { -// let path_len = self.path.encoded_len(); -// NamespaceId::LENGTH + SubspaceId::LENGTH + path_len + 8 + 8 + DIGEST_LENGTH -// } -// } - -// #[derive(Debug, Clone)] -// pub struct RelativePath<'a> { -// pub path: &'a Path, -// pub reference: &'a Path, -// } -// impl<'a> RelativePath<'a> { -// pub fn new(path: &'a Path, reference: &'a Path) -> Self { -// Self { path, reference } -// } -// } - -// impl<'a> Encoder for RelativePath<'a> { -// fn encoded_len(&self) -> usize { -// let common_prefix_len = self.path.common_prefix_len(self.reference) as UPathCountPower; -// let remaining_path = self.path.remove_prefix(common_prefix_len as usize); -// PATH_COUNT_POWER + remaining_path.encoded_len() -// } - -// fn 
encode_into(&self, out: &mut W) -> anyhow::Result<()> { -// let common_prefix_len = self.path.common_prefix_len(self.reference) as UPathCountPower; -// out.write_all(&common_prefix_len.to_be_bytes())?; -// let remaining_path = self.path.remove_prefix(common_prefix_len as usize); -// remaining_path.encode_into(out)?; -// Ok(()) -// } -// } -// } +pub mod serde_encoding { + use serde::{Deserialize, Deserializer, Serialize}; + use ufotofu::sync::{consumer::IntoVec, producer::FromSlice}; + use willow_encoding::sync::{Decodable, Encodable}; + + use super::*; + + impl Serialize for Entry { + fn serialize(&self, serializer: S) -> Result { + let encoded = { + let mut consumer = IntoVec::::new(); + self.0.encode(&mut consumer).expect("encoding not to fail"); + consumer.into_vec() + }; + encoded.serialize(serializer) + } + } + + impl<'de> Deserialize<'de> for Entry { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let data: Vec = Deserialize::deserialize(deserializer)?; + let decoded = { + let mut producer = FromSlice::new(&data); + let decoded = + willow_data_model::Entry::decode(&mut producer).expect("decoding not to fail"); + Self(decoded) + }; + Ok(decoded) + } + } +} diff --git a/iroh-willow/src/proto/grouping.rs b/iroh-willow/src/proto/grouping.rs index 18da71db36..70a44d0d96 100644 --- a/iroh-willow/src/proto/grouping.rs +++ b/iroh-willow/src/proto/grouping.rs @@ -33,13 +33,15 @@ pub type Range = willow_data_model::grouping::Range; // >, // ); -pub type Three3Range = willow_data_model::grouping::Range3d< +pub type Range3d = willow_data_model::grouping::Range3d< MAX_COMPONENT_LENGTH, MAX_COMPONENT_COUNT, MAX_PATH_LENGTH, SubspaceId, >; +pub type ThreeDRange = Range3d; + pub type Area = willow_data_model::grouping::Area< MAX_COMPONENT_LENGTH, MAX_COMPONENT_COUNT, @@ -131,6 +133,96 @@ impl Point { } } +pub mod serde_encoding { + use serde::{Deserialize, Deserializer, Serialize}; + use ufotofu::sync::{consumer::IntoVec, 
producer::FromSlice}; + use willow_encoding::sync::{RelativeDecodable, RelativeEncodable}; + + use super::*; + + impl Serialize for AreaOfInterest { + fn serialize(&self, serializer: S) -> Result { + let relative = Area::new_full(); + let encoded_area = { + let mut consumer = IntoVec::::new(); + self.0 + .area + .relative_encode(&relative, &mut consumer) + .expect("encoding not to fail"); + consumer.into_vec() + }; + (encoded_area, self.0.max_count, self.0.max_size).serialize(serializer) + } + } + + impl<'de> Deserialize<'de> for AreaOfInterest { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let relative = Area::new_full(); + let (encoded_area, max_count, max_size): (Vec, u64, u64) = + Deserialize::deserialize(deserializer)?; + let decoded_area = { + let mut producer = FromSlice::new(&encoded_area); + let decoded = + willow_data_model::grouping::Area::relative_decode(&relative, &mut producer) + .map_err(|err| serde::de::Error::custom(format!("{err}")))?; + decoded + }; + let aoi = willow_data_model::grouping::AreaOfInterest { + area: decoded_area, + max_count, + max_size, + }; + Ok(Self(aoi)) + } + } + + #[derive(Debug, Clone, derive_more::From, derive_more::Into, derive_more::Deref)] + pub struct SerdeRange3d(Range3d); + + impl Serialize for SerdeRange3d { + fn serialize(&self, serializer: S) -> Result { + let relative = Range3d::new( + Default::default(), + Range::new_open(Path::new_empty()), + Default::default(), + ); + let encoded = { + let mut consumer = IntoVec::::new(); + self.0 + .relative_encode(&relative, &mut consumer) + .expect("encoding not to fail"); + consumer.into_vec() + }; + encoded.serialize(serializer) + } + } + + impl<'de> Deserialize<'de> for SerdeRange3d { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let relative = Range3d::new( + Default::default(), + Range::new_open(Path::new_empty()), + Default::default(), + ); + let encoded_range: Vec = 
Deserialize::deserialize(deserializer)?; + let decoded_range = { + let mut producer = FromSlice::new(&encoded_range); + let decoded = + willow_data_model::grouping::Range3d::relative_decode(&relative, &mut producer) + .map_err(|err| serde::de::Error::custom(format!("{err}")))?; + decoded + }; + Ok(Self(decoded_range)) + } + } +} + #[cfg(test)] mod tests { use std::collections::HashSet; diff --git a/iroh-willow/src/proto/keys.rs b/iroh-willow/src/proto/keys.rs index 4a10212520..c78c2ded96 100644 --- a/iroh-willow/src/proto/keys.rs +++ b/iroh-willow/src/proto/keys.rs @@ -343,6 +343,13 @@ impl From<&UserSecretKey> for UserPublicKey { #[derive(Serialize, Deserialize, Clone, From, PartialEq, Eq, Deref)] pub struct NamespaceSignature(ed25519_dalek::Signature); +impl NamespaceSignature { + /// Create from a byte array. + pub fn from_bytes(bytes: [u8; SIGNATURE_LENGTH]) -> Self { + Self(ed25519_dalek::Signature::from_bytes(&bytes)) + } +} + impl PartialOrd for NamespaceSignature { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) @@ -367,6 +374,13 @@ impl std::hash::Hash for NamespaceSignature { #[derive(Serialize, Deserialize, Clone, From, PartialEq, Eq, Deref)] pub struct UserSignature(ed25519_dalek::Signature); +impl UserSignature { + /// Create from a byte array. 
+ pub fn from_bytes(bytes: [u8; SIGNATURE_LENGTH]) -> Self { + Self(ed25519_dalek::Signature::from_bytes(&bytes)) + } +} + impl PartialOrd for UserSignature { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) @@ -623,6 +637,21 @@ mod willow_impls { key.0.verify(msg, &signature.0) } } + + impl Signer for UserSecretKey { + fn try_sign(&self, msg: &[u8]) -> Result { + Ok(UserSignature(self.0.sign(msg))) + } + } + + impl Signer for NamespaceSecretKey { + fn try_sign( + &self, + msg: &[u8], + ) -> Result { + Ok(NamespaceSignature(self.0.sign(msg))) + } + } } use syncify::syncify; @@ -630,11 +659,12 @@ use syncify::syncify_replace; #[syncify(encoding_sync)] mod encoding { - #[syncify_replace(use ufotofu::sync::BulkConsumer;)] - use ufotofu::local_nb::BulkConsumer; + #[syncify_replace(use ufotofu::sync::{BulkConsumer, BulkProducer};)] + use ufotofu::local_nb::{BulkConsumer, BulkProducer}; - #[syncify_replace(use willow_encoding::sync::Encodable;)] - use willow_encoding::Encodable; + use willow_encoding::DecodeError; + #[syncify_replace(use willow_encoding::sync::{Encodable, Decodable};)] + use willow_encoding::{Decodable, Encodable}; use super::*; @@ -709,4 +739,56 @@ mod encoding { .map_err(|err| err.reason) } } + + impl Decodable for NamespaceId { + async fn decode( + producer: &mut Producer, + ) -> Result> + where + Producer: BulkProducer, + { + let mut bytes = [0; PUBLIC_KEY_LENGTH]; + producer.bulk_overwrite_full_slice(&mut bytes).await?; + Ok(Self::from_bytes_unchecked(bytes)) + } + } + + impl Decodable for UserId { + async fn decode( + producer: &mut Producer, + ) -> Result> + where + Producer: BulkProducer, + { + let mut bytes = [0; PUBLIC_KEY_LENGTH]; + producer.bulk_overwrite_full_slice(&mut bytes).await?; + Ok(Self::from_bytes_unchecked(bytes)) + } + } + + impl Decodable for NamespaceSignature { + async fn decode( + producer: &mut Producer, + ) -> Result> + where + Producer: BulkProducer, + { + let mut bytes = [0; SIGNATURE_LENGTH]; + 
producer.bulk_overwrite_full_slice(&mut bytes).await?; + Ok(Self::from_bytes(bytes)) + } + } + + impl Decodable for UserSignature { + async fn decode( + producer: &mut Producer, + ) -> Result> + where + Producer: BulkProducer, + { + let mut bytes = [0; SIGNATURE_LENGTH]; + producer.bulk_overwrite_full_slice(&mut bytes).await?; + Ok(Self::from_bytes(bytes)) + } + } } diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index e0a65b4e61..5216edc994 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -1,5 +1,9 @@ -use super::keys; +use super::{ + grouping::Area, + keys::{self, NamespaceSecretKey, UserSecretKey}, +}; +use serde::Serialize; use willow_data_model::AuthorisationToken; pub type UserPublicKey = keys::UserPublicKey; @@ -11,6 +15,8 @@ pub type NamespaceSignature = keys::NamespaceSignature; use super::data_model::{Entry, MAX_COMPONENT_COUNT, MAX_COMPONENT_LENGTH, MAX_PATH_LENGTH}; +pub use meadowcap::AccessMode; + #[derive(Debug, derive_more::From)] pub enum SecretKey { User(keys::UserSecretKey), @@ -27,6 +33,13 @@ pub type McCapability = meadowcap::McCapability< keys::UserSignature, >; +pub type McSubspaceCapability = meadowcap::McSubspaceCapability< + keys::NamespaceId, + keys::NamespaceSignature, + keys::UserId, + keys::UserSignature, +>; + pub type McAuthorisationToken = meadowcap::McAuthorisationToken< MAX_COMPONENT_LENGTH, MAX_COMPONENT_COUNT, @@ -47,10 +60,165 @@ pub fn is_authorised_write(entry: &Entry, token: &McAuthorisationToken) -> bool token.is_authorised_write(entry) } -#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Ord, PartialOrd)] -pub enum AccessMode { - ReadOnly, - ReadWrite, +/// Represents an authorisation to read an area of data in a Namespace. +// TODO: Move somewhere else? 
+#[derive(Debug, Clone, Hash, Eq, PartialEq)] +pub struct ReadAuthorisation(McCapability, Option); + +impl ReadAuthorisation { + pub fn new(read_cap: McCapability, subspace_cap: Option) -> Self { + Self(read_cap, subspace_cap) + } + + pub fn new_owned( + namespace_secret: NamespaceSecretKey, + user_key: UserId, + ) -> anyhow::Result { + let read_cap = McCapability::new_owned( + namespace_secret.public_key().id(), + &namespace_secret, + user_key, + AccessMode::Read, + )?; + let subspace_cap = meadowcap::McSubspaceCapability::new( + namespace_secret.public_key().id(), + &namespace_secret, + user_key, + )?; + Ok(Self::new(read_cap, Some(subspace_cap))) + } + + pub fn read_cap(&self) -> &McCapability { + &self.0 + } + + pub fn subspace_cap(&self) -> Option<&McSubspaceCapability> { + self.1.as_ref() + } + + pub fn namespace(&self) -> NamespaceId { + *self.0.granted_namespace() + } + + pub fn delegate( + &self, + user_secret: &UserSecretKey, + new_user: UserId, + new_area: Area, + ) -> anyhow::Result { + let subspace_cap = match self.subspace_cap() { + Some(subspace_cap) if new_area.subspace().is_any() && !new_area.path().is_empty() => { + Some(subspace_cap.delegate(user_secret, &new_user)?) 
+ } + _ => None, + }; + let read_cap = self + .read_cap() + .delegate(user_secret, &new_user, &new_area)?; + Ok(Self::new(read_cap, subspace_cap)) + } +} + +// #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Ord, PartialOrd)] +// pub enum AccessMode { +// ReadOnly, +// ReadWrite, +// } + +pub mod serde_encoding { + use serde::{Deserialize, Deserializer}; + use ufotofu::sync::{consumer::IntoVec, producer::FromSlice}; + use willow_encoding::sync::{Decodable, Encodable, RelativeDecodable, RelativeEncodable}; + + use crate::proto::grouping::Area; + + use super::*; + + #[derive(Serialize, Deserialize, Debug, Clone, Eq, PartialEq, Hash)] + pub struct SerdeReadAuthorisation(SerdeMcCapability, Option); + + impl From for SerdeReadAuthorisation { + fn from(value: ReadAuthorisation) -> Self { + Self( + SerdeMcCapability::from(value.0), + value.1.map(SerdeMcSubspaceCapability::from), + ) + } + } + + impl From for ReadAuthorisation { + fn from(value: SerdeReadAuthorisation) -> Self { + Self(value.0.into(), value.1.map(Into::into)) + } + } + + #[derive( + Debug, Clone, Eq, PartialEq, Hash, derive_more::From, derive_more::Into, derive_more::Deref, + )] + pub struct SerdeMcCapability(McCapability); + + impl Serialize for SerdeMcCapability { + fn serialize(&self, serializer: S) -> Result { + let relative = Area::new_full(); + let encoded = { + let mut consumer = IntoVec::::new(); + self.0 + .relative_encode(&relative, &mut consumer) + .expect("encoding not to fail"); + consumer.into_vec() + }; + encoded.serialize(serializer) + } + } + + impl<'de> Deserialize<'de> for SerdeMcCapability { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let relative = Area::new_full(); + let data: Vec = Deserialize::deserialize(deserializer)?; + let decoded = { + let mut producer = FromSlice::new(&data); + let decoded = McCapability::relative_decode(&relative, &mut producer) + .expect("decoding not to fail"); + Self(decoded) + }; + Ok(decoded) + } + } + + #[derive( 
+ Debug, Clone, Eq, PartialEq, Hash, derive_more::From, derive_more::Into, derive_more::Deref, + )] + pub struct SerdeMcSubspaceCapability(McSubspaceCapability); + + impl Serialize for SerdeMcSubspaceCapability { + fn serialize(&self, serializer: S) -> Result { + let encoded = { + let mut consumer = IntoVec::::new(); + self.0.encode(&mut consumer).expect("encoding not to fail"); + consumer.into_vec() + }; + encoded.serialize(serializer) + } + } + + impl<'de> Deserialize<'de> for SerdeMcSubspaceCapability { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let data: Vec = Deserialize::deserialize(deserializer)?; + let decoded = { + let mut producer = FromSlice::new(&data); + let decoded = + McSubspaceCapability::decode(&mut producer).expect("decoding not to fail"); + Self(decoded) + }; + Ok(decoded) + } + } } // use std::{io::Write, sync::Arc}; diff --git a/iroh-willow/src/proto/sync.rs b/iroh-willow/src/proto/sync.rs index 781ba7bed2..a15e7a0752 100644 --- a/iroh-willow/src/proto/sync.rs +++ b/iroh-willow/src/proto/sync.rs @@ -1,1014 +1 @@ -use std::{fmt, io::Write, sync::Arc}; - -use iroh_base::{base32::fmt_short, hash::Hash}; -use rand::Rng; -use rand_core::CryptoRngCore; -use serde::{Deserialize, Serialize}; -use strum::{EnumCount, VariantArray}; - -use crate::{ - proto::keys::UserSecretKey, - util::codec::{DecodeOutcome, Decoder, Encoder}, -}; - -use super::{ - grouping::{Area, AreaOfInterest, ThreeDRange}, - keys::{NamespaceSecretKey, UserPublicKey}, - meadowcap::{self, AccessMode}, - willow::{Entry, NamespaceId, DIGEST_LENGTH}, -}; - -pub const MAX_PAYLOAD_SIZE_POWER: u8 = 12; - -/// The maximum payload size limits when the other peer may include Payloads directly when transmitting Entries: -/// when an Entry’s payload_length is strictly greater than the maximum payload size, -/// its Payload may only be transmitted when explicitly requested. -/// -/// The value is 4096. 
-pub const MAX_PAYLOAD_SIZE: usize = 2usize.pow(MAX_PAYLOAD_SIZE_POWER as u32); - -pub const CHALLENGE_LENGTH: usize = 32; -pub const CHALLENGE_HASH_LENGTH: usize = DIGEST_LENGTH; - -#[derive(derive_more::Debug, Copy, Clone, Eq, PartialEq, Serialize, Deserialize)] -pub struct ChallengeHash(#[debug("{}..", fmt_short(self.0))] [u8; CHALLENGE_HASH_LENGTH]); - -impl ChallengeHash { - pub fn as_bytes(&self) -> &[u8] { - &self.0 - } - - pub fn from_bytes(bytes: [u8; CHALLENGE_HASH_LENGTH]) -> Self { - Self(bytes) - } -} - -#[derive(derive_more::Debug, Copy, Clone, Serialize, Deserialize, Eq, PartialEq)] -pub struct AccessChallenge(#[debug("{}..", fmt_short(self.0))] AccessChallengeBytes); - -pub type AccessChallengeBytes = [u8; CHALLENGE_LENGTH]; - -impl Default for AccessChallenge { - fn default() -> Self { - Self::generate() - } -} - -impl AccessChallenge { - pub fn generate() -> Self { - Self(rand::random()) - } - - pub fn generate_with_rng(rng: &mut impl CryptoRngCore) -> Self { - Self(rng.gen()) - } - - pub fn as_bytes(&self) -> &[u8] { - &self.0 - } - - pub fn to_bytes(&self) -> [u8; 32] { - self.0 - } - - pub fn hash(&self) -> ChallengeHash { - ChallengeHash(*Hash::new(self.0).as_bytes()) - } -} - -// In Meadowcap, for example, StaticToken is the type McCapability -// and DynamicToken is the type UserSignature, -// which together yield a MeadowcapAuthorisationToken. - -pub type StaticToken = meadowcap::McCapability; -pub type ValidatedStaticToken = meadowcap::ValidatedCapability; -pub type DynamicToken = meadowcap::UserSignature; - -/// Whereas write access control is baked into the Willow data model, -/// read access control resides in the replication layer. -/// To manage read access via capabilities, all peers must cooperate in sending Entries only to peers -/// who have presented a valid read capability for the Entry. -/// We describe the details in a capability-system-agnostic way here. 
-/// To use Meadowcap for this approach, simply choose the type of valid McCapabilities with access mode read as the read capabilities. -pub type ReadCapability = meadowcap::McCapability; - -/// Whenever a peer is granted a complete read capability of non-empty path, -/// it should also be granted a corresponding subspace capability. -/// Each subspace capability must have a single receiver (a public key of some signature scheme), -/// and a single granted namespace (a NamespaceId). -/// The receiver can authenticate itself by signing a collaboratively selected nonce. -pub type SubspaceCapability = Arc; - -pub type SyncSignature = meadowcap::UserSignature; - -pub type Receiver = meadowcap::UserPublicKey; - -/// Data from the initial transmission -/// -/// This happens before the session is initialized. -#[derive(Debug)] -pub struct InitialTransmission { - /// The [`AccessChallenge`] nonce, whose hash we sent to the remote. - pub our_nonce: AccessChallenge, - /// The [`ChallengeHash`] we received from the remote. - pub received_commitment: ChallengeHash, - /// The maximum payload size we received from the remote. - pub their_max_payload_size: u64, -} - -/// Represents an authorisation to read an area of data in a Namespace. 
-#[derive(Debug, Clone, Serialize, Deserialize, Hash, Eq, PartialEq, Ord, PartialOrd)] -pub struct ReadAuthorisation(ReadCapability, Option); - -impl ReadAuthorisation { - pub fn new(read_cap: ReadCapability, subspace_cap: Option) -> Self { - Self(read_cap, subspace_cap) - } - - pub fn new_owned(namespace_secret: &NamespaceSecretKey, user_key: UserPublicKey) -> Self { - let read_cap = ReadCapability::new_owned(namespace_secret, user_key, AccessMode::ReadOnly); - let subspace_cap = Arc::new(meadowcap::McSubspaceCapability::new( - namespace_secret, - user_key, - )); - Self::new(read_cap, Some(subspace_cap)) - } - - pub fn read_cap(&self) -> &ReadCapability { - &self.0 - } - - pub fn subspace_cap(&self) -> Option<&SubspaceCapability> { - self.1.as_ref() - } - - pub fn namespace(&self) -> NamespaceId { - self.0.granted_namespace().id() - } - - pub fn delegate( - &self, - user_secret: &UserSecretKey, - new_user: UserPublicKey, - new_area: Area, - ) -> anyhow::Result { - let subspace_cap = match self.subspace_cap() { - Some(subspace_cap) if new_area.subspace.is_any() && !new_area.path.is_empty() => { - Some(Arc::new(subspace_cap.delegate(user_secret, new_user)?)) - } - _ => None, - }; - let read_cap = self.read_cap().delegate(user_secret, new_user, new_area)?; - Ok(Self::new(read_cap, subspace_cap)) - } -} - -/// The different resource handles employed by the WGPS. -#[derive(Debug, Serialize, Deserialize, strum::Display)] -pub enum HandleType { - /// Resource handle for the private set intersection part of private area intersection. - /// More precisely, an IntersectionHandle stores a PsiGroup member together with one of two possible states: - /// * pending (waiting for the other peer to perform scalar multiplication), - /// * completed (both peers performed scalar multiplication). - Intersection, - - /// Resource handle for [`ReadCapability`] that certify access to some Entries. - Capability, - - /// Resource handle for [`AreaOfInterest`]s that peers wish to sync. 
- AreaOfInterest, - - /// Resource handle that controls the matching from Payload transmissions to Payload requests. - PayloadRequest, - - /// Resource handle for [`StaticToken`]s that peers need to transmit. - StaticToken, -} - -#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, derive_more::TryFrom)] -pub enum Channel { - Control, - Logical(LogicalChannel), -} - -impl Channel { - pub const COUNT: usize = LogicalChannel::COUNT + 1; - - pub fn all() -> [Channel; LogicalChannel::COUNT + 1] { - // TODO: do this without allocation - // https://users.rust-lang.org/t/how-to-concatenate-array-literals-in-compile-time/21141/3 - [Self::Control] - .into_iter() - .chain(LogicalChannel::VARIANTS.iter().copied().map(Self::Logical)) - .collect::>() - .try_into() - .expect("static length") - } - - pub fn fmt_short(&self) -> &'static str { - match self { - Channel::Control => "Ctl", - Channel::Logical(ch) => ch.fmt_short(), - } - } - - pub fn id(&self) -> u8 { - match self { - Channel::Control => 0, - Channel::Logical(ch) => ch.id(), - } - } - - pub fn from_id(id: u8) -> Result { - match id { - 0 => Ok(Self::Control), - _ => { - let ch = LogicalChannel::from_id(id)?; - Ok(Self::Logical(ch)) - } - } - } -} - -/// The different logical channels employed by the WGPS. -#[derive( - Debug, - Serialize, - Deserialize, - Copy, - Clone, - Eq, - PartialEq, - Hash, - strum::EnumIter, - strum::VariantArray, - strum::EnumCount, -)] -pub enum LogicalChannel { - /// Logical channel for controlling the binding of new IntersectionHandles. - Intersection, - /// Logical channel for controlling the binding of new CapabilityHandles. - Capability, - /// Logical channel for controlling the binding of new AreaOfInterestHandles. - AreaOfInterest, - /// Logical channel for controlling the binding of new StaticTokenHandles. - StaticToken, - /// Logical channel for performing 3d range-based set reconciliation. 
- Reconciliation, - /// Logical channel for transmitting Entries and Payloads outside of 3d range-based set reconciliation. - Data, - // /// Logical channel for controlling the binding of new PayloadRequestHandles. - // PayloadRequest, -} - -#[derive(Debug, thiserror::Error)] -#[error("invalid channel id")] -pub struct InvalidChannelId; - -impl LogicalChannel { - pub fn all() -> [LogicalChannel; LogicalChannel::COUNT] { - LogicalChannel::VARIANTS - .try_into() - .expect("statically checked") - } - pub fn fmt_short(&self) -> &'static str { - match self { - LogicalChannel::Intersection => "Pai", - LogicalChannel::Reconciliation => "Rec", - LogicalChannel::StaticToken => "StT", - LogicalChannel::Capability => "Cap", - LogicalChannel::AreaOfInterest => "AoI", - LogicalChannel::Data => "Dat", - } - } - - pub fn from_id(id: u8) -> Result { - match id { - 2 => Ok(Self::Intersection), - 3 => Ok(Self::AreaOfInterest), - 4 => Ok(Self::Capability), - 5 => Ok(Self::StaticToken), - 6 => Ok(Self::Reconciliation), - 7 => Ok(Self::Data), - _ => Err(InvalidChannelId), - } - } - - pub fn id(&self) -> u8 { - match self { - LogicalChannel::Intersection => 2, - LogicalChannel::AreaOfInterest => 3, - LogicalChannel::Capability => 4, - LogicalChannel::StaticToken => 5, - LogicalChannel::Reconciliation => 6, - LogicalChannel::Data => 7, - } - } -} - -#[derive(Debug, Serialize, Deserialize, Hash, Eq, PartialEq, Clone, Copy, derive_more::From)] -pub struct AreaOfInterestHandle(u64); - -#[derive(Debug, Serialize, Deserialize, Hash, Eq, PartialEq, Clone, Copy, derive_more::From)] -pub struct IntersectionHandle(u64); - -#[derive(Debug, Serialize, Deserialize, Hash, Eq, PartialEq, Clone, Copy, derive_more::From)] -pub struct CapabilityHandle(u64); - -#[derive(Debug, Serialize, Deserialize, Hash, Eq, PartialEq, Clone, Copy, derive_more::From)] -pub struct StaticTokenHandle(u64); - -#[derive(Debug, Hash, Eq, PartialEq, Clone, Copy, derive_more::From)] -pub enum ResourceHandle { - 
AreaOfInterest(AreaOfInterestHandle), - Intersection(IntersectionHandle), - Capability(CapabilityHandle), - StaticToken(StaticTokenHandle), -} - -pub trait IsHandle: - std::fmt::Debug + std::hash::Hash + From + Into + Copy + Eq + PartialEq -{ - fn handle_type(&self) -> HandleType; - fn value(&self) -> u64; -} - -impl IsHandle for CapabilityHandle { - fn handle_type(&self) -> HandleType { - HandleType::Capability - } - fn value(&self) -> u64 { - self.0 - } -} -impl IsHandle for StaticTokenHandle { - fn handle_type(&self) -> HandleType { - HandleType::StaticToken - } - fn value(&self) -> u64 { - self.0 - } -} -impl IsHandle for AreaOfInterestHandle { - fn handle_type(&self) -> HandleType { - HandleType::AreaOfInterest - } - fn value(&self) -> u64 { - self.0 - } -} -impl IsHandle for IntersectionHandle { - fn handle_type(&self) -> HandleType { - HandleType::Intersection - } - fn value(&self) -> u64 { - self.0 - } -} - -/// Complete the commitment scheme to determine the challenge for read authentication. -#[derive(Serialize, Deserialize, PartialEq, Eq, derive_more::Debug)] -pub struct CommitmentReveal { - /// The nonce of the sender, encoded as a big-endian unsigned integer. 
- #[debug("{}..", iroh_base::base32::fmt_short(self.nonce.0))] - pub nonce: AccessChallenge, -} - -#[derive( - Serialize, - Deserialize, - derive_more::From, - derive_more::TryInto, - derive_more::Debug, - strum::Display, -)] -pub enum Message { - #[debug("{:?}", _0)] - CommitmentReveal(CommitmentReveal), - #[debug("{:?}", _0)] - PaiReplyFragment(PaiReplyFragment), - #[debug("{:?}", _0)] - PaiBindFragment(PaiBindFragment), - #[debug("{:?}", _0)] - PaiRequestSubspaceCapability(PaiRequestSubspaceCapability), - #[debug("{:?}", _0)] - PaiReplySubspaceCapability(Box), - #[debug("{:?}", _0)] - SetupBindStaticToken(SetupBindStaticToken), - #[debug("{:?}", _0)] - SetupBindReadCapability(SetupBindReadCapability), - #[debug("{:?}", _0)] - SetupBindAreaOfInterest(SetupBindAreaOfInterest), - #[debug("{:?}", _0)] - ReconciliationSendFingerprint(ReconciliationSendFingerprint), - #[debug("{:?}", _0)] - ReconciliationAnnounceEntries(ReconciliationAnnounceEntries), - #[debug("{:?}", _0)] - ReconciliationSendEntry(ReconciliationSendEntry), - #[debug("{:?}", _0)] - ReconciliationSendPayload(ReconciliationSendPayload), - #[debug("{:?}", _0)] - ReconciliationTerminatePayload(ReconciliationTerminatePayload), - #[debug("{:?}", _0)] - DataSendEntry(DataSendEntry), - #[debug("{:?}", _0)] - DataSendPayload(DataSendPayload), - #[debug("{:?}", _0)] - DataSetMetadata(DataSetMetadata), - // DataBindPayloadRequest - // DataReplyPayload - #[debug("{:?}", _0)] - ControlIssueGuarantee(ControlIssueGuarantee), - #[debug("{:?}", _0)] - ControlAbsolve(ControlAbsolve), - #[debug("{:?}", _0)] - ControlPlead(ControlPlead), - #[debug("{:?}", _0)] - ControlAnnounceDropping(ControlAnnounceDropping), - #[debug("{:?}", _0)] - ControlApologise(ControlApologise), - #[debug("{:?}", _0)] - ControlFreeHandle(ControlFreeHandle), -} - -impl Message { - pub fn same_kind(&self, other: &Self) -> bool { - std::mem::discriminant(self) == std::mem::discriminant(other) - } - - pub fn covers_region(&self) -> 
Option<(AreaOfInterestHandle, u64)> { - match self { - Message::ReconciliationSendFingerprint(msg) => { - msg.covers.map(|covers| (msg.receiver_handle, covers)) - } - Message::ReconciliationAnnounceEntries(msg) => { - msg.covers.map(|covers| (msg.receiver_handle, covers)) - } - _ => None, - } - } -} - -impl Encoder for Message { - fn encoded_len(&self) -> usize { - let data_len = postcard::experimental::serialized_size(&self).unwrap(); - let header_len = 4; - data_len + header_len - } - - fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { - let len = postcard::experimental::serialized_size(&self).unwrap() as u32; - out.write_all(&len.to_be_bytes())?; - postcard::to_io(self, out)?; - Ok(()) - } -} - -impl Decoder for Message { - fn decode_from(data: &[u8]) -> anyhow::Result> { - // tracing::debug!(input_len = data.len(), "Message decode: start"); - if data.len() < 4 { - return Ok(DecodeOutcome::NeedMoreData); - } - let len = u32::from_be_bytes(data[..4].try_into().expect("just checked")) as usize; - // tracing::debug!(msg_len = len, "Message decode: parsed len"); - let end = len + 4; - if data.len() < end { - // tracing::debug!("Message decode: need more data"); - return Ok(DecodeOutcome::NeedMoreData); - } - // tracing::debug!("Message decode: now deserilalize"); - let res = postcard::from_bytes(&data[4..end]); - // tracing::debug!(?res, "Message decode: res"); - let item = res?; - // tracing::debug!(?item, "Message decode: decoded!"); - Ok(DecodeOutcome::Decoded { - item, - consumed: end, - }) - } -} - -impl Message { - pub fn channel(&self) -> Channel { - match self { - Message::PaiBindFragment(_) | Message::PaiReplyFragment(_) => { - Channel::Logical(LogicalChannel::Intersection) - } - - Message::SetupBindReadCapability(_) => Channel::Logical(LogicalChannel::Capability), - Message::SetupBindAreaOfInterest(_) => Channel::Logical(LogicalChannel::AreaOfInterest), - Message::SetupBindStaticToken(_) => Channel::Logical(LogicalChannel::StaticToken), - - 
Message::ReconciliationSendFingerprint(_) - | Message::ReconciliationAnnounceEntries(_) - | Message::ReconciliationSendEntry(_) - | Message::ReconciliationSendPayload(_) - | Message::ReconciliationTerminatePayload(_) => { - Channel::Logical(LogicalChannel::Reconciliation) - } - - Message::DataSendEntry(_) - | Message::DataSendPayload(_) - | Message::DataSetMetadata(_) => Channel::Logical(LogicalChannel::Data), - - Message::CommitmentReveal(_) - | Message::PaiRequestSubspaceCapability(_) - | Message::PaiReplySubspaceCapability(_) - | Message::ControlIssueGuarantee(_) - | Message::ControlAbsolve(_) - | Message::ControlPlead(_) - | Message::ControlAnnounceDropping(_) - | Message::ControlApologise(_) - | Message::ControlFreeHandle(_) => Channel::Control, - } - } -} - -// #[derive(Debug, derive_more::From, derive_more::TryInto)] -// pub enum ChanMessage { -// Control(ControlMessage), -// Reconciliation(ReconciliationMessage), -// } -// impl From for ChanMessage { -// fn from(value: Message) -> Self { -// match value { -// Message::ReconciliationSendFingerprint(msg) => Self::Reconciliation(msg.into()), -// Message::ReconciliationAnnounceEntries(msg) => Self::Reconciliation(msg.into()), -// Message::ReconciliationSendEntry(msg) => Self::Reconciliation(msg.into()), -// -// Message::CommitmentReveal(msg) => Self::Control(msg.into()), -// Message::SetupBindStaticToken(msg) => Self::Control(msg.into()), -// Message::SetupBindReadCapability(msg) => Self::Control(msg.into()), -// Message::SetupBindAreaOfInterest(msg) => Self::Control(msg.into()), -// -// Message::ControlIssueGuarantee(msg) => Self::Control(msg.into()), -// Message::ControlAbsolve(msg) => Self::Control(msg.into()), -// Message::ControlPlead(msg) => Self::Control(msg.into()), -// Message::ControlAnnounceDropping(msg) => Self::Control(msg.into()), -// Message::ControlApologise(msg) => Self::Control(msg.into()), -// Message::ControlFreeHandle(msg) => Self::Control(msg.into()), -// } -// } -// } -// impl From for 
Message { -// fn from(message: ChanMessage) -> Self { -// match message { -// ChanMessage::Control(message) => message.into(), -// ChanMessage::Reconciliation(message) => message.into(), -// } -// } -// } - -#[derive(Debug, derive_more::From, strum::Display)] -pub enum ReconciliationMessage { - SendFingerprint(ReconciliationSendFingerprint), - AnnounceEntries(ReconciliationAnnounceEntries), - SendEntry(ReconciliationSendEntry), - SendPayload(ReconciliationSendPayload), - TerminatePayload(ReconciliationTerminatePayload), -} - -impl TryFrom for ReconciliationMessage { - type Error = (); - fn try_from(message: Message) -> Result { - match message { - Message::ReconciliationSendFingerprint(msg) => Ok(msg.into()), - Message::ReconciliationAnnounceEntries(msg) => Ok(msg.into()), - Message::ReconciliationSendEntry(msg) => Ok(msg.into()), - Message::ReconciliationSendPayload(msg) => Ok(msg.into()), - Message::ReconciliationTerminatePayload(msg) => Ok(msg.into()), - _ => Err(()), - } - } -} - -impl From for Message { - fn from(message: ReconciliationMessage) -> Self { - match message { - ReconciliationMessage::SendFingerprint(message) => message.into(), - ReconciliationMessage::AnnounceEntries(message) => message.into(), - ReconciliationMessage::SendEntry(message) => message.into(), - ReconciliationMessage::SendPayload(message) => message.into(), - ReconciliationMessage::TerminatePayload(message) => message.into(), - } - } -} - -#[derive(Debug, derive_more::From, strum::Display)] -pub enum DataMessage { - SendEntry(DataSendEntry), - SendPayload(DataSendPayload), - SetMetadata(DataSetMetadata), -} - -impl TryFrom for DataMessage { - type Error = (); - fn try_from(message: Message) -> Result { - match message { - Message::DataSendEntry(msg) => Ok(msg.into()), - Message::DataSendPayload(msg) => Ok(msg.into()), - Message::DataSetMetadata(msg) => Ok(msg.into()), - _ => Err(()), - } - } -} - -impl From for Message { - fn from(message: DataMessage) -> Self { - match message { - 
DataMessage::SendEntry(message) => message.into(), - DataMessage::SendPayload(message) => message.into(), - DataMessage::SetMetadata(message) => message.into(), - } - } -} - -#[derive(Debug, derive_more::From, strum::Display)] -pub enum IntersectionMessage { - BindFragment(PaiBindFragment), - ReplyFragment(PaiReplyFragment), -} - -impl TryFrom for IntersectionMessage { - type Error = (); - fn try_from(message: Message) -> Result { - match message { - Message::PaiBindFragment(msg) => Ok(msg.into()), - Message::PaiReplyFragment(msg) => Ok(msg.into()), - _ => Err(()), - } - } -} - -impl From for Message { - fn from(message: IntersectionMessage) -> Self { - match message { - IntersectionMessage::BindFragment(msg) => msg.into(), - IntersectionMessage::ReplyFragment(msg) => msg.into(), - } - } -} - -// impl Encoder for ReconciliationMessage { -// fn encoded_len(&self) -> usize { -// Message::from(se) -// todo!() -// } -// -// fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { -// todo!() -// } -// } -// -// #[derive(Debug, derive_more::From)] -// pub enum ControlMessage { -// CommitmentReveal(CommitmentReveal), -// // TODO: move to CapabilityChannel -// SetupBindReadCapability(SetupBindReadCapability), -// // TODO: move to StaticTokenChannel -// SetupBindStaticToken(SetupBindStaticToken), -// // TODO: move to AreaOfInterestChannel -// SetupBindAreaOfInterest(SetupBindAreaOfInterest), -// -// IssueGuarantee(ControlIssueGuarantee), -// Absolve(ControlAbsolve), -// Plead(ControlPlead), -// AnnounceDropping(ControlAnnounceDropping), -// Apologise(ControlApologise), -// -// FreeHandle(ControlFreeHandle), -// } -// -// impl From for Message { -// fn from(message: ControlMessage) -> Self { -// match message { -// ControlMessage::CommitmentReveal(message) => message.into(), -// ControlMessage::SetupBindReadCapability(message) => message.into(), -// ControlMessage::SetupBindStaticToken(message) => message.into(), -// ControlMessage::SetupBindAreaOfInterest(message) => 
message.into(), -// ControlMessage::IssueGuarantee(message) => message.into(), -// ControlMessage::Absolve(message) => message.into(), -// ControlMessage::Plead(message) => message.into(), -// ControlMessage::AnnounceDropping(message) => message.into(), -// ControlMessage::Apologise(message) => message.into(), -// ControlMessage::FreeHandle(message) => message.into(), -// } -// } -// } - -/// Bind a ReadCapability to a CapabilityHandle. -/// -/// The SetupBindReadCapability messages let peers bind a ReadCapability for later reference. -/// To do so, they must present a valid SyncSignature over their challenge, thus demonstrating -/// they hold the secret key corresponding to receiver of the ReadCapability. -/// -/// These requirements allow us to encode SetupBindReadCapability messages more efficiently. -/// The handle must be bound to the fragment (primary, if possible) of the capability with the -/// longest Path prefix that is in the intersection of the two peers’ fragments. -/// -/// SetupBindReadCapability messages use the CapabilityChannel. -#[derive(Debug, Serialize, Deserialize)] -pub struct SetupBindReadCapability { - /// A ReadCapability that the peer wishes to reference in future messages. - pub capability: ReadCapability, - - /// The IntersectionHandle, bound by the sender, of the capability’s fragment - /// with the longest Path in the intersection of the fragments. - /// - /// If both a primary and secondary such fragment exist, choose the primary one. - pub handle: IntersectionHandle, - - /// The SyncSignature issued by the Receiver of the capability over the sender’s challenge. - pub signature: SyncSignature, -} - -/// Bind an AreaOfInterest to an AreaOfInterestHandle. -#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)] -pub struct SetupBindAreaOfInterest { - /// An AreaOfInterest that the peer wishes to reference in future messages. 
- pub area_of_interest: AreaOfInterest, - /// A CapabilityHandle bound by the sender that grants access to all entries in the message’s area_of_interest. - pub authorisation: CapabilityHandle, -} - -impl SetupBindAreaOfInterest { - pub fn area(&self) -> &Area { - &self.area_of_interest.area - } -} - -/// Bind a StaticToken to a StaticTokenHandle. -#[derive(Debug, Serialize, Deserialize)] -pub struct SetupBindStaticToken { - /// The StaticToken to bind. - pub static_token: StaticToken, -} - -/// Send a Fingerprint as part of 3d range-based set reconciliation. -#[derive(Debug, Serialize, Deserialize)] -pub struct ReconciliationSendFingerprint { - /// The 3dRange whose Fingerprint is transmitted. - pub range: ThreeDRange, - /// The Fingerprint of the range, that is, of all LengthyEntries the peer has in the range. - pub fingerprint: Fingerprint, - /// An AreaOfInterestHandle, bound by the sender of this message, that fully contains the range. - pub sender_handle: AreaOfInterestHandle, - /// An AreaOfInterestHandle, bound by the receiver of this message, that fully contains the range. - pub receiver_handle: AreaOfInterestHandle, - /// If this message is the last of a set of messages that together cover the range of some prior - /// [`ReconciliationSendFingerprint`] message, then this field contains the range_count of that - /// [`ReconciliationSendFingerprint`] message. Otherwise, none. - pub covers: Option, -} - -impl ReconciliationSendFingerprint { - pub fn handles(&self) -> (AreaOfInterestHandle, AreaOfInterestHandle) { - (self.receiver_handle, self.sender_handle) - } -} - -/// Prepare transmission of the LengthyEntries a peer has in a 3dRange as part of 3d range-based set reconciliation. -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct ReconciliationAnnounceEntries { - /// The 3dRange whose LengthyEntries to transmit. - pub range: ThreeDRange, - /// The number of Entries the sender has in the range. 
- pub count: u64, - /// A boolean flag to indicate whether the sender wishes to receive a ReconciliationAnnounceEntries message for the same 3dRange in return. - pub want_response: bool, - /// Whether the sender promises to send the Entries in the range sorted from oldest to newest. - pub will_sort: bool, - /// An AreaOfInterestHandle, bound by the sender of this message, that fully contains the range. - pub sender_handle: AreaOfInterestHandle, - /// An AreaOfInterestHandle, bound by the receiver of this message, that fully contains the range. - pub receiver_handle: AreaOfInterestHandle, - /// If this message is the last of a set of messages that together cover the range of some prior - /// [`ReconciliationSendFingerprint`] message, then this field contains the range_count of that - /// [`ReconciliationSendFingerprint`] message. Otherwise, none. - pub covers: Option, -} - -impl ReconciliationAnnounceEntries { - pub fn handles(&self) -> (AreaOfInterestHandle, AreaOfInterestHandle) { - (self.receiver_handle, self.sender_handle) - } -} - -/// Transmit a [`LengthyEntry`] as part of 3d range-based set reconciliation. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ReconciliationSendEntry { - /// The LengthyEntry itself. - pub entry: LengthyEntry, - /// A StaticTokenHandle, bound by the sender of this message, that is bound to the static part of the entry’s AuthorisationToken. - pub static_token_handle: StaticTokenHandle, - /// The dynamic part of the entry’s AuthorisationToken. - pub dynamic_token: DynamicToken, -} - -/// Transmit some transformed Payload bytes. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ReconciliationSendPayload { - // A substring of the bytes obtained by applying transform_payload to the Payload to be transmitted. - pub bytes: bytes::Bytes, -} - -/// Indicate that no more bytes will be transmitted for the currently transmitted Payload as part of set reconciliation. 
-#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ReconciliationTerminatePayload; - -/// Transmit an AuthorisedEntry to the other peer, and optionally prepare transmission of its Payload. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct DataSendEntry { - /// The Entry to transmit. - pub entry: Entry, - /// A [`StaticTokenHandle`] bound to the StaticToken of the Entry to transmit. - pub static_token_handle: StaticTokenHandle, - /// The DynamicToken of the Entry to transmit. - pub dynamic_token: DynamicToken, - /// The offset in the Payload in bytes at which Payload transmission will begin. - /// - /// If this is equal to the Entry’s payload_length, the Payload will not be transmitted. - pub offset: u64, -} - -/// Transmit some transformed Payload bytes. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct DataSendPayload { - // A substring of the bytes obtained by applying transform_payload to the Payload to be transmitted. - pub bytes: bytes::Bytes, -} - -/// Express preferences for Payload transfer in the intersection of two AreaOfInterests. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct DataSetMetadata { - /// An AreaOfInterestHandle, bound by the sender of this message. - sender_handle: AreaOfInterestHandle, - /// An AreaOfInterestHandle, bound by the receiver of this message. - receiver_handle: AreaOfInterestHandle, - // Whether the other peer should eagerly forward Payloads in this intersection. - is_eager: bool, -} - -// /// Bind an Entry to a PayloadRequestHandle and request transmission of its Payload from an offset. -// #[derive(Debug, Clone, Serialize, Deserialize)] -// pub struct DataBindPayloadRequest { -// /// The Entry to request. -// entry: Entry, -// /// The offset in the Payload starting from which the sender would like to receive the Payload bytes. -// offset: u64, -// /// A resource handle for a ReadCapability bound by the sender that grants them read access to the bound Entry. 
-// capability: CapabilityHandle, -// } -// -// /// Set up the state for replying to a DataBindPayloadRequest message. -// #[derive(Debug, Clone, Serialize, Deserialize)] -// pub struct DataReplyPayload { -// /// The PayloadRequestHandle to which to reply. -// handle: u64, -// } - -/// An Entry together with information about how much of its Payload a peer holds. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct LengthyEntry { - /// The Entry in question. - pub entry: Entry, - /// The number of consecutive bytes from the start of the entry’s Payload that the peer holds. - pub available: u64, -} - -impl LengthyEntry { - pub fn new(entry: Entry, available: u64) -> Self { - Self { entry, available } - } -} - -#[derive(Default, Serialize, Deserialize, Eq, PartialEq, Clone, Copy)] -pub struct Fingerprint(pub [u8; 32]); - -impl fmt::Debug for Fingerprint { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "Fingerprint({})", iroh_base::base32::fmt_short(self.0)) - } -} - -impl Fingerprint { - pub fn add_entry(&mut self, entry: &Entry) { - // TODO: Don't allocate - let next = - Fingerprint(*Hash::new(entry.encode().expect("encoding not to fail")).as_bytes()); - *self ^= next; - } - - pub fn add_entries<'a>(&mut self, iter: impl Iterator) { - for entry in iter { - self.add_entry(entry); - } - } - - pub fn from_entries<'a>(iter: impl Iterator) -> Self { - let mut this = Self::default(); - this.add_entries(iter); - this - } - - pub fn is_empty(&self) -> bool { - *self == Self::default() - } -} - -impl std::ops::BitXorAssign for Fingerprint { - fn bitxor_assign(&mut self, rhs: Self) { - for (a, b) in self.0.iter_mut().zip(rhs.0.iter()) { - *a ^= b; - } - } -} - -/// Make a binding promise of available buffer capacity to the other peer -#[derive(Debug, Serialize, Deserialize)] -pub struct ControlIssueGuarantee { - pub amount: u64, - pub channel: LogicalChannel, -} - -/// Allow the other peer to reduce its total buffer capacity by amount. 
-#[derive(Debug, Serialize, Deserialize)] -pub struct ControlAbsolve { - pub amount: u64, - pub channel: LogicalChannel, -} - -/// Ask the other peer to send an ControlAbsolve message -/// such that the receiver remaining guarantees will be target. -#[derive(Debug, Serialize, Deserialize)] -pub struct ControlPlead { - pub target: u64, - pub channel: LogicalChannel, -} - -/// The server notifies the client that it has started dropping messages and will continue -/// to do so until it receives an Apologise message. The server must send any outstanding -/// guarantees of the logical channel before sending a AnnounceDropping message. -#[derive(Debug, Serialize, Deserialize)] -pub struct ControlAnnounceDropping { - pub channel: LogicalChannel, -} - -/// The client notifies the server that it can stop dropping messages on this logical channel. -#[derive(Debug, Serialize, Deserialize)] -pub struct ControlApologise { - pub channel: LogicalChannel, -} - -/// Ask the other peer to free a resource handle. -/// -/// This is needed for symmetric protocols where peers act as both client and server simultaneously -/// and bind resource handles to the same handle types. -#[derive(Debug, Serialize, Deserialize)] -pub struct ControlFreeHandle { - handle: u64, - /// Indicates whether the peer sending this message is the one who created the handle (true) or not (false). - mine: bool, - handle_type: HandleType, -} - -pub type PsiGroupBytes = [u8; 32]; - -/// Bind data to an IntersectionHandle for performing private area intersection. -#[derive(derive_more::Debug, Serialize, Deserialize)] -pub struct PaiBindFragment { - /// The result of first applying hash_into_group to some fragment for private area intersection and then performing scalar multiplication with scalar. - #[debug("{}", hex::encode(self.group_member))] - pub group_member: PsiGroupBytes, - /// Set to true if the private set intersection item is a secondary fragment. 
- pub is_secondary: bool, -} - -/// Finalise private set intersection for a single item. -#[derive(derive_more::Debug, Serialize, Deserialize)] -pub struct PaiReplyFragment { - /// The IntersectionHandle of the PaiBindFragment message which this finalises. - pub handle: IntersectionHandle, - /// The result of performing scalar multiplication between the group_member of the message that this is replying to and scalar. - #[debug("{}", hex::encode(self.group_member))] - pub group_member: PsiGroupBytes, -} - -/// Ask the receiver to send a SubspaceCapability. -#[derive(Debug, Serialize, Deserialize)] -pub struct PaiRequestSubspaceCapability { - /// The IntersectionHandle bound by the sender for the least-specific secondary fragment for whose NamespaceId to request the SubspaceCapability. - pub handle: IntersectionHandle, -} - -/// Send a previously requested SubspaceCapability. -#[derive(Debug, Serialize, Deserialize)] -pub struct PaiReplySubspaceCapability { - /// The handle of the PaiRequestSubspaceCapability message that this answers (hence, an IntersectionHandle bound by the receiver of this message). - pub handle: IntersectionHandle, - /// A SubspaceCapability whose granted namespace corresponds to the request this answers. - pub capability: SubspaceCapability, - /// The SyncSubspaceSignature issued by the receiver of the capability over the sender’s challenge. 
- pub signature: SyncSignature, -} +pub use super::wgps::*; diff --git a/iroh-willow/src/proto/wgps.rs b/iroh-willow/src/proto/wgps.rs new file mode 100644 index 0000000000..f16a7323af --- /dev/null +++ b/iroh-willow/src/proto/wgps.rs @@ -0,0 +1,18 @@ +pub mod channels; +pub mod fingerprint; +pub mod handles; +pub mod messages; + +pub use channels::*; +pub use fingerprint::*; +pub use handles::*; +pub use messages::*; + +pub const MAX_PAYLOAD_SIZE_POWER: u8 = 12; + +/// The maximum payload size limits when the other peer may include Payloads directly when transmitting Entries: +/// when an Entry’s payload_length is strictly greater than the maximum payload size, +/// its Payload may only be transmitted when explicitly requested. +/// +/// The value is 4096. +pub const MAX_PAYLOAD_SIZE: usize = 2usize.pow(MAX_PAYLOAD_SIZE_POWER as u32); diff --git a/iroh-willow/src/proto/wgps/channels.rs b/iroh-willow/src/proto/wgps/channels.rs new file mode 100644 index 0000000000..c38fd993a3 --- /dev/null +++ b/iroh-willow/src/proto/wgps/channels.rs @@ -0,0 +1,161 @@ +use serde::{Deserialize, Serialize}; +use strum::{EnumCount, VariantArray}; + +use super::messages::Message; + +#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, derive_more::TryFrom)] +pub enum Channel { + Control, + Logical(LogicalChannel), +} + +impl Channel { + pub const COUNT: usize = LogicalChannel::COUNT + 1; + + pub fn all() -> [Channel; LogicalChannel::COUNT + 1] { + // TODO: do this without allocation + // https://users.rust-lang.org/t/how-to-concatenate-array-literals-in-compile-time/21141/3 + [Self::Control] + .into_iter() + .chain(LogicalChannel::VARIANTS.iter().copied().map(Self::Logical)) + .collect::>() + .try_into() + .expect("static length") + } + + pub fn fmt_short(&self) -> &'static str { + match self { + Channel::Control => "Ctl", + Channel::Logical(ch) => ch.fmt_short(), + } + } + + pub fn id(&self) -> u8 { + match self { + Channel::Control => 0, + Channel::Logical(ch) => ch.id(), + } + } + + 
pub fn from_id(id: u8) -> Result { + match id { + 0 => Ok(Self::Control), + _ => { + let ch = LogicalChannel::from_id(id)?; + Ok(Self::Logical(ch)) + } + } + } +} + +/// The different logical channels employed by the WGPS. +#[derive( + Debug, + Serialize, + Deserialize, + Copy, + Clone, + Eq, + PartialEq, + Hash, + strum::EnumIter, + strum::VariantArray, + strum::EnumCount, +)] +pub enum LogicalChannel { + /// Logical channel for controlling the binding of new IntersectionHandles. + Intersection, + /// Logical channel for controlling the binding of new CapabilityHandles. + Capability, + /// Logical channel for controlling the binding of new AreaOfInterestHandles. + AreaOfInterest, + /// Logical channel for controlling the binding of new StaticTokenHandles. + StaticToken, + /// Logical channel for performing 3d range-based set reconciliation. + Reconciliation, + /// Logical channel for transmitting Entries and Payloads outside of 3d range-based set reconciliation. + Data, + // /// Logical channel for controlling the binding of new PayloadRequestHandles. 
+ // PayloadRequest, +} + +#[derive(Debug, thiserror::Error)] +#[error("invalid channel id")] +pub struct InvalidChannelId; + +impl LogicalChannel { + pub fn all() -> [LogicalChannel; LogicalChannel::COUNT] { + LogicalChannel::VARIANTS + .try_into() + .expect("statically checked") + } + pub fn fmt_short(&self) -> &'static str { + match self { + LogicalChannel::Intersection => "Pai", + LogicalChannel::Reconciliation => "Rec", + LogicalChannel::StaticToken => "StT", + LogicalChannel::Capability => "Cap", + LogicalChannel::AreaOfInterest => "AoI", + LogicalChannel::Data => "Dat", + } + } + + pub fn from_id(id: u8) -> Result { + match id { + 2 => Ok(Self::Intersection), + 3 => Ok(Self::AreaOfInterest), + 4 => Ok(Self::Capability), + 5 => Ok(Self::StaticToken), + 6 => Ok(Self::Reconciliation), + 7 => Ok(Self::Data), + _ => Err(InvalidChannelId), + } + } + + pub fn id(&self) -> u8 { + match self { + LogicalChannel::Intersection => 2, + LogicalChannel::AreaOfInterest => 3, + LogicalChannel::Capability => 4, + LogicalChannel::StaticToken => 5, + LogicalChannel::Reconciliation => 6, + LogicalChannel::Data => 7, + } + } +} + +impl Message { + pub fn channel(&self) -> Channel { + match self { + Message::PaiBindFragment(_) | Message::PaiReplyFragment(_) => { + Channel::Logical(LogicalChannel::Intersection) + } + + Message::SetupBindReadCapability(_) => Channel::Logical(LogicalChannel::Capability), + Message::SetupBindAreaOfInterest(_) => Channel::Logical(LogicalChannel::AreaOfInterest), + Message::SetupBindStaticToken(_) => Channel::Logical(LogicalChannel::StaticToken), + + Message::ReconciliationSendFingerprint(_) + | Message::ReconciliationAnnounceEntries(_) + | Message::ReconciliationSendEntry(_) + | Message::ReconciliationSendPayload(_) + | Message::ReconciliationTerminatePayload(_) => { + Channel::Logical(LogicalChannel::Reconciliation) + } + + Message::DataSendEntry(_) + | Message::DataSendPayload(_) + | Message::DataSetMetadata(_) => 
Channel::Logical(LogicalChannel::Data), + + Message::CommitmentReveal(_) + | Message::PaiRequestSubspaceCapability(_) + | Message::PaiReplySubspaceCapability(_) + | Message::ControlIssueGuarantee(_) + | Message::ControlAbsolve(_) + | Message::ControlPlead(_) + | Message::ControlAnnounceDropping(_) + | Message::ControlApologise(_) + | Message::ControlFreeHandle(_) => Channel::Control, + } + } +} diff --git a/iroh-willow/src/proto/wgps/fingerprint.rs b/iroh-willow/src/proto/wgps/fingerprint.rs new file mode 100644 index 0000000000..61f11f4271 --- /dev/null +++ b/iroh-willow/src/proto/wgps/fingerprint.rs @@ -0,0 +1,48 @@ +use std::fmt; + +use iroh_blobs::Hash; +use serde::{Deserialize, Serialize}; + +use crate::proto::data_model::Entry; + +#[derive(Default, Serialize, Deserialize, Eq, PartialEq, Clone, Copy)] +pub struct Fingerprint(pub [u8; 32]); + +impl fmt::Debug for Fingerprint { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Fingerprint({})", iroh_base::base32::fmt_short(self.0)) + } +} + +impl Fingerprint { + pub fn add_entry(&mut self, entry: &Entry) { + // TODO: Don't allocate + let encoded = entry.encode(); + let next = Fingerprint(*Hash::new(&encoded).as_bytes()); + *self ^= next; + } + + pub fn add_entries<'a>(&mut self, iter: impl Iterator) { + for entry in iter { + self.add_entry(entry); + } + } + + pub fn from_entries<'a>(iter: impl Iterator) -> Self { + let mut this = Self::default(); + this.add_entries(iter); + this + } + + pub fn is_empty(&self) -> bool { + *self == Self::default() + } +} + +impl std::ops::BitXorAssign for Fingerprint { + fn bitxor_assign(&mut self, rhs: Self) { + for (a, b) in self.0.iter_mut().zip(rhs.0.iter()) { + *a ^= b; + } + } +} diff --git a/iroh-willow/src/proto/wgps/handles.rs b/iroh-willow/src/proto/wgps/handles.rs new file mode 100644 index 0000000000..0f4124250a --- /dev/null +++ b/iroh-willow/src/proto/wgps/handles.rs @@ -0,0 +1,83 @@ +use serde::{Deserialize, Serialize}; + +/// The different 
resource handles employed by the WGPS. +#[derive(Debug, Serialize, Deserialize, strum::Display)] +pub enum HandleType { + /// Resource handle for the private set intersection part of private area intersection. + /// More precisely, an IntersectionHandle stores a PsiGroup member together with one of two possible states: + /// * pending (waiting for the other peer to perform scalar multiplication), + /// * completed (both peers performed scalar multiplication). + Intersection, + + /// Resource handle for [`ReadCapability`] that certify access to some Entries. + Capability, + + /// Resource handle for [`AreaOfInterest`]s that peers wish to sync. + AreaOfInterest, + + /// Resource handle that controls the matching from Payload transmissions to Payload requests. + PayloadRequest, + + /// Resource handle for [`StaticToken`]s that peers need to transmit. + StaticToken, +} + +pub trait IsHandle: + std::fmt::Debug + std::hash::Hash + From + Into + Copy + Eq + PartialEq +{ + fn handle_type(&self) -> HandleType; + fn value(&self) -> u64; +} + +#[derive(Debug, Serialize, Deserialize, Hash, Eq, PartialEq, Clone, Copy, derive_more::From)] +pub struct AreaOfInterestHandle(u64); + +#[derive(Debug, Serialize, Deserialize, Hash, Eq, PartialEq, Clone, Copy, derive_more::From)] +pub struct IntersectionHandle(u64); + +#[derive(Debug, Serialize, Deserialize, Hash, Eq, PartialEq, Clone, Copy, derive_more::From)] +pub struct CapabilityHandle(u64); + +#[derive(Debug, Serialize, Deserialize, Hash, Eq, PartialEq, Clone, Copy, derive_more::From)] +pub struct StaticTokenHandle(u64); + +#[derive(Debug, Hash, Eq, PartialEq, Clone, Copy, derive_more::From)] +pub enum ResourceHandle { + AreaOfInterest(AreaOfInterestHandle), + Intersection(IntersectionHandle), + Capability(CapabilityHandle), + StaticToken(StaticTokenHandle), +} + +impl IsHandle for CapabilityHandle { + fn handle_type(&self) -> HandleType { + HandleType::Capability + } + fn value(&self) -> u64 { + self.0 + } +} +impl IsHandle for 
StaticTokenHandle { + fn handle_type(&self) -> HandleType { + HandleType::StaticToken + } + fn value(&self) -> u64 { + self.0 + } +} +impl IsHandle for AreaOfInterestHandle { + fn handle_type(&self) -> HandleType { + HandleType::AreaOfInterest + } + fn value(&self) -> u64 { + self.0 + } +} +impl IsHandle for IntersectionHandle { + fn handle_type(&self) -> HandleType { + HandleType::Intersection + } + fn value(&self) -> u64 { + self.0 + } +} diff --git a/iroh-willow/src/proto/wgps/messages.rs b/iroh-willow/src/proto/wgps/messages.rs new file mode 100644 index 0000000000..6121f4cff4 --- /dev/null +++ b/iroh-willow/src/proto/wgps/messages.rs @@ -0,0 +1,531 @@ +use std::io::Write; + +use serde::{Deserialize, Serialize}; + +use crate::{ + proto::{ + challenge::AccessChallenge, + grouping::{serde_encoding::SerdeRange3d, Area, AreaOfInterest}, + meadowcap::{self}, + willow::Entry, + }, + util::codec::{DecodeOutcome, Decoder, Encoder}, +}; + +use super::{ + channels::LogicalChannel, + fingerprint::Fingerprint, + handles::{ + AreaOfInterestHandle, CapabilityHandle, HandleType, IntersectionHandle, StaticTokenHandle, + }, +}; + +pub type StaticToken = meadowcap::serde_encoding::SerdeMcCapability; +// pub type ValidatedStaticToken = meadowcap::ValidatedCapability; +pub type DynamicToken = meadowcap::UserSignature; + +/// Whereas write access control is baked into the Willow data model, +/// read access control resides in the replication layer. +/// To manage read access via capabilities, all peers must cooperate in sending Entries only to peers +/// who have presented a valid read capability for the Entry. +/// We describe the details in a capability-system-agnostic way here. +/// To use Meadowcap for this approach, simply choose the type of valid McCapabilities with access mode read as the read capabilities. 
+pub type ReadCapability = meadowcap::serde_encoding::SerdeMcCapability; + +/// Whenever a peer is granted a complete read capability of non-empty path, +/// it should also be granted a corresponding subspace capability. +/// Each subspace capability must have a single receiver (a public key of some signature scheme), +/// and a single granted namespace (a NamespaceId). +/// The receiver can authenticate itself by signing a collaboratively selected nonce. +pub type SubspaceCapability = meadowcap::serde_encoding::SerdeMcSubspaceCapability; + +pub type SyncSignature = meadowcap::UserSignature; + +pub type Receiver = meadowcap::UserPublicKey; + +/// An Entry together with information about how much of its Payload a peer holds. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct LengthyEntry { + /// The Entry in question. + pub entry: Entry, + /// The number of consecutive bytes from the start of the entry’s Payload that the peer holds. + pub available: u64, +} + +impl LengthyEntry { + pub fn new(entry: Entry, available: u64) -> Self { + Self { entry, available } + } +} + +#[derive( + Serialize, + Deserialize, + derive_more::From, + derive_more::TryInto, + derive_more::Debug, + strum::Display, +)] +pub enum Message { + #[debug("{:?}", _0)] + CommitmentReveal(CommitmentReveal), + #[debug("{:?}", _0)] + PaiReplyFragment(PaiReplyFragment), + #[debug("{:?}", _0)] + PaiBindFragment(PaiBindFragment), + #[debug("{:?}", _0)] + PaiRequestSubspaceCapability(PaiRequestSubspaceCapability), + #[debug("{:?}", _0)] + PaiReplySubspaceCapability(Box), + #[debug("{:?}", _0)] + SetupBindStaticToken(SetupBindStaticToken), + #[debug("{:?}", _0)] + SetupBindReadCapability(SetupBindReadCapability), + #[debug("{:?}", _0)] + SetupBindAreaOfInterest(SetupBindAreaOfInterest), + #[debug("{:?}", _0)] + ReconciliationSendFingerprint(ReconciliationSendFingerprint), + #[debug("{:?}", _0)] + ReconciliationAnnounceEntries(ReconciliationAnnounceEntries), + #[debug("{:?}", _0)] + 
ReconciliationSendEntry(ReconciliationSendEntry), + #[debug("{:?}", _0)] + ReconciliationSendPayload(ReconciliationSendPayload), + #[debug("{:?}", _0)] + ReconciliationTerminatePayload(ReconciliationTerminatePayload), + #[debug("{:?}", _0)] + DataSendEntry(DataSendEntry), + #[debug("{:?}", _0)] + DataSendPayload(DataSendPayload), + #[debug("{:?}", _0)] + DataSetMetadata(DataSetMetadata), + // DataBindPayloadRequest + // DataReplyPayload + #[debug("{:?}", _0)] + ControlIssueGuarantee(ControlIssueGuarantee), + #[debug("{:?}", _0)] + ControlAbsolve(ControlAbsolve), + #[debug("{:?}", _0)] + ControlPlead(ControlPlead), + #[debug("{:?}", _0)] + ControlAnnounceDropping(ControlAnnounceDropping), + #[debug("{:?}", _0)] + ControlApologise(ControlApologise), + #[debug("{:?}", _0)] + ControlFreeHandle(ControlFreeHandle), +} + +impl Message { + pub fn same_kind(&self, other: &Self) -> bool { + std::mem::discriminant(self) == std::mem::discriminant(other) + } + + pub fn covers_region(&self) -> Option<(AreaOfInterestHandle, u64)> { + match self { + Message::ReconciliationSendFingerprint(msg) => { + msg.covers.map(|covers| (msg.receiver_handle, covers)) + } + Message::ReconciliationAnnounceEntries(msg) => { + msg.covers.map(|covers| (msg.receiver_handle, covers)) + } + _ => None, + } + } +} + +impl Encoder for Message { + fn encoded_len(&self) -> usize { + let data_len = postcard::experimental::serialized_size(&self).unwrap(); + let header_len = 4; + data_len + header_len + } + + fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { + let len = postcard::experimental::serialized_size(&self).unwrap() as u32; + out.write_all(&len.to_be_bytes())?; + postcard::to_io(self, out)?; + Ok(()) + } +} + +impl Decoder for Message { + fn decode_from(data: &[u8]) -> anyhow::Result> { + // tracing::debug!(input_len = data.len(), "Message decode: start"); + if data.len() < 4 { + return Ok(DecodeOutcome::NeedMoreData); + } + let len = u32::from_be_bytes(data[..4].try_into().expect("just 
checked")) as usize; + // tracing::debug!(msg_len = len, "Message decode: parsed len"); + let end = len + 4; + if data.len() < end { + // tracing::debug!("Message decode: need more data"); + return Ok(DecodeOutcome::NeedMoreData); + } + // tracing::debug!("Message decode: now deserilalize"); + let res = postcard::from_bytes(&data[4..end]); + // tracing::debug!(?res, "Message decode: res"); + let item = res?; + // tracing::debug!(?item, "Message decode: decoded!"); + Ok(DecodeOutcome::Decoded { + item, + consumed: end, + }) + } +} + +#[derive(Debug, derive_more::From, strum::Display)] +pub enum ReconciliationMessage { + SendFingerprint(ReconciliationSendFingerprint), + AnnounceEntries(ReconciliationAnnounceEntries), + SendEntry(ReconciliationSendEntry), + SendPayload(ReconciliationSendPayload), + TerminatePayload(ReconciliationTerminatePayload), +} + +impl TryFrom for ReconciliationMessage { + type Error = (); + fn try_from(message: Message) -> Result { + match message { + Message::ReconciliationSendFingerprint(msg) => Ok(msg.into()), + Message::ReconciliationAnnounceEntries(msg) => Ok(msg.into()), + Message::ReconciliationSendEntry(msg) => Ok(msg.into()), + Message::ReconciliationSendPayload(msg) => Ok(msg.into()), + Message::ReconciliationTerminatePayload(msg) => Ok(msg.into()), + _ => Err(()), + } + } +} + +impl From for Message { + fn from(message: ReconciliationMessage) -> Self { + match message { + ReconciliationMessage::SendFingerprint(message) => message.into(), + ReconciliationMessage::AnnounceEntries(message) => message.into(), + ReconciliationMessage::SendEntry(message) => message.into(), + ReconciliationMessage::SendPayload(message) => message.into(), + ReconciliationMessage::TerminatePayload(message) => message.into(), + } + } +} + +#[derive(Debug, derive_more::From, strum::Display)] +pub enum DataMessage { + SendEntry(DataSendEntry), + SendPayload(DataSendPayload), + SetMetadata(DataSetMetadata), +} + +impl TryFrom for DataMessage { + type Error = (); 
+ fn try_from(message: Message) -> Result { + match message { + Message::DataSendEntry(msg) => Ok(msg.into()), + Message::DataSendPayload(msg) => Ok(msg.into()), + Message::DataSetMetadata(msg) => Ok(msg.into()), + _ => Err(()), + } + } +} + +impl From for Message { + fn from(message: DataMessage) -> Self { + match message { + DataMessage::SendEntry(message) => message.into(), + DataMessage::SendPayload(message) => message.into(), + DataMessage::SetMetadata(message) => message.into(), + } + } +} + +#[derive(Debug, derive_more::From, strum::Display)] +pub enum IntersectionMessage { + BindFragment(PaiBindFragment), + ReplyFragment(PaiReplyFragment), +} + +impl TryFrom for IntersectionMessage { + type Error = (); + fn try_from(message: Message) -> Result { + match message { + Message::PaiBindFragment(msg) => Ok(msg.into()), + Message::PaiReplyFragment(msg) => Ok(msg.into()), + _ => Err(()), + } + } +} + +impl From for Message { + fn from(message: IntersectionMessage) -> Self { + match message { + IntersectionMessage::BindFragment(msg) => msg.into(), + IntersectionMessage::ReplyFragment(msg) => msg.into(), + } + } +} + +/// Complete the commitment scheme to determine the challenge for read authentication. +#[derive(Serialize, Deserialize, PartialEq, Eq, derive_more::Debug)] +pub struct CommitmentReveal { + /// The nonce of the sender, encoded as a big-endian unsigned integer. + #[debug("{}..", iroh_base::base32::fmt_short(self.nonce.as_bytes()))] + pub nonce: AccessChallenge, +} + +/// Bind a ReadCapability to a CapabilityHandle. +/// +/// The SetupBindReadCapability messages let peers bind a ReadCapability for later reference. +/// To do so, they must present a valid SyncSignature over their challenge, thus demonstrating +/// they hold the secret key corresponding to receiver of the ReadCapability. +/// +/// These requirements allow us to encode SetupBindReadCapability messages more efficiently. 
+/// The handle must be bound to the fragment (primary, if possible) of the capability with the +/// longest Path prefix that is in the intersection of the two peers’ fragments. +/// +/// SetupBindReadCapability messages use the CapabilityChannel. +#[derive(Debug, Serialize, Deserialize)] +pub struct SetupBindReadCapability { + /// A ReadCapability that the peer wishes to reference in future messages. + pub capability: ReadCapability, + + /// The IntersectionHandle, bound by the sender, of the capability’s fragment + /// with the longest Path in the intersection of the fragments. + /// + /// If both a primary and secondary such fragment exist, choose the primary one. + pub handle: IntersectionHandle, + + /// The SyncSignature issued by the Receiver of the capability over the sender’s challenge. + pub signature: SyncSignature, +} + +/// Bind an AreaOfInterest to an AreaOfInterestHandle. +#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)] +pub struct SetupBindAreaOfInterest { + /// An AreaOfInterest that the peer wishes to reference in future messages. + pub area_of_interest: AreaOfInterest, + /// A CapabilityHandle bound by the sender that grants access to all entries in the message’s area_of_interest. + pub authorisation: CapabilityHandle, +} + +impl SetupBindAreaOfInterest { + pub fn area(&self) -> &Area { + &self.area_of_interest.area + } +} + +/// Bind a StaticToken to a StaticTokenHandle. +#[derive(Debug, Serialize, Deserialize)] +pub struct SetupBindStaticToken { + /// The StaticToken to bind. + pub static_token: StaticToken, +} + +/// Send a Fingerprint as part of 3d range-based set reconciliation. +#[derive(Debug, Serialize, Deserialize)] +pub struct ReconciliationSendFingerprint { + /// The 3dRange whose Fingerprint is transmitted. + pub range: SerdeRange3d, + /// The Fingerprint of the range, that is, of all LengthyEntries the peer has in the range. 
+ pub fingerprint: Fingerprint, + /// An AreaOfInterestHandle, bound by the sender of this message, that fully contains the range. + pub sender_handle: AreaOfInterestHandle, + /// An AreaOfInterestHandle, bound by the receiver of this message, that fully contains the range. + pub receiver_handle: AreaOfInterestHandle, + /// If this message is the last of a set of messages that together cover the range of some prior + /// [`ReconciliationSendFingerprint`] message, then this field contains the range_count of that + /// [`ReconciliationSendFingerprint`] message. Otherwise, none. + pub covers: Option, +} + +impl ReconciliationSendFingerprint { + pub fn handles(&self) -> (AreaOfInterestHandle, AreaOfInterestHandle) { + (self.receiver_handle, self.sender_handle) + } +} + +/// Prepare transmission of the LengthyEntries a peer has in a 3dRange as part of 3d range-based set reconciliation. +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct ReconciliationAnnounceEntries { + /// The 3dRange whose LengthyEntries to transmit. + pub range: SerdeRange3d, + /// The number of Entries the sender has in the range. + pub count: u64, + /// A boolean flag to indicate whether the sender wishes to receive a ReconciliationAnnounceEntries message for the same 3dRange in return. + pub want_response: bool, + /// Whether the sender promises to send the Entries in the range sorted from oldest to newest. + pub will_sort: bool, + /// An AreaOfInterestHandle, bound by the sender of this message, that fully contains the range. + pub sender_handle: AreaOfInterestHandle, + /// An AreaOfInterestHandle, bound by the receiver of this message, that fully contains the range. + pub receiver_handle: AreaOfInterestHandle, + /// If this message is the last of a set of messages that together cover the range of some prior + /// [`ReconciliationSendFingerprint`] message, then this field contains the range_count of that + /// [`ReconciliationSendFingerprint`] message. Otherwise, none. 
+ pub covers: Option, +} + +impl ReconciliationAnnounceEntries { + pub fn handles(&self) -> (AreaOfInterestHandle, AreaOfInterestHandle) { + (self.receiver_handle, self.sender_handle) + } +} + +/// Transmit a [`LengthyEntry`] as part of 3d range-based set reconciliation. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ReconciliationSendEntry { + /// The LengthyEntry itself. + pub entry: LengthyEntry, + /// A StaticTokenHandle, bound by the sender of this message, that is bound to the static part of the entry’s AuthorisationToken. + pub static_token_handle: StaticTokenHandle, + /// The dynamic part of the entry’s AuthorisationToken. + pub dynamic_token: DynamicToken, +} + +/// Transmit some transformed Payload bytes. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ReconciliationSendPayload { + // A substring of the bytes obtained by applying transform_payload to the Payload to be transmitted. + pub bytes: bytes::Bytes, +} + +/// Indicate that no more bytes will be transmitted for the currently transmitted Payload as part of set reconciliation. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ReconciliationTerminatePayload; + +/// Transmit an AuthorisedEntry to the other peer, and optionally prepare transmission of its Payload. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DataSendEntry { + /// The Entry to transmit. + pub entry: Entry, + /// A [`StaticTokenHandle`] bound to the StaticToken of the Entry to transmit. + pub static_token_handle: StaticTokenHandle, + /// The DynamicToken of the Entry to transmit. + pub dynamic_token: DynamicToken, + /// The offset in the Payload in bytes at which Payload transmission will begin. + /// + /// If this is equal to the Entry’s payload_length, the Payload will not be transmitted. + pub offset: u64, +} + +/// Transmit some transformed Payload bytes. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DataSendPayload { + // A substring of the bytes obtained by applying transform_payload to the Payload to be transmitted. + pub bytes: bytes::Bytes, +} + +/// Express preferences for Payload transfer in the intersection of two AreaOfInterests. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DataSetMetadata { + /// An AreaOfInterestHandle, bound by the sender of this message. + sender_handle: AreaOfInterestHandle, + /// An AreaOfInterestHandle, bound by the receiver of this message. + receiver_handle: AreaOfInterestHandle, + // Whether the other peer should eagerly forward Payloads in this intersection. + is_eager: bool, +} + +// /// Bind an Entry to a PayloadRequestHandle and request transmission of its Payload from an offset. +// #[derive(Debug, Clone, Serialize, Deserialize)] +// pub struct DataBindPayloadRequest { +// /// The Entry to request. +// entry: Entry, +// /// The offset in the Payload starting from which the sender would like to receive the Payload bytes. +// offset: u64, +// /// A resource handle for a ReadCapability bound by the sender that grants them read access to the bound Entry. +// capability: CapabilityHandle, +// } +// +// /// Set up the state for replying to a DataBindPayloadRequest message. +// #[derive(Debug, Clone, Serialize, Deserialize)] +// pub struct DataReplyPayload { +// /// The PayloadRequestHandle to which to reply. +// handle: u64, +// } + +/// Make a binding promise of available buffer capacity to the other peer +#[derive(Debug, Serialize, Deserialize)] +pub struct ControlIssueGuarantee { + pub amount: u64, + pub channel: LogicalChannel, +} + +/// Allow the other peer to reduce its total buffer capacity by amount. 
+#[derive(Debug, Serialize, Deserialize)] +pub struct ControlAbsolve { + pub amount: u64, + pub channel: LogicalChannel, +} + +/// Ask the other peer to send an ControlAbsolve message +/// such that the receiver remaining guarantees will be target. +#[derive(Debug, Serialize, Deserialize)] +pub struct ControlPlead { + pub target: u64, + pub channel: LogicalChannel, +} + +/// The server notifies the client that it has started dropping messages and will continue +/// to do so until it receives an Apologise message. The server must send any outstanding +/// guarantees of the logical channel before sending a AnnounceDropping message. +#[derive(Debug, Serialize, Deserialize)] +pub struct ControlAnnounceDropping { + pub channel: LogicalChannel, +} + +/// The client notifies the server that it can stop dropping messages on this logical channel. +#[derive(Debug, Serialize, Deserialize)] +pub struct ControlApologise { + pub channel: LogicalChannel, +} + +/// Ask the other peer to free a resource handle. +/// +/// This is needed for symmetric protocols where peers act as both client and server simultaneously +/// and bind resource handles to the same handle types. +#[derive(Debug, Serialize, Deserialize)] +pub struct ControlFreeHandle { + handle: u64, + /// Indicates whether the peer sending this message is the one who created the handle (true) or not (false). + mine: bool, + handle_type: HandleType, +} + +pub type PsiGroupBytes = [u8; 32]; + +/// Bind data to an IntersectionHandle for performing private area intersection. +#[derive(derive_more::Debug, Serialize, Deserialize)] +pub struct PaiBindFragment { + /// The result of first applying hash_into_group to some fragment for private area intersection and then performing scalar multiplication with scalar. + #[debug("{}", hex::encode(self.group_member))] + pub group_member: PsiGroupBytes, + /// Set to true if the private set intersection item is a secondary fragment. 
+ pub is_secondary: bool, +} + +/// Finalise private set intersection for a single item. +#[derive(derive_more::Debug, Serialize, Deserialize)] +pub struct PaiReplyFragment { + /// The IntersectionHandle of the PaiBindFragment message which this finalises. + pub handle: IntersectionHandle, + /// The result of performing scalar multiplication between the group_member of the message that this is replying to and scalar. + #[debug("{}", hex::encode(self.group_member))] + pub group_member: PsiGroupBytes, +} + +/// Ask the receiver to send a SubspaceCapability. +#[derive(Debug, Serialize, Deserialize)] +pub struct PaiRequestSubspaceCapability { + /// The IntersectionHandle bound by the sender for the least-specific secondary fragment for whose NamespaceId to request the SubspaceCapability. + pub handle: IntersectionHandle, +} + +/// Send a previously requested SubspaceCapability. +#[derive(Debug, Serialize, Deserialize)] +pub struct PaiReplySubspaceCapability { + /// The handle of the PaiRequestSubspaceCapability message that this answers (hence, an IntersectionHandle bound by the receiver of this message). + pub handle: IntersectionHandle, + /// A SubspaceCapability whose granted namespace corresponds to the request this answers. + pub capability: SubspaceCapability, + /// The SyncSubspaceSignature issued by the receiver of the capability over the sender’s challenge. 
+ pub signature: SyncSignature, +} From a0685733ce83d1d23830ad02881c8373f9b13ac2 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Mon, 12 Aug 2024 13:55:07 +0200 Subject: [PATCH 120/198] wip: convert most modules --- iroh-willow/src/auth.rs | 437 ---------------------- iroh-willow/src/form.rs | 44 +-- iroh-willow/src/interest.rs | 231 ++++++++++++ iroh-willow/src/lib.rs | 6 +- iroh-willow/src/proto/data_model.rs | 98 +++-- iroh-willow/src/proto/grouping.rs | 51 ++- iroh-willow/src/proto/meadowcap.rs | 76 ++-- iroh-willow/src/proto/wgps/fingerprint.rs | 4 +- iroh-willow/src/proto/wgps/messages.rs | 17 +- iroh-willow/src/session.rs | 76 +--- iroh-willow/src/store.rs | 50 ++- iroh-willow/src/store/auth.rs | 309 +++++++++++++++ iroh-willow/src/store/entry.rs | 11 +- iroh-willow/src/store/memory.rs | 88 +++-- iroh-willow/src/store/traits.rs | 8 +- 15 files changed, 815 insertions(+), 691 deletions(-) delete mode 100644 iroh-willow/src/auth.rs create mode 100644 iroh-willow/src/interest.rs diff --git a/iroh-willow/src/auth.rs b/iroh-willow/src/auth.rs deleted file mode 100644 index 01e6f8ee4f..0000000000 --- a/iroh-willow/src/auth.rs +++ /dev/null @@ -1,437 +0,0 @@ -use std::collections::{HashMap, HashSet}; - -use anyhow::Result; -use serde::{Deserialize, Serialize}; -use tracing::debug; - -use crate::{ - proto::{ - grouping::{Area, AreaOfInterest, Point}, - keys::{NamespaceId, NamespaceKind, NamespacePublicKey, UserId, UserPublicKey}, - meadowcap::{AccessMode, McCapability}, - sync::ReadAuthorisation, - willow::{Entry, WriteCapability}, - }, - session::{AreaOfInterestSelector, Interests}, - store::traits::{CapsStorage, SecretStorage, SecretStoreError, Storage}, -}; - -pub type InterestMap = HashMap>; - -#[derive(Debug, Clone)] -pub struct DelegateTo { - pub user: UserId, - pub restrict_area: RestrictArea, -} - -impl DelegateTo { - pub fn new(user: UserId, restrict_area: RestrictArea) -> Self { - Self { - user, - restrict_area, - } - } -} - 
-#[derive(Debug, Clone)] -pub enum RestrictArea { - None, - Restrict(Area), -} - -impl RestrictArea { - pub fn with_default(self, default: Area) -> Area { - match self { - RestrictArea::None => default.clone(), - RestrictArea::Restrict(area) => area, - } - } -} - -/// Selector for a capability. -#[derive(Debug, Clone, Hash, Eq, PartialEq)] -pub struct CapSelector { - /// The namespace to which the capability must grant access. - pub namespace_id: NamespaceId, - /// Select the user who may use the capability. - pub receiver: ReceiverSelector, - /// Select the area to which the capability grants access. - pub granted_area: AreaSelector, -} - -impl From for CapSelector { - fn from(value: NamespaceId) -> Self { - Self::widest(value) - } -} - -impl CapSelector { - /// Checks if the provided capability is matched by this [`CapSelector`]. - pub fn is_covered_by(&self, cap: &McCapability) -> bool { - self.namespace_id == cap.granted_namespace().id() - && self.receiver.includes(&cap.receiver().id()) - && self.granted_area.is_covered_by(&cap.granted_area()) - } - - /// Creates a new [`CapSelector`]. - pub fn new( - namespace_id: NamespaceId, - receiver: ReceiverSelector, - granted_area: AreaSelector, - ) -> Self { - Self { - namespace_id, - receiver, - granted_area, - } - } - - /// Creates a [`CapSelector`] which selects the widest capability for the provided namespace - /// and user. - pub fn with_user(namespace_id: NamespaceId, user_id: UserId) -> Self { - Self::new( - namespace_id, - ReceiverSelector::Exact(user_id), - AreaSelector::Widest, - ) - } - - /// Creates a [`CapSelector`] which selects the widest capability for the provided namespace. - /// - /// Will use any user available in our secret store and select the capability which grants the - /// widest area. - // TODO: Document exact selection process if there are capabilities with distinct areas. 
- pub fn widest(namespace: NamespaceId) -> Self { - Self::new(namespace, ReceiverSelector::Any, AreaSelector::Widest) - } - - /// Select a capability which authorises writing the provided `entry` on behalf of the provided - /// `user_id`. - pub fn for_entry(entry: &Entry, user_id: ReceiverSelector) -> Self { - let granted_area = AreaSelector::ContainsPoint(Point::from_entry(entry)); - Self { - namespace_id: entry.namespace_id, - receiver: user_id, - granted_area, - } - } -} - -/// Select the receiver for a capability. -#[derive( - Debug, Default, Clone, Copy, Eq, PartialEq, derive_more::From, Serialize, Deserialize, Hash, -)] -pub enum ReceiverSelector { - /// The receiver may be any user for which we have a secret key stored. - #[default] - Any, - /// The receiver must be the provided user. - Exact(UserId), -} - -impl ReceiverSelector { - pub fn includes(&self, user: &UserId) -> bool { - match self { - Self::Any => true, - Self::Exact(u) => u == user, - } - } -} - -/// Selector for the area to which a capability must grant access. -#[derive(Debug, Clone, Default, Hash, Eq, PartialEq)] -pub enum AreaSelector { - /// Use the capability which covers the biggest area. - #[default] - Widest, - /// Use any capability that covers the provided area. - ContainsArea(Area), - /// Use any capability that covers the provided point (i.e. entry). - ContainsPoint(Point), -} - -impl AreaSelector { - /// Checks whether the provided [`Area`] is matched by this [`AreaSelector`]. - pub fn is_covered_by(&self, other: &Area) -> bool { - match self { - AreaSelector::Widest => true, - AreaSelector::ContainsArea(area) => other.includes_area(area), - AreaSelector::ContainsPoint(point) => other.includes_point(point), - } - } -} - -/// A serializable capability. -#[derive(Debug, Serialize, Deserialize, Clone)] -pub enum CapabilityPack { - /// A read authorisation. - Read(ReadAuthorisation), - /// A write authorisation. 
- Write(WriteCapability), -} - -impl CapabilityPack { - pub fn receiver(&self) -> UserId { - match self { - CapabilityPack::Read(auth) => auth.read_cap().receiver().id(), - CapabilityPack::Write(cap) => cap.receiver().id(), - } - } - pub fn validate(&self) -> Result<(), AuthError> { - match self { - CapabilityPack::Read(auth) => { - auth.read_cap().validate()?; - if let Some(subspace_cap) = auth.subspace_cap() { - subspace_cap.validate()?; - } - } - CapabilityPack::Write(cap) => { - cap.validate()?; - } - } - Ok(()) - } -} - -#[derive(Debug, Clone)] -pub struct Auth { - secrets: S::Secrets, - caps: S::Caps, -} - -impl Auth { - pub fn new(secrets: S::Secrets, caps: S::Caps) -> Self { - Self { secrets, caps } - } - pub fn get_write_cap( - &self, - selector: &CapSelector, - ) -> Result, AuthError> { - let cap = self.caps.get_write_cap(selector)?; - Ok(cap) - } - - pub fn get_read_cap( - &self, - selector: &CapSelector, - ) -> Result, AuthError> { - let cap = self.caps.get_read_cap(selector)?; - Ok(cap) - } - - pub fn list_read_caps(&self) -> Result + '_> { - self.caps.list_read_caps(None) - } - - pub fn import_caps( - &self, - caps: impl IntoIterator, - ) -> Result<(), AuthError> { - for cap in caps.into_iter() { - cap.validate()?; - // Only allow importing caps we can use. - // TODO: Is this what we want? - let user_id = cap.receiver(); - if !self.secrets.has_user(&user_id) { - return Err(AuthError::MissingUserSecret(user_id)); - } - self.caps.insert(cap)?; - } - Ok(()) - } - - pub fn insert_caps_unchecked( - &self, - caps: impl IntoIterator, - ) -> Result<(), AuthError> { - for cap in caps.into_iter() { - debug!(?cap, "insert cap"); - self.caps.insert(cap)?; - } - Ok(()) - } - - pub fn resolve_interests(&self, interests: Interests) -> Result { - match interests { - Interests::All => { - let out = self - .list_read_caps()? 
- .map(|auth| { - let area = auth.read_cap().granted_area(); - let aoi = AreaOfInterest::new(area); - (auth, HashSet::from_iter([aoi])) - }) - .collect::>(); - Ok(out) - } - Interests::Select(interests) => { - let mut out: InterestMap = HashMap::new(); - for (cap_selector, aoi_selector) in interests { - let cap = self.get_read_cap(&cap_selector)?; - if let Some(cap) = cap { - let entry = out.entry(cap.clone()).or_default(); - match aoi_selector { - AreaOfInterestSelector::Widest => { - let area = cap.read_cap().granted_area(); - let aoi = AreaOfInterest::new(area); - entry.insert(aoi); - } - AreaOfInterestSelector::Exact(aois) => { - for aoi in aois { - entry.insert(aoi); - } - } - } - } - } - Ok(out) - } - Interests::Exact(interests) => Ok(interests), - } - } - - pub fn create_full_caps( - &self, - namespace_id: NamespaceId, - user_id: UserId, - ) -> Result<[CapabilityPack; 2], AuthError> { - let namespace_key = namespace_id - .into_public_key() - .map_err(|_| AuthError::InvalidNamespaceId(namespace_id))?; - let user_key: UserPublicKey = user_id - .into_public_key() - .map_err(|_| AuthError::InvalidUserId(user_id))?; - let read_cap = self.create_read_cap(namespace_key, user_key)?; - let write_cap = self.create_write_cap(namespace_key, user_key)?; - let pack = [read_cap, write_cap]; - self.insert_caps_unchecked(pack.clone())?; - Ok(pack) - } - - pub fn create_read_cap( - &self, - namespace_key: NamespacePublicKey, - user_key: UserPublicKey, - ) -> Result { - let namespace_id = namespace_key.id(); - let cap = match namespace_key.kind() { - NamespaceKind::Owned => { - let namespace_secret = self - .secrets - .get_namespace(&namespace_id) - .ok_or(AuthError::MissingNamespaceSecret(namespace_id))?; - McCapability::new_owned(&namespace_secret, user_key, AccessMode::ReadOnly) - } - NamespaceKind::Communal => { - McCapability::new_communal(namespace_key, user_key, AccessMode::ReadOnly) - } - }; - // TODO: Subspace capability. 
- let pack = CapabilityPack::Read(ReadAuthorisation::new(cap, None)); - Ok(pack) - } - - pub fn create_write_cap( - &self, - namespace_key: NamespacePublicKey, - user_key: UserPublicKey, - ) -> Result { - let namespace_id = namespace_key.id(); - let cap = match namespace_key.kind() { - NamespaceKind::Owned => { - let namespace_secret = self - .secrets - .get_namespace(&namespace_id) - .ok_or(AuthError::MissingNamespaceSecret(namespace_id))?; - McCapability::new_owned(&namespace_secret, user_key, AccessMode::ReadWrite) - } - NamespaceKind::Communal => { - McCapability::new_communal(namespace_key, user_key, AccessMode::ReadWrite) - } - }; - let pack = CapabilityPack::Write(cap); - Ok(pack) - } - - pub fn delegate_full_caps( - &self, - from: CapSelector, - access_mode: AccessMode, - to: DelegateTo, - store: bool, - ) -> Result, AuthError> { - let mut out = Vec::with_capacity(2); - let user_key: UserPublicKey = to - .user - .into_public_key() - .map_err(|_| AuthError::InvalidUserId(to.user))?; - let restrict_area = to.restrict_area; - let read_cap = self.delegate_read_cap(&from, user_key, restrict_area.clone())?; - out.push(read_cap); - if access_mode == AccessMode::ReadWrite { - let write_cap = self.delegate_write_cap(&from, user_key, restrict_area)?; - out.push(write_cap); - } - if store { - self.insert_caps_unchecked(out.clone())?; - } - Ok(out) - } - - pub fn delegate_read_cap( - &self, - from: &CapSelector, - to: UserPublicKey, - restrict_area: RestrictArea, - ) -> Result { - let auth = self.get_read_cap(from)?.ok_or(AuthError::NoCapability)?; - let read_cap = auth.read_cap(); - let _subspace_cap = auth.subspace_cap(); - let user_id = read_cap.receiver().id(); - let user_secret = self - .secrets - .get_user(&user_id) - .ok_or(AuthError::MissingUserSecret(user_id))?; - let area = restrict_area.with_default(read_cap.granted_area()); - let new_read_cap = read_cap.delegate(&user_secret, to, area)?; - // TODO: Subspace capability - let new_subspace_cap = None; - let 
pack = CapabilityPack::Read(ReadAuthorisation::new(new_read_cap, new_subspace_cap)); - Ok(pack) - } - - pub fn delegate_write_cap( - &self, - from: &CapSelector, - to: UserPublicKey, - restrict_area: RestrictArea, - ) -> Result { - let cap = self.get_write_cap(from)?.ok_or(AuthError::NoCapability)?; - let user_secret = self - .secrets - .get_user(&cap.receiver().id()) - .ok_or(AuthError::MissingUserSecret(cap.receiver().id()))?; - let area = restrict_area.with_default(cap.granted_area()); - let new_cap = cap.delegate(&user_secret, to, area)?; - Ok(CapabilityPack::Write(new_cap)) - } -} - -#[derive(thiserror::Error, Debug)] -pub enum AuthError { - #[error("invalid user id: {}", .0.fmt_short())] - InvalidUserId(UserId), - #[error("invalid namespace id: {}", .0.fmt_short())] - InvalidNamespaceId(NamespaceId), - #[error("missing user secret: {}", .0.fmt_short())] - MissingUserSecret(UserId), - #[error("missing namespace secret: {}", .0.fmt_short())] - MissingNamespaceSecret(NamespaceId), - #[error("secret store error: {0}")] - SecretStore(#[from] SecretStoreError), - #[error("no capability found")] - NoCapability, - // TODO: remove - #[error("{0}")] - Other(#[from] anyhow::Error), -} diff --git a/iroh-willow/src/form.rs b/iroh-willow/src/form.rs index c650b84a20..83c0d084e9 100644 --- a/iroh-willow/src/form.rs +++ b/iroh-willow/src/form.rs @@ -14,13 +14,10 @@ use iroh_blobs::{ use serde::{Deserialize, Serialize}; use tokio::io::AsyncRead; -use crate::{ - proto::{ - keys::UserId, - willow::{Entry, NamespaceId, Path, SubspaceId, Timestamp, WriteCapability}, - }, - store::{traits::Storage, Store}, - util::time::system_time_now, +use crate::proto::{ + data_model::SerdeWriteCapability, + keys::UserId, + willow::{Entry, NamespaceId, Path, SubspaceId, Timestamp}, }; /// Sources where payload data can come from. 
@@ -113,35 +110,6 @@ impl EntryForm { payload: PayloadForm::Bytes(payload.into()), } } - - /// Convert the form into an [`Entry`] by filling the fields with data from the environment and - /// the provided [`Store`]. - /// - /// `user_id` must be set to the user who is authenticating the entry. - pub async fn into_entry( - self, - store: &Store, - user_id: UserId, // auth: AuthForm, - ) -> anyhow::Result { - let timestamp = match self.timestamp { - TimestampForm::Now => system_time_now(), - TimestampForm::Exact(timestamp) => timestamp, - }; - let subspace_id = match self.subspace_id { - SubspaceForm::User => user_id, - SubspaceForm::Exact(subspace) => subspace, - }; - let (payload_digest, payload_length) = self.payload.submit(store.payloads()).await?; - let entry = Entry { - namespace_id: self.namespace_id, - subspace_id, - path: self.path, - timestamp, - payload_length, - payload_digest, - }; - Ok(entry) - } } /// Select which capability to use for authenticating a new entry. @@ -151,7 +119,7 @@ pub enum AuthForm { /// user. Any(UserId), /// Use the provided [`WriteCapability`]. 
- Exact(WriteCapability), + Exact(SerdeWriteCapability), } impl AuthForm { @@ -160,7 +128,7 @@ impl AuthForm { pub fn user_id(&self) -> UserId { match self { AuthForm::Any(user) => *user, - AuthForm::Exact(cap) => cap.receiver().id(), + AuthForm::Exact(cap) => *cap.receiver(), } } } diff --git a/iroh-willow/src/interest.rs b/iroh-willow/src/interest.rs new file mode 100644 index 0000000000..436532bdf9 --- /dev/null +++ b/iroh-willow/src/interest.rs @@ -0,0 +1,231 @@ +use std::collections::{hash_map, HashMap, HashSet}; + +use serde::{Deserialize, Serialize}; + +use crate::proto::{ + data_model::{Entry, SerdeWriteCapability}, + grouping::{Area, AreaExt, AreaOfInterest, Point}, + keys::{NamespaceId, UserId}, + meadowcap::{serde_encoding::SerdeReadAuthorisation, McCapability, ReadAuthorisation}, +}; + +pub type InterestMap = HashMap>; + +#[derive(Debug, Default, Clone)] +pub enum Interests { + #[default] + All, + Select(HashMap), + Exact(InterestMap), +} + +impl Interests { + pub fn builder() -> SelectBuilder { + SelectBuilder::default() + } + + pub fn all() -> Self { + Self::All + } +} + +#[derive(Default, Debug)] +pub struct SelectBuilder(HashMap); + +impl SelectBuilder { + pub fn add_full_cap(mut self, cap: impl Into) -> Self { + let cap = cap.into(); + self.0.insert(cap, AreaOfInterestSelector::Widest); + self + } + + pub fn add_area( + mut self, + cap: impl Into, + aois: impl IntoIterator>, + ) -> Self { + let cap = cap.into(); + let aois = aois.into_iter(); + let aois = aois.map(|aoi| aoi.into()); + match self.0.entry(cap) { + hash_map::Entry::Vacant(entry) => { + entry.insert(AreaOfInterestSelector::Exact(aois.collect())); + } + hash_map::Entry::Occupied(mut entry) => match entry.get_mut() { + AreaOfInterestSelector::Widest => {} + AreaOfInterestSelector::Exact(existing) => existing.extend(aois), + }, + } + self + } + + pub fn build(self) -> Interests { + Interests::Select(self.0) + } +} + +impl From for Interests { + fn from(builder: SelectBuilder) -> Self { + 
builder.build() + } +} + +#[derive(Debug, Default, Clone)] +pub enum AreaOfInterestSelector { + #[default] + Widest, + Exact(HashSet), +} + +/// Selector for a capability. +#[derive(Debug, Clone, Hash, Eq, PartialEq)] +pub struct CapSelector { + /// The namespace to which the capability must grant access. + pub namespace_id: NamespaceId, + /// Select the user who may use the capability. + pub receiver: ReceiverSelector, + /// Select the area to which the capability grants access. + pub granted_area: AreaSelector, +} + +impl From for CapSelector { + fn from(value: NamespaceId) -> Self { + Self::widest(value) + } +} + +impl CapSelector { + /// Checks if the provided capability is matched by this [`CapSelector`]. + pub fn is_covered_by(&self, cap: &McCapability) -> bool { + self.namespace_id == *cap.granted_namespace() + && self.receiver.includes(&cap.receiver()) + && self.granted_area.is_covered_by(&cap.granted_area()) + } + + /// Creates a new [`CapSelector`]. + pub fn new( + namespace_id: NamespaceId, + receiver: ReceiverSelector, + granted_area: AreaSelector, + ) -> Self { + Self { + namespace_id, + receiver, + granted_area, + } + } + + /// Creates a [`CapSelector`] which selects the widest capability for the provided namespace + /// and user. + pub fn with_user(namespace_id: NamespaceId, user_id: UserId) -> Self { + Self::new( + namespace_id, + ReceiverSelector::Exact(user_id), + AreaSelector::Widest, + ) + } + + /// Creates a [`CapSelector`] which selects the widest capability for the provided namespace. + /// + /// Will use any user available in our secret store and select the capability which grants the + /// widest area. + // TODO: Document exact selection process if there are capabilities with distinct areas. + pub fn widest(namespace: NamespaceId) -> Self { + Self::new(namespace, ReceiverSelector::Any, AreaSelector::Widest) + } + + /// Select a capability which authorises writing the provided `entry` on behalf of the provided + /// `user_id`. 
+ pub fn for_entry(entry: &Entry, user_id: ReceiverSelector) -> Self { + let granted_area = AreaSelector::ContainsPoint(Point::from_entry(entry)); + Self { + namespace_id: *entry.namespace_id(), + receiver: user_id, + granted_area, + } + } +} + +/// Select the receiver for a capability. +#[derive( + Debug, Default, Clone, Copy, Eq, PartialEq, derive_more::From, Serialize, Deserialize, Hash, +)] +pub enum ReceiverSelector { + /// The receiver may be any user for which we have a secret key stored. + #[default] + Any, + /// The receiver must be the provided user. + Exact(UserId), +} + +impl ReceiverSelector { + pub fn includes(&self, user: &UserId) -> bool { + match self { + Self::Any => true, + Self::Exact(u) => u == user, + } + } +} + +/// Selector for the area to which a capability must grant access. +#[derive(Debug, Clone, Default, Hash, Eq, PartialEq)] +pub enum AreaSelector { + /// Use the capability which covers the biggest area. + #[default] + Widest, + /// Use any capability that covers the provided area. + ContainsArea(Area), + /// Use any capability that covers the provided point (i.e. entry). + ContainsPoint(Point), +} + +impl AreaSelector { + /// Checks whether the provided [`Area`] is matched by this [`AreaSelector`]. + pub fn is_covered_by(&self, other: &Area) -> bool { + match self { + AreaSelector::Widest => true, + AreaSelector::ContainsArea(area) => other.includes_area(area), + AreaSelector::ContainsPoint(point) => other.includes_point(point), + } + } +} + +/// A serializable capability. +// TODO: This doesn't really belong into this module. +#[derive(Debug, Serialize, Deserialize, Clone)] +pub enum CapabilityPack { + /// A read authorisation. + Read(SerdeReadAuthorisation), + /// A write authorisation. 
+ Write(SerdeWriteCapability), +} + +impl CapabilityPack { + pub fn receiver(&self) -> UserId { + match self { + CapabilityPack::Read(auth) => *auth.read_cap().receiver(), + CapabilityPack::Write(cap) => *cap.receiver(), + } + } + + pub fn validate(&self) -> Result<(), InvalidCapabilityPack> { + // meadowcap capability are validated on deserialization. + Ok(()) + // match self { + // CapabilityPack::Read(auth) => { + // auth.read_cap().validate()?; + // if let Some(subspace_cap) = auth.subspace_cap() { + // subspace_cap.validate()?; + // } + // } + // CapabilityPack::Write(cap) => { + // cap.0.validate()?; + // } + // } + // Ok(()) + } +} + +#[derive(Debug, thiserror::Error)] +#[error("Invalid capability pack.")] +pub struct InvalidCapabilityPack; diff --git a/iroh-willow/src/lib.rs b/iroh-willow/src/lib.rs index ceedf2cde7..715dc73973 100644 --- a/iroh-willow/src/lib.rs +++ b/iroh-willow/src/lib.rs @@ -3,13 +3,13 @@ #![allow(missing_docs)] #![deny(unsafe_code)] -// pub mod auth; // pub mod engine; -// pub mod form; +pub mod form; // pub mod net; pub mod proto; // pub mod session; -// pub mod store; +pub mod interest; +pub mod store; pub mod util; /// To break symmetry, we refer to the peer that initiated the synchronisation session as Alfie, diff --git a/iroh-willow/src/proto/data_model.rs b/iroh-willow/src/proto/data_model.rs index 10144fcf5f..afcfa230f8 100644 --- a/iroh-willow/src/proto/data_model.rs +++ b/iroh-willow/src/proto/data_model.rs @@ -17,6 +17,9 @@ pub type SubspaceId = keys::UserId; /// The capability type needed to authorize writes. pub type WriteCapability = meadowcap::McCapability; +/// The capability type needed to authorize writes (serializable). +pub type SerdeWriteCapability = meadowcap::serde_encoding::SerdeMcCapability; + /// A Timestamp is a 64-bit unsigned integer, that is, a natural number between zero (inclusive) and 2^64 - 1 (exclusive). /// Timestamps are to be interpreted as a time in microseconds since the Unix epoch. 
pub type Timestamp = willow_data_model::Timestamp; @@ -122,44 +125,66 @@ impl PathExt for Path { } } -#[derive(Debug, Clone, Eq, PartialEq, derive_more::From, derive_more::Into, derive_more::Deref)] -pub struct Entry( - willow_data_model::Entry< - MAX_COMPONENT_LENGTH, - MAX_COMPONENT_COUNT, - MAX_PATH_LENGTH, - NamespaceId, - SubspaceId, - PayloadDigest, - >, -); - -impl Entry { - pub fn encode(&self) -> Vec { +// #[derive(Debug, Clone, Eq, PartialEq, derive_more::From, derive_more::Into, derive_more::Deref)] +pub type Entry = willow_data_model::Entry< + MAX_COMPONENT_LENGTH, + MAX_COMPONENT_COUNT, + MAX_PATH_LENGTH, + NamespaceId, + SubspaceId, + PayloadDigest, +>; + +pub trait EntryExt { + fn encode_to_vec(&self) -> Vec; + fn decode_from_slice(bytes: &[u8]) -> anyhow::Result; + fn as_set_sort_tuple(&self) -> (&NamespaceId, &SubspaceId, &Path); +} + +impl EntryExt for Entry { + fn encode_to_vec(&self) -> Vec { let mut consumer = IntoVec::::new(); - self.0.encode(&mut consumer).expect("encoding not to fail"); + self.encode(&mut consumer).expect("encoding not to fail"); consumer.into_vec() } - - pub fn decode(bytes: &[u8]) -> anyhow::Result { + fn decode_from_slice(bytes: &[u8]) -> anyhow::Result { let mut producer = FromSlice::::new(bytes); let entry = willow_data_model::Entry::decode(&mut producer)?; - Ok(Self(entry)) + Ok(entry) + } + + fn as_set_sort_tuple(&self) -> (&NamespaceId, &SubspaceId, &Path) { + (self.namespace_id(), self.subspace_id(), self.path()) + } +} + +#[derive(Debug, Clone)] +pub struct AuthorisedEntry(pub Entry, pub AuthorisationToken); + +impl std::ops::Deref for AuthorisedEntry { + type Target = Entry; + fn deref(&self) -> &Self::Target { + &self.0 } } -#[derive(Debug, Clone, derive_more::From, derive_more::Into, derive_more::Deref)] -pub struct AuthorisedEntry( - willow_data_model::AuthorisedEntry< - MAX_COMPONENT_LENGTH, - MAX_COMPONENT_COUNT, - MAX_PATH_LENGTH, - NamespaceId, - SubspaceId, - PayloadDigest, - AuthorisationToken, - >, -); 
+impl AuthorisedEntry { + pub fn entry(&self) -> &Entry { + &self.0 + } +} + +// #[derive(Debug, Clone, derive_more::From, derive_more::Into, derive_more::Deref)] +// pub type AuthorisedEntry = +// willow_data_model::AuthorisedEntry< +// MAX_COMPONENT_LENGTH, +// MAX_COMPONENT_COUNT, +// MAX_PATH_LENGTH, +// NamespaceId, +// SubspaceId, +// PayloadDigest, +// AuthorisationToken, +// >; // pub type Path = willow_data_model::Path; @@ -231,7 +256,10 @@ pub mod serde_encoding { use super::*; - impl Serialize for Entry { + #[derive(Debug, Clone, derive_more::From, derive_more::Into, derive_more::Deref)] + pub struct SerdeEntry(pub Entry); + + impl Serialize for SerdeEntry { fn serialize(&self, serializer: S) -> Result { let encoded = { let mut consumer = IntoVec::::new(); @@ -242,16 +270,16 @@ pub mod serde_encoding { } } - impl<'de> Deserialize<'de> for Entry { - fn deserialize(deserializer: D) -> Result + impl<'de> Deserialize<'de> for SerdeEntry { + fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { let data: Vec = Deserialize::deserialize(deserializer)?; let decoded = { let mut producer = FromSlice::new(&data); - let decoded = - willow_data_model::Entry::decode(&mut producer).expect("decoding not to fail"); + let decoded = willow_data_model::Entry::decode(&mut producer) + .map_err(serde::de::Error::custom)?; Self(decoded) }; Ok(decoded) diff --git a/iroh-willow/src/proto/grouping.rs b/iroh-willow/src/proto/grouping.rs index 70a44d0d96..4689b172d9 100644 --- a/iroh-willow/src/proto/grouping.rs +++ b/iroh-willow/src/proto/grouping.rs @@ -1,11 +1,9 @@ -use willow_data_model::grouping::RangeEnd; +pub use willow_data_model::grouping::{Range, RangeEnd}; use super::data_model::{ Entry, Path, SubspaceId, Timestamp, MAX_COMPONENT_COUNT, MAX_COMPONENT_LENGTH, MAX_PATH_LENGTH, }; -pub type Range = willow_data_model::grouping::Range; - // /// A three-dimensional range that includes every [`Entry`] included in all three of its ranges. 
// #[derive( // Debug, Clone, Hash, Eq, PartialEq, derive_more::From, derive_more::Into, derive_more::Deref, @@ -54,15 +52,35 @@ pub type AreaSubspace = willow_data_model::grouping::AreaSubspace; /// A grouping of [`crate::Entry`]s that are among the newest in some [store](https://willowprotocol.org/specs/data-model/index.html#store). /// /// [Definition](https://willowprotocol.org/specs/grouping-entries/index.html#aois). -#[derive(Debug, Clone, Eq, PartialEq, derive_more::From, derive_more::Into, derive_more::Deref)] -pub struct AreaOfInterest( - willow_data_model::grouping::AreaOfInterest< - MAX_COMPONENT_LENGTH, - MAX_COMPONENT_COUNT, - MAX_PATH_LENGTH, - SubspaceId, - >, -); +// #[derive(Debug, Clone, Eq, PartialEq, derive_more::From, derive_more::Into, derive_more::Deref)] +// pub struct AreaOfInterest( +// willow_data_model::grouping::AreaOfInterest< +// MAX_COMPONENT_LENGTH, +// MAX_COMPONENT_COUNT, +// MAX_PATH_LENGTH, +// SubspaceId, +// >, +// ); +pub type AreaOfInterest = willow_data_model::grouping::AreaOfInterest< + MAX_COMPONENT_LENGTH, + MAX_COMPONENT_COUNT, + MAX_PATH_LENGTH, + SubspaceId, +>; + +pub trait AreaOfInterestExt { + fn new(area: Area) -> AreaOfInterest; +} + +impl AreaOfInterestExt for AreaOfInterest { + fn new(area: Area) -> AreaOfInterest { + AreaOfInterest { + area, + max_count: 0, + max_size: 0, + } + } +} pub trait AreaExt { fn includes_point(&self, point: &Point) -> bool; @@ -140,7 +158,12 @@ pub mod serde_encoding { use super::*; - impl Serialize for AreaOfInterest { + #[derive( + Debug, Clone, Eq, PartialEq, derive_more::From, derive_more::Into, derive_more::Deref, + )] + pub struct SerdeAreaOfInterest(AreaOfInterest); + + impl Serialize for SerdeAreaOfInterest { fn serialize(&self, serializer: S) -> Result { let relative = Area::new_full(); let encoded_area = { @@ -155,7 +178,7 @@ pub mod serde_encoding { } } - impl<'de> Deserialize<'de> for AreaOfInterest { + impl<'de> Deserialize<'de> for SerdeAreaOfInterest { fn 
deserialize(deserializer: D) -> Result where D: Deserializer<'de>, diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index 5216edc994..f329955507 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -60,6 +60,13 @@ pub fn is_authorised_write(entry: &Entry, token: &McAuthorisationToken) -> bool token.is_authorised_write(entry) } +pub type FailedDelegationError = meadowcap::FailedDelegationError< + MAX_COMPONENT_LENGTH, + MAX_COMPONENT_COUNT, + MAX_PATH_LENGTH, + keys::UserId, +>; + /// Represents an authorisation to read an area of data in a Namespace. // TODO: Move somewhere else? #[derive(Debug, Clone, Hash, Eq, PartialEq)] @@ -119,12 +126,6 @@ impl ReadAuthorisation { } } -// #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Ord, PartialOrd)] -// pub enum AccessMode { -// ReadOnly, -// ReadWrite, -// } - pub mod serde_encoding { use serde::{Deserialize, Deserializer}; use ufotofu::sync::{consumer::IntoVec, producer::FromSlice}; @@ -134,28 +135,52 @@ pub mod serde_encoding { use super::*; - #[derive(Serialize, Deserialize, Debug, Clone, Eq, PartialEq, Hash)] - pub struct SerdeReadAuthorisation(SerdeMcCapability, Option); + #[derive( + Debug, Clone, Eq, PartialEq, Hash, derive_more::From, derive_more::Into, derive_more::Deref, + )] + pub struct SerdeReadAuthorisation(pub ReadAuthorisation); + + impl Serialize for SerdeReadAuthorisation { + fn serialize(&self, serializer: S) -> Result { + let relative = Area::new_full(); + let encoded_cap = { + let mut consumer = IntoVec::::new(); + self.0 + .0 + .relative_encode(&relative, &mut consumer) + .expect("encoding not to fail"); + consumer.into_vec() + }; - impl From for SerdeReadAuthorisation { - fn from(value: ReadAuthorisation) -> Self { - Self( - SerdeMcCapability::from(value.0), - value.1.map(SerdeMcSubspaceCapability::from), - ) + let encoded_subspace_cap = self.0 .1.as_ref().map(|cap| { + let mut consumer = IntoVec::::new(); + cap.encode(&mut 
consumer).expect("encoding not to fail"); + consumer.into_vec() + }); + (encoded_cap, encoded_subspace_cap).serialize(serializer) } } - impl From for ReadAuthorisation { - fn from(value: SerdeReadAuthorisation) -> Self { - Self(value.0.into(), value.1.map(Into::into)) + impl<'de> Deserialize<'de> for SerdeReadAuthorisation { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let (read_cap, subspace_cap) = + <(SerdeMcCapability, Option)>::deserialize( + deserializer, + )?; + Ok(Self(ReadAuthorisation( + read_cap.into(), + subspace_cap.map(Into::into), + ))) } } #[derive( Debug, Clone, Eq, PartialEq, Hash, derive_more::From, derive_more::Into, derive_more::Deref, )] - pub struct SerdeMcCapability(McCapability); + pub struct SerdeMcCapability(pub McCapability); impl Serialize for SerdeMcCapability { fn serialize(&self, serializer: S) -> Result { @@ -181,7 +206,7 @@ pub mod serde_encoding { let decoded = { let mut producer = FromSlice::new(&data); let decoded = McCapability::relative_decode(&relative, &mut producer) - .expect("decoding not to fail"); + .map_err(|e| serde::de::Error::custom(e))?; Self(decoded) }; Ok(decoded) @@ -191,7 +216,7 @@ pub mod serde_encoding { #[derive( Debug, Clone, Eq, PartialEq, Hash, derive_more::From, derive_more::Into, derive_more::Deref, )] - pub struct SerdeMcSubspaceCapability(McSubspaceCapability); + pub struct SerdeMcSubspaceCapability(pub McSubspaceCapability); impl Serialize for SerdeMcSubspaceCapability { fn serialize(&self, serializer: S) -> Result { @@ -212,8 +237,8 @@ pub mod serde_encoding { let data: Vec = Deserialize::deserialize(deserializer)?; let decoded = { let mut producer = FromSlice::new(&data); - let decoded = - McSubspaceCapability::decode(&mut producer).expect("decoding not to fail"); + let decoded = McSubspaceCapability::decode(&mut producer) + .map_err(|e| serde::de::Error::custom(e))?; Self(decoded) }; Ok(decoded) @@ -221,6 +246,13 @@ pub mod serde_encoding { } } +/// Returns 
`true` if `self` covers a larger area than `other`, +/// or if covers the same area and has less delegations. +pub fn is_wider_than(a: &McCapability, b: &McCapability) -> bool { + (a.granted_area().includes_area(&b.granted_area())) + || (a.granted_area() == b.granted_area() && a.delegations().len() < b.delegations().len()) +} + // use std::{io::Write, sync::Arc}; // use serde::{Deserialize, Serialize}; diff --git a/iroh-willow/src/proto/wgps/fingerprint.rs b/iroh-willow/src/proto/wgps/fingerprint.rs index 61f11f4271..d5358e2ae0 100644 --- a/iroh-willow/src/proto/wgps/fingerprint.rs +++ b/iroh-willow/src/proto/wgps/fingerprint.rs @@ -3,7 +3,7 @@ use std::fmt; use iroh_blobs::Hash; use serde::{Deserialize, Serialize}; -use crate::proto::data_model::Entry; +use crate::proto::data_model::{Entry, EntryExt}; #[derive(Default, Serialize, Deserialize, Eq, PartialEq, Clone, Copy)] pub struct Fingerprint(pub [u8; 32]); @@ -17,7 +17,7 @@ impl fmt::Debug for Fingerprint { impl Fingerprint { pub fn add_entry(&mut self, entry: &Entry) { // TODO: Don't allocate - let encoded = entry.encode(); + let encoded = entry.encode_to_vec(); let next = Fingerprint(*Hash::new(&encoded).as_bytes()); *self ^= next; } diff --git a/iroh-willow/src/proto/wgps/messages.rs b/iroh-willow/src/proto/wgps/messages.rs index 6121f4cff4..9835052e5d 100644 --- a/iroh-willow/src/proto/wgps/messages.rs +++ b/iroh-willow/src/proto/wgps/messages.rs @@ -5,7 +5,11 @@ use serde::{Deserialize, Serialize}; use crate::{ proto::{ challenge::AccessChallenge, - grouping::{serde_encoding::SerdeRange3d, Area, AreaOfInterest}, + data_model::serde_encoding::SerdeEntry, + grouping::{ + serde_encoding::{SerdeAreaOfInterest, SerdeRange3d}, + Area, + }, meadowcap::{self}, willow::Entry, }, @@ -47,14 +51,17 @@ pub type Receiver = meadowcap::UserPublicKey; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct LengthyEntry { /// The Entry in question. 
- pub entry: Entry, + pub entry: SerdeEntry, /// The number of consecutive bytes from the start of the entry’s Payload that the peer holds. pub available: u64, } impl LengthyEntry { pub fn new(entry: Entry, available: u64) -> Self { - Self { entry, available } + Self { + entry: entry.into(), + available, + } } } @@ -301,7 +308,7 @@ pub struct SetupBindReadCapability { #[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)] pub struct SetupBindAreaOfInterest { /// An AreaOfInterest that the peer wishes to reference in future messages. - pub area_of_interest: AreaOfInterest, + pub area_of_interest: SerdeAreaOfInterest, /// A CapabilityHandle bound by the sender that grants access to all entries in the message’s area_of_interest. pub authorisation: CapabilityHandle, } @@ -395,7 +402,7 @@ pub struct ReconciliationTerminatePayload; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct DataSendEntry { /// The Entry to transmit. - pub entry: Entry, + pub entry: SerdeEntry, /// A [`StaticTokenHandle`] bound to the StaticToken of the Entry to transmit. pub static_token_handle: StaticTokenHandle, /// The DynamicToken of the Entry to transmit. 
diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 3f5d9d5d6b..ad4708f7cd 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -67,77 +67,6 @@ impl SessionMode { } } -#[derive(Debug, Default, Clone)] -pub enum Interests { - #[default] - All, - Select(HashMap), - Exact(HashMap>), -} - -impl Interests { - pub fn builder() -> SelectBuilder { - SelectBuilder::default() - } - - pub fn all() -> Self { - Self::All - } -} - -#[derive(Default, Debug)] -pub struct SelectBuilder(HashMap); - -impl SelectBuilder { - pub fn add_full_cap(mut self, cap: impl Into) -> Self { - let cap = cap.into(); - self.0.insert(cap, AreaOfInterestSelector::Widest); - self - } - - pub fn add_area( - mut self, - cap: impl Into, - aois: impl IntoIterator>, - ) -> Self { - let cap = cap.into(); - let aois = aois.into_iter(); - let aois = aois.map(|aoi| aoi.into()); - match self.0.entry(cap) { - hash_map::Entry::Vacant(entry) => { - entry.insert(AreaOfInterestSelector::Exact(aois.collect())); - } - hash_map::Entry::Occupied(mut entry) => match entry.get_mut() { - AreaOfInterestSelector::Widest => {} - AreaOfInterestSelector::Exact(existing) => existing.extend(aois), - }, - } - self - } - - pub fn build(self) -> Interests { - Interests::Select(self.0) - } -} - -impl From for Interests { - fn from(builder: SelectBuilder) -> Self { - builder.build() - } -} - -#[derive(Debug)] -pub enum SessionUpdate { - SubmitIntent(Intent), -} - -#[derive(Debug, Default, Clone)] -pub enum AreaOfInterestSelector { - #[default] - Widest, - Exact(BTreeSet), -} - /// Options to initialize a session with. 
#[derive(Debug)] pub struct SessionInit { @@ -196,6 +125,11 @@ pub enum SessionEvent { }, } +#[derive(Debug)] +pub enum SessionUpdate { + SubmitIntent(Intent), +} + #[derive(Debug)] pub struct SessionHandle { pub cancel_token: CancellationToken, diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index 474cfb7ea8..d12779cd8e 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -1,17 +1,19 @@ -use anyhow::{anyhow, Result}; +use anyhow::{anyhow, Context, Result}; use rand_core::CryptoRngCore; use crate::{ - auth::{Auth, AuthError, CapSelector, ReceiverSelector}, - form::{AuthForm, EntryOrForm}, + form::{AuthForm, EntryForm, EntryOrForm, SubspaceForm, TimestampForm}, + interest::{CapSelector, ReceiverSelector}, proto::{ + data_model::{AuthorisedEntry, PayloadDigest}, keys::{NamespaceId, NamespaceKind, NamespaceSecretKey, UserId}, willow::Entry, }, - session::Error, store::traits::SecretStorage, + util::time::system_time_now, }; +use self::auth::{Auth, AuthError}; use self::traits::Storage; pub use self::entry::{EntryOrigin, WatchableEntryStore}; @@ -59,10 +61,10 @@ impl Store { let user_id = auth.user_id(); let entry = match entry { EntryOrForm::Entry(entry) => Ok(entry), - EntryOrForm::Form(form) => form.into_entry(self, user_id).await, + EntryOrForm::Form(form) => self.form_to_entry(form, user_id).await, }?; let capability = match auth { - AuthForm::Exact(cap) => cap, + AuthForm::Exact(cap) => cap.0, AuthForm::Any(user_id) => { let selector = CapSelector::for_entry(&entry, ReceiverSelector::Exact(user_id)); self.auth() @@ -73,12 +75,13 @@ impl Store { let secret_key = self .secrets() .get_user(&user_id) - .ok_or(Error::MissingUserKey(user_id))?; - let authorised_entry = entry.attach_authorisation(capability, &secret_key)?; + .context("Missing user keypair")?; + let token = capability.authorisation_token(&entry, secret_key)?; + let authorised_entry = AuthorisedEntry(entry, token); let inserted = self .entries() 
.ingest(&authorised_entry, EntryOrigin::Local)?; - Ok((authorised_entry.into_entry(), inserted)) + Ok((authorised_entry.0, inserted)) } pub fn create_namespace( @@ -93,4 +96,33 @@ impl Store { self.auth().create_full_caps(namespace_id, owner)?; Ok(namespace_id) } + + /// Convert the form into an [`Entry`] by filling the fields with data from the environment and + /// the provided [`Store`]. + /// + /// `user_id` must be set to the user who is authenticating the entry. + pub async fn form_to_entry( + &self, + form: EntryForm, + user_id: UserId, // auth: AuthForm, + ) -> anyhow::Result { + let timestamp = match form.timestamp { + TimestampForm::Now => system_time_now(), + TimestampForm::Exact(timestamp) => timestamp, + }; + let subspace_id = match form.subspace_id { + SubspaceForm::User => user_id, + SubspaceForm::Exact(subspace) => subspace, + }; + let (payload_digest, payload_length) = form.payload.submit(self.payloads()).await?; + let entry = Entry::new( + form.namespace_id, + subspace_id, + form.path, + timestamp, + payload_length, + PayloadDigest(payload_digest), + ); + Ok(entry) + } } diff --git a/iroh-willow/src/store/auth.rs b/iroh-willow/src/store/auth.rs index 8b13789179..64112412f7 100644 --- a/iroh-willow/src/store/auth.rs +++ b/iroh-willow/src/store/auth.rs @@ -1 +1,310 @@ +use std::collections::{HashMap, HashSet}; +use anyhow::Result; +use ed25519_dalek::SignatureError; +use meadowcap::{IsCommunal, NamespaceIsNotCommunalError, OwnedCapabilityCreationError}; +use tracing::debug; + +use crate::{ + interest::{ + AreaOfInterestSelector, CapSelector, CapabilityPack, InterestMap, Interests, + InvalidCapabilityPack, + }, + proto::{ + data_model::WriteCapability, + grouping::{Area, AreaOfInterest, AreaOfInterestExt}, + keys::{NamespaceId, UserId}, + meadowcap::{AccessMode, FailedDelegationError, McCapability, ReadAuthorisation}, + }, + store::traits::{CapsStorage, SecretStorage, SecretStoreError, Storage}, +}; + +#[derive(Debug, Clone)] +pub struct DelegateTo { 
+ pub user: UserId, + pub restrict_area: RestrictArea, +} + +impl DelegateTo { + pub fn new(user: UserId, restrict_area: RestrictArea) -> Self { + Self { + user, + restrict_area, + } + } +} + +#[derive(Debug, Clone)] +pub enum RestrictArea { + None, + Restrict(Area), +} + +impl RestrictArea { + pub fn with_default(self, default: Area) -> Area { + match self { + RestrictArea::None => default.clone(), + RestrictArea::Restrict(area) => area, + } + } +} + +#[derive(Debug, Clone)] +pub struct Auth { + secrets: S::Secrets, + caps: S::Caps, +} + +impl Auth { + pub fn new(secrets: S::Secrets, caps: S::Caps) -> Self { + Self { secrets, caps } + } + pub fn get_write_cap( + &self, + selector: &CapSelector, + ) -> Result, AuthError> { + let cap = self.caps.get_write_cap(selector)?; + Ok(cap) + } + + pub fn get_read_cap( + &self, + selector: &CapSelector, + ) -> Result, AuthError> { + let cap = self.caps.get_read_cap(selector)?; + Ok(cap) + } + + pub fn list_read_caps(&self) -> Result + '_> { + self.caps.list_read_caps(None) + } + + pub fn import_caps( + &self, + caps: impl IntoIterator, + ) -> Result<(), AuthError> { + for cap in caps.into_iter() { + cap.validate()?; + // Only allow importing caps we can use. + // TODO: Is this what we want? + let user_id = cap.receiver(); + if !self.secrets.has_user(&user_id) { + return Err(AuthError::MissingUserSecret(user_id)); + } + self.caps.insert(cap)?; + } + Ok(()) + } + + pub fn insert_caps_unchecked( + &self, + caps: impl IntoIterator, + ) -> Result<(), AuthError> { + for cap in caps.into_iter() { + debug!(?cap, "insert cap"); + self.caps.insert(cap)?; + } + Ok(()) + } + + pub fn resolve_interests(&self, interests: Interests) -> Result { + match interests { + Interests::All => { + let out = self + .list_read_caps()? 
+ .map(|auth| { + let area = auth.read_cap().granted_area(); + let aoi = AreaOfInterest::new(area); + (auth, HashSet::from_iter([aoi])) + }) + .collect::>(); + Ok(out) + } + Interests::Select(interests) => { + let mut out: InterestMap = HashMap::new(); + for (cap_selector, aoi_selector) in interests { + let cap = self.get_read_cap(&cap_selector)?; + if let Some(cap) = cap { + let entry = out.entry(cap.clone()).or_default(); + match aoi_selector { + AreaOfInterestSelector::Widest => { + let area = cap.read_cap().granted_area(); + let aoi = AreaOfInterest::new(area); + entry.insert(aoi); + } + AreaOfInterestSelector::Exact(aois) => { + for aoi in aois { + entry.insert(aoi); + } + } + } + } + } + Ok(out) + } + Interests::Exact(interests) => Ok(interests), + } + } + + pub fn create_full_caps( + &self, + namespace_id: NamespaceId, + user_id: UserId, + ) -> Result<[CapabilityPack; 2], AuthError> { + // let namespace_key = namespace_id + // .into_public_key() + // .map_err(|_| AuthError::InvalidNamespaceId(namespace_id))?; + // let user_key: UserPublicKey = user_id + // .into_public_key() + // .map_err(|_| AuthError::InvalidUserId(user_id))?; + let read_cap = self.create_read_cap(namespace_id, user_id)?; + let write_cap = self.create_write_cap(namespace_id, user_id)?; + let pack = [read_cap, write_cap]; + self.insert_caps_unchecked(pack.clone())?; + Ok(pack) + } + + pub fn create_read_cap( + &self, + namespace_key: NamespaceId, + user_key: UserId, + ) -> Result { + let cap = if namespace_key.is_communal() { + McCapability::new_communal(namespace_key, user_key, AccessMode::Read)? + } else { + let namespace_secret = self + .secrets + .get_namespace(&namespace_key) + .ok_or(AuthError::MissingNamespaceSecret(namespace_key))?; + McCapability::new_owned(namespace_key, &namespace_secret, user_key, AccessMode::Read)? + }; + // TODO: Subspace capability. 
+ let pack = CapabilityPack::Read(ReadAuthorisation::new(cap, None).into()); + Ok(pack) + } + + pub fn create_write_cap( + &self, + namespace_key: NamespaceId, + user_key: UserId, + ) -> Result { + let cap = if namespace_key.is_communal() { + McCapability::new_communal(namespace_key, user_key, AccessMode::Write)? + } else { + let namespace_secret = self + .secrets + .get_namespace(&namespace_key) + .ok_or(AuthError::MissingNamespaceSecret(namespace_key))?; + McCapability::new_owned(namespace_key, &namespace_secret, user_key, AccessMode::Read)? + }; + let pack = CapabilityPack::Write(cap.into()); + Ok(pack) + } + + pub fn delegate_full_caps( + &self, + from: CapSelector, + access_mode: AccessMode, + to: DelegateTo, + store: bool, + ) -> Result, AuthError> { + let mut out = Vec::with_capacity(2); + // let user_key: UserPublicKey = to + // .user + // .into_public_key() + // .map_err(|_| AuthError::InvalidUserId(to.user))?; + let restrict_area = to.restrict_area; + let read_cap = self.delegate_read_cap(&from, to.user, restrict_area.clone())?; + out.push(read_cap); + if access_mode == AccessMode::Write { + let write_cap = self.delegate_write_cap(&from, to.user, restrict_area)?; + out.push(write_cap); + } + if store { + self.insert_caps_unchecked(out.clone())?; + } + Ok(out) + } + + pub fn delegate_read_cap( + &self, + from: &CapSelector, + to: UserId, + restrict_area: RestrictArea, + ) -> Result { + let auth = self.get_read_cap(from)?.ok_or(AuthError::NoCapability)?; + let read_cap = auth.read_cap(); + let subspace_cap = auth.subspace_cap(); + let user_id = read_cap.receiver(); + let user_secret = self + .secrets + .get_user(&user_id) + .ok_or(AuthError::MissingUserSecret(*user_id))?; + let area = restrict_area.with_default(read_cap.granted_area()); + let new_read_cap = read_cap.delegate(&user_secret, &to, &area)?; + + let new_subspace_cap = if let Some(subspace_cap) = subspace_cap { + if area.subspace().is_any() { + Some( + subspace_cap + .delegate(&user_secret, &to) + 
.map_err(AuthError::SubspaceCapDelegationFailed)?, + ) + } else { + None + } + } else { + None + }; + let pack = + CapabilityPack::Read(ReadAuthorisation::new(new_read_cap, new_subspace_cap).into()); + Ok(pack) + } + + pub fn delegate_write_cap( + &self, + from: &CapSelector, + to: UserId, + restrict_area: RestrictArea, + ) -> Result { + let cap = self.get_write_cap(from)?.ok_or(AuthError::NoCapability)?; + let user_secret = self + .secrets + .get_user(&cap.receiver()) + .ok_or(AuthError::MissingUserSecret(*cap.receiver()))?; + let area = restrict_area.with_default(cap.granted_area()); + let new_cap = cap.delegate(&user_secret, &to, &area)?; + Ok(CapabilityPack::Write(new_cap.into())) + } +} + +#[derive(thiserror::Error, Debug)] +pub enum AuthError { + #[error("invalid user id: {}", .0.fmt_short())] + InvalidUserId(UserId), + #[error("invalid namespace id: {}", .0.fmt_short())] + InvalidNamespaceId(NamespaceId), + #[error("missing user secret: {}", .0.fmt_short())] + MissingUserSecret(UserId), + #[error("missing namespace secret: {}", .0.fmt_short())] + MissingNamespaceSecret(NamespaceId), + #[error("secret store error: {0}")] + SecretStore(#[from] SecretStoreError), + #[error("no capability found")] + NoCapability, + // TODO: remove + #[error("{0}")] + Other(#[from] anyhow::Error), + #[error("Invalid capability pack")] + InvalidPack(#[from] InvalidCapabilityPack), + + #[error("Failed to create owned capability: {0}")] + CreateOwnedCap(#[from] OwnedCapabilityCreationError), + #[error("Failed to create communal capability: {0}")] + CreateCommunalCap(#[from] NamespaceIsNotCommunalError), + + #[error("Failed to delegate capability: {0}")] + DelegationFailed(#[from] FailedDelegationError), + #[error("Failed to delegate suubspace capability: {0}")] + SubspaceCapDelegationFailed(SignatureError), +} diff --git a/iroh-willow/src/store/entry.rs b/iroh-willow/src/store/entry.rs index ca1aecddfa..45984591b9 100644 --- a/iroh-willow/src/store/entry.rs +++ 
b/iroh-willow/src/store/entry.rs @@ -4,14 +4,13 @@ use std::{ }; use tokio::sync::broadcast; -use crate::{ - proto::{ - grouping::Area, - willow::{AuthorisedEntry, NamespaceId}, - }, - session::SessionId, +use crate::proto::{ + grouping::Area, + willow::{AuthorisedEntry, NamespaceId}, }; +pub type SessionId = u64; + use super::traits::EntryStorage; const BROADCAST_CAP: usize = 1024; diff --git a/iroh-willow/src/store/memory.rs b/iroh-willow/src/store/memory.rs index 14134bebc2..7b3dba404d 100644 --- a/iroh-willow/src/store/memory.rs +++ b/iroh-willow/src/store/memory.rs @@ -5,13 +5,13 @@ use std::rc::Rc; use anyhow::Result; use crate::{ - auth::{CapSelector, CapabilityPack}, + interest::{CapSelector, CapabilityPack}, proto::{ + data_model::{AuthorisedEntry, Entry, EntryExt, WriteCapability}, grouping::{Range, RangeEnd, ThreeDRange}, - keys::{NamespaceSecretKey, UserId, UserSecretKey}, - meadowcap, - sync::{Fingerprint, ReadAuthorisation}, - willow::{AuthorisedEntry, Entry, NamespaceId, WriteCapability}, + keys::{NamespaceId, NamespaceSecretKey, UserId, UserSecretKey}, + meadowcap::{self, is_wider_than, ReadAuthorisation}, + wgps::Fingerprint, }, store::traits::{self, RangeSplit, SplitAction, SplitOpts}, }; @@ -126,44 +126,44 @@ impl traits::EntryReader for Rc> { let mid = entries.get(split_index).expect("not empty"); let mut ranges = vec![]; // split in two halves by subspace - if mid.subspace_id != range.subspaces.start { + if *mid.subspace_id() != range.subspaces().start { ranges.push(ThreeDRange::new( - Range::new(range.subspaces.start, RangeEnd::Closed(mid.subspace_id)), - range.paths.clone(), - range.times, + Range::new_closed(range.subspaces().start, *mid.subspace_id()).unwrap(), + range.paths().clone(), + *range.times(), )); ranges.push(ThreeDRange::new( - Range::new(mid.subspace_id, range.subspaces.end), - range.paths.clone(), - range.times, + Range::new(*mid.subspace_id(), range.subspaces().end), + range.paths().clone(), + *range.times(), )); } // split by 
path - else if mid.path != range.paths.start { + else if *mid.path() != range.paths().start { ranges.push(ThreeDRange::new( - range.subspaces, + *range.subspaces(), Range::new( - range.paths.start.clone(), - RangeEnd::Closed(mid.path.clone()), + range.paths().start.clone(), + RangeEnd::Closed(mid.path().clone()), ), - range.times, + *range.times(), )); ranges.push(ThreeDRange::new( - range.subspaces, - Range::new(mid.path.clone(), range.paths.end.clone()), - range.times, + *range.subspaces(), + Range::new(mid.path().clone(), range.paths().end.clone()), + *range.times(), )); // split by time } else { ranges.push(ThreeDRange::new( - range.subspaces, - range.paths.clone(), - Range::new(range.times.start, RangeEnd::Closed(mid.timestamp)), + *range.subspaces(), + range.paths().clone(), + Range::new(range.times().start, RangeEnd::Closed(mid.timestamp())), )); ranges.push(ThreeDRange::new( - range.subspaces, - range.paths.clone(), - Range::new(mid.timestamp, range.times.end), + *range.subspaces(), + range.paths().clone(), + Range::new(mid.timestamp(), range.times().end), )); } let mut out = vec![]; @@ -210,7 +210,7 @@ impl traits::EntryStorage for Rc> { fn ingest_entry(&self, entry: &AuthorisedEntry) -> Result { let mut slf = self.borrow_mut(); - let entries = slf.entries.entry(entry.namespace_id()).or_default(); + let entries = slf.entries.entry(*entry.namespace_id()).or_default(); let new = entry.entry(); let mut to_remove = vec![]; for (i, existing) in entries.iter().enumerate() { @@ -218,15 +218,15 @@ impl traits::EntryStorage for Rc> { if existing == new { return Ok(false); } - if existing.subspace_id == new.subspace_id - && existing.path.is_prefix_of(&new.path) + if existing.subspace_id() == new.subspace_id() + && existing.path().is_prefix_of(&new.path()) && existing.is_newer_than(new) { // we cannot insert the entry, a newer entry exists return Ok(false); } - if new.subspace_id == existing.subspace_id - && new.path.is_prefix_of(&existing.path) + if 
new.subspace_id() == existing.subspace_id() + && new.path().is_prefix_of(&existing.path()) && new.is_newer_than(existing) { to_remove.push(i); @@ -258,15 +258,13 @@ impl CapsStore { // Select the best candidate, by sorting for // * first: widest area // * then: smallest number of delegations - let best = candidates.reduce( - |prev, next| { - if next.is_wider_than(prev) { - next - } else { - prev - } - }, - ); + let best = candidates.reduce(|prev, next| { + if is_wider_than(next, prev) { + next + } else { + prev + } + }); Ok(best.cloned()) } @@ -281,7 +279,7 @@ impl CapsStore { // Select the best candidate, by sorting for // * widest area let best = candidates.reduce(|prev, next| { - if next.read_cap().is_wider_than(prev.read_cap()) { + if is_wider_than(next.read_cap(), prev.read_cap()) { next } else { prev @@ -319,15 +317,15 @@ impl CapsStore { match cap { CapabilityPack::Read(cap) => { self.read_caps - .entry(cap.read_cap().granted_namespace().id()) + .entry(*cap.read_cap().granted_namespace()) .or_default() - .push(cap); + .push(cap.into()); } CapabilityPack::Write(cap) => { self.write_caps - .entry(cap.granted_namespace().id()) + .entry(*cap.granted_namespace()) .or_default() - .push(cap); + .push(cap.into()); } } } diff --git a/iroh-willow/src/store/traits.rs b/iroh-willow/src/store/traits.rs index 97bbf0e165..28bbd6d90f 100644 --- a/iroh-willow/src/store/traits.rs +++ b/iroh-willow/src/store/traits.rs @@ -3,12 +3,12 @@ use std::fmt::Debug; use anyhow::Result; use crate::{ - auth::{CapSelector, CapabilityPack}, + interest::{CapSelector, CapabilityPack}, proto::{ grouping::ThreeDRange, keys::{NamespaceSecretKey, NamespaceSignature, UserId, UserSecretKey, UserSignature}, - meadowcap, - sync::{Fingerprint, ReadAuthorisation}, + meadowcap::{self, ReadAuthorisation}, + sync::Fingerprint, willow::{AuthorisedEntry, Entry, NamespaceId, WriteCapability}, }, }; @@ -102,7 +102,7 @@ pub trait EntryReader: Debug + 'static { range: &ThreeDRange, ) -> impl Iterator> { 
self.get_entries_with_authorisation(namespace, range) - .map(|e| e.map(|e| e.into_entry())) + .map(|e| e.map(|e| e.0)) } } From f8bdc5f6dbc0f50a392f19af929f7dc1dedf24e0 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Mon, 12 Aug 2024 17:08:43 +0200 Subject: [PATCH 121/198] wip: port the rest of it --- iroh-willow/src/engine/actor.rs | 4 +- iroh-willow/src/engine/peer_manager.rs | 5 +- iroh-willow/src/interest.rs | 70 +- iroh-willow/src/lib.rs | 8 +- iroh-willow/src/net.rs | 20 +- iroh-willow/src/proto.rs | 1 - iroh-willow/src/proto/data_model.rs | 24 +- iroh-willow/src/proto/grouping.rs | 22 +- iroh-willow/src/proto/meadowcap.rs | 10 +- iroh-willow/src/proto/wgps.rs | 2 + iroh-willow/src/proto/{ => wgps}/challenge.rs | 2 +- iroh-willow/src/proto/wgps/messages.rs | 2 +- iroh-willow/src/session.rs | 10 +- iroh-willow/src/session/aoi_finder.rs | 10 +- iroh-willow/src/session/capabilities.rs | 29 +- iroh-willow/src/session/challenge.rs | 6 + iroh-willow/src/session/data.rs | 19 +- iroh-willow/src/session/error.rs | 22 +- iroh-willow/src/session/intents.rs | 15 +- iroh-willow/src/session/pai_finder.rs | 38 +- iroh-willow/src/session/payload.rs | 4 +- iroh-willow/src/session/reconciler.rs | 20 +- iroh-willow/src/session/run.rs | 16 +- iroh-willow/src/session/static_tokens.rs | 10 +- iroh-willow/src/store/auth.rs | 42 +- iroh-willow/src/store/memory.rs | 2 +- iroh-willow/tests/basic.rs | 851 +++++++++--------- 27 files changed, 683 insertions(+), 581 deletions(-) rename iroh-willow/src/proto/{ => wgps}/challenge.rs (96%) diff --git a/iroh-willow/src/engine/actor.rs b/iroh-willow/src/engine/actor.rs index d0b98c3ff1..863444b64d 100644 --- a/iroh-willow/src/engine/actor.rs +++ b/iroh-willow/src/engine/actor.rs @@ -11,8 +11,8 @@ use tokio_util::sync::CancellationToken; use tracing::{debug, error, error_span, trace, warn, Instrument}; use crate::{ - auth::{CapSelector, CapabilityPack, DelegateTo, InterestMap}, form::{AuthForm, EntryForm, EntryOrForm}, + 
interest::{CapSelector, CapabilityPack, DelegateTo, InterestMap, Interests}, net::ConnHandle, proto::{ grouping::ThreeDRange, @@ -20,7 +20,7 @@ use crate::{ meadowcap::{self, AccessMode}, willow::{AuthorisedEntry, Entry}, }, - session::{intents::Intent, run_session, Error, EventSender, Interests, SessionHandle}, + session::{intents::Intent, run_session, Error, EventSender, SessionHandle}, store::{ entry::EntryOrigin, traits::{EntryReader, SecretStorage, Storage}, diff --git a/iroh-willow/src/engine/peer_manager.rs b/iroh-willow/src/engine/peer_manager.rs index e5b2c7ec27..4ebe426bdc 100644 --- a/iroh-willow/src/engine/peer_manager.rs +++ b/iroh-willow/src/engine/peer_manager.rs @@ -20,14 +20,15 @@ use tokio_util::{either::Either, sync::CancellationToken}; use tracing::{debug, error_span, instrument, trace, warn, Instrument, Span}; use crate::{ + interest::Interests, net::{ establish, prepare_channels, terminate_gracefully, ChannelStreams, ConnHandle, ALPN, ERROR_CODE_DUPLICATE_CONN, ERROR_CODE_SHUTDOWN, }, - proto::sync::{AccessChallenge, InitialTransmission}, + proto::wgps::challenge::AccessChallenge, session::{ intents::{EventKind, Intent}, - Error, Interests, Role, SessionEvent, SessionHandle, SessionInit, SessionUpdate, + Error, InitialTransmission, Role, SessionEvent, SessionHandle, SessionInit, SessionUpdate, }, }; diff --git a/iroh-willow/src/interest.rs b/iroh-willow/src/interest.rs index 436532bdf9..74105bbafc 100644 --- a/iroh-willow/src/interest.rs +++ b/iroh-willow/src/interest.rs @@ -6,7 +6,9 @@ use crate::proto::{ data_model::{Entry, SerdeWriteCapability}, grouping::{Area, AreaExt, AreaOfInterest, Point}, keys::{NamespaceId, UserId}, - meadowcap::{serde_encoding::SerdeReadAuthorisation, McCapability, ReadAuthorisation}, + meadowcap::{ + serde_encoding::SerdeReadAuthorisation, AccessMode, McCapability, ReadAuthorisation, + }, }; pub type InterestMap = HashMap>; @@ -32,6 +34,22 @@ impl Interests { #[derive(Default, Debug)] pub struct 
SelectBuilder(HashMap); +pub trait IntoAreaOfInterest { + fn into_area_of_interest(self) -> AreaOfInterest; +} + +impl IntoAreaOfInterest for AreaOfInterest { + fn into_area_of_interest(self) -> AreaOfInterest { + self + } +} + +impl IntoAreaOfInterest for Area { + fn into_area_of_interest(self) -> AreaOfInterest { + AreaOfInterest::new(self, 0, 0) + } +} + impl SelectBuilder { pub fn add_full_cap(mut self, cap: impl Into) -> Self { let cap = cap.into(); @@ -42,11 +60,11 @@ impl SelectBuilder { pub fn add_area( mut self, cap: impl Into, - aois: impl IntoIterator>, + aois: impl IntoIterator, ) -> Self { let cap = cap.into(); let aois = aois.into_iter(); - let aois = aois.map(|aoi| aoi.into()); + let aois = aois.map(|aoi| aoi.into_area_of_interest()); match self.0.entry(cap) { hash_map::Entry::Vacant(entry) => { entry.insert(AreaOfInterestSelector::Exact(aois.collect())); @@ -98,7 +116,7 @@ impl CapSelector { /// Checks if the provided capability is matched by this [`CapSelector`]. pub fn is_covered_by(&self, cap: &McCapability) -> bool { self.namespace_id == *cap.granted_namespace() - && self.receiver.includes(&cap.receiver()) + && self.receiver.includes(cap.receiver()) && self.granted_area.is_covered_by(&cap.granted_area()) } @@ -209,8 +227,16 @@ impl CapabilityPack { } pub fn validate(&self) -> Result<(), InvalidCapabilityPack> { - // meadowcap capability are validated on deserialization. - Ok(()) + // meadowcap capability themselves are validated on creation/deserialization. 
+ let is_valid = match self { + Self::Read(cap) => cap.read_cap().access_mode() == AccessMode::Read, + Self::Write(cap) => cap.0.access_mode() == AccessMode::Write, + }; + if !is_valid { + Err(InvalidCapabilityPack) + } else { + Ok(()) + } // match self { // CapabilityPack::Read(auth) => { // auth.read_cap().validate()?; @@ -229,3 +255,35 @@ impl CapabilityPack { #[derive(Debug, thiserror::Error)] #[error("Invalid capability pack.")] pub struct InvalidCapabilityPack; + +// TODO: This doesn't really belong into this module. +#[derive(Debug, Clone)] +pub struct DelegateTo { + pub user: UserId, + pub restrict_area: RestrictArea, +} + +impl DelegateTo { + pub fn new(user: UserId, restrict_area: RestrictArea) -> Self { + Self { + user, + restrict_area, + } + } +} + +// TODO: This doesn't really belong into this module. +#[derive(Debug, Clone)] +pub enum RestrictArea { + None, + Restrict(Area), +} + +impl RestrictArea { + pub fn with_default(self, default: Area) -> Area { + match self { + RestrictArea::None => default.clone(), + RestrictArea::Restrict(area) => area, + } + } +} diff --git a/iroh-willow/src/lib.rs b/iroh-willow/src/lib.rs index 715dc73973..5473ba4476 100644 --- a/iroh-willow/src/lib.rs +++ b/iroh-willow/src/lib.rs @@ -3,12 +3,12 @@ #![allow(missing_docs)] #![deny(unsafe_code)] -// pub mod engine; +pub mod engine; pub mod form; -// pub mod net; -pub mod proto; -// pub mod session; pub mod interest; +pub mod net; +pub mod proto; +pub mod session; pub mod store; pub mod util; diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index ee8fcfe88b..3242d1a63a 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -10,15 +10,15 @@ use tracing::{debug, trace}; use crate::{ proto::sync::{ - AccessChallenge, ChallengeHash, Channel, InitialTransmission, LogicalChannel, Message, - CHALLENGE_HASH_LENGTH, MAX_PAYLOAD_SIZE_POWER, + AccessChallenge, ChallengeHash, Channel, LogicalChannel, Message, CHALLENGE_HASH_LENGTH, + MAX_PAYLOAD_SIZE_POWER, }, 
session::{ channels::{ ChannelReceivers, ChannelSenders, Channels, LogicalChannelReceivers, LogicalChannelSenders, }, - Role, + InitialTransmission, Role, }, util::channel::{ inbound_channel, outbound_channel, Guarantees, Reader, Receiver, Sender, Writer, @@ -395,18 +395,18 @@ mod tests { use tracing::{info, Instrument}; use crate::{ - auth::{CapSelector, DelegateTo, RestrictArea}, engine::ActorHandle, form::{AuthForm, EntryForm, PayloadForm, SubspaceForm, TimestampForm}, + interest::{CapSelector, DelegateTo, Interests, RestrictArea}, net::{terminate_gracefully, ConnHandle}, proto::{ + data_model::{Entry, InvalidPathError2, Path, PathExt}, grouping::ThreeDRange, keys::{NamespaceId, NamespaceKind, UserId}, meadowcap::AccessMode, sync::AccessChallenge, - willow::{Entry, InvalidPath, Path}, }, - session::{intents::Intent, Interests, Role, SessionHandle, SessionInit, SessionMode}, + session::{intents::Intent, Role, SessionHandle, SessionInit, SessionMode}, }; use super::{establish, prepare_channels}; @@ -469,7 +469,7 @@ mod tests { let cap_for_betty = handle_alfie .delegate_caps( CapSelector::widest(namespace_id), - AccessMode::ReadWrite, + AccessMode::Write, DelegateTo::new(user_betty, RestrictArea::None), ) .await?; @@ -602,7 +602,7 @@ mod tests { let cap_for_betty = handle_alfie .delegate_caps( CapSelector::widest(namespace_id), - AccessMode::ReadWrite, + AccessMode::Write, DelegateTo::new(user_betty, RestrictArea::None), ) .await?; @@ -774,7 +774,7 @@ mod tests { async fn get_entries(store: &ActorHandle, namespace: NamespaceId) -> Result> { let entries: Result> = store - .get_entries(namespace, ThreeDRange::full()) + .get_entries(namespace, ThreeDRange::new_full()) .await? 
.try_collect() .await; @@ -786,7 +786,7 @@ mod tests { namespace_id: NamespaceId, user_id: UserId, count: usize, - path_fn: impl Fn(usize) -> Result, + path_fn: impl Fn(usize) -> Result, content_fn: impl Fn(usize) -> String, track_entries: &mut impl Extend, ) -> Result<()> { diff --git a/iroh-willow/src/proto.rs b/iroh-willow/src/proto.rs index ab1a399d1d..0fca113b11 100644 --- a/iroh-willow/src/proto.rs +++ b/iroh-willow/src/proto.rs @@ -1,4 +1,3 @@ -pub mod challenge; pub mod data_model; pub mod grouping; pub mod keys; diff --git a/iroh-willow/src/proto/data_model.rs b/iroh-willow/src/proto/data_model.rs index afcfa230f8..ba1de841d6 100644 --- a/iroh-willow/src/proto/data_model.rs +++ b/iroh-willow/src/proto/data_model.rs @@ -1,8 +1,10 @@ use iroh_base::hash::Hash; use ufotofu::sync::{consumer::IntoVec, producer::FromSlice}; -use willow_data_model::InvalidPathError; +use willow_data_model::{AuthorisationToken as _, InvalidPathError}; use willow_encoding::sync::{Decodable, Encodable}; +use crate::proto::keys::UserSignature; + use super::{ keys, meadowcap::{self}, @@ -125,7 +127,6 @@ impl PathExt for Path { } } -// #[derive(Debug, Clone, Eq, PartialEq, derive_more::From, derive_more::Into, derive_more::Deref)] pub type Entry = willow_data_model::Entry< MAX_COMPONENT_LENGTH, MAX_COMPONENT_COUNT, @@ -172,8 +173,27 @@ impl AuthorisedEntry { pub fn entry(&self) -> &Entry { &self.0 } + + pub fn try_authorise(entry: Entry, token: AuthorisationToken) -> Result { + if token.is_authorised_write(&entry) { + Ok(AuthorisedEntry(entry, token)) + } else { + Err(Unauthorised) + } + } + + pub fn into_parts(self) -> (Entry, AuthorisationToken) { + (self.0, self.1) + } } +/// Error returned for entries that are not authorised. +/// +/// See [`is_authorised_write`] for details. 
+#[derive(Debug, thiserror::Error)] +#[error("Entry is not authorised")] +pub struct Unauthorised; + // #[derive(Debug, Clone, derive_more::From, derive_more::Into, derive_more::Deref)] // pub type AuthorisedEntry = // willow_data_model::AuthorisedEntry< diff --git a/iroh-willow/src/proto/grouping.rs b/iroh-willow/src/proto/grouping.rs index 4689b172d9..cadf82452f 100644 --- a/iroh-willow/src/proto/grouping.rs +++ b/iroh-willow/src/proto/grouping.rs @@ -1,4 +1,5 @@ pub use willow_data_model::grouping::{Range, RangeEnd}; +use willow_data_model::SubspaceId as _; use super::data_model::{ Entry, Path, SubspaceId, Timestamp, MAX_COMPONENT_COUNT, MAX_COMPONENT_LENGTH, MAX_PATH_LENGTH, @@ -85,6 +86,7 @@ impl AreaOfInterestExt for AreaOfInterest { pub trait AreaExt { fn includes_point(&self, point: &Point) -> bool; fn new_path(path: Path) -> Area; + fn into_range(&self) -> Range3d; } impl AreaExt for Area { @@ -95,6 +97,22 @@ impl AreaExt for Area { fn new_path(path: Path) -> Self { Self::new(AreaSubspace::Any, path, Range::full()) } + + fn into_range(&self) -> Range3d { + let subspaces = match self.subspace() { + AreaSubspace::Id(id) => match id.successor() { + None => Range::new_open(*id), + Some(end) => Range::new_closed(*id, end).expect("successor is bigger"), + }, + AreaSubspace::Any => Default::default(), + }; + let path = self.path(); + let path_range = match path.greater_but_not_prefixed() { + None => Range::new_open(path.clone()), + Some(end) => Range::new_closed(path.clone(), end).expect("successor is bigger"), + }; + Range3d::new(subspaces, path_range, self.times().clone()) + } } // impl Area { @@ -161,7 +179,7 @@ pub mod serde_encoding { #[derive( Debug, Clone, Eq, PartialEq, derive_more::From, derive_more::Into, derive_more::Deref, )] - pub struct SerdeAreaOfInterest(AreaOfInterest); + pub struct SerdeAreaOfInterest(pub AreaOfInterest); impl Serialize for SerdeAreaOfInterest { fn serialize(&self, serializer: S) -> Result { @@ -203,7 +221,7 @@ pub mod 
serde_encoding { } #[derive(Debug, Clone, derive_more::From, derive_more::Into, derive_more::Deref)] - pub struct SerdeRange3d(Range3d); + pub struct SerdeRange3d(pub Range3d); impl Serialize for SerdeRange3d { fn serialize(&self, serializer: S) -> Result { diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index f329955507..b9d1381383 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -40,6 +40,10 @@ pub type McSubspaceCapability = meadowcap::McSubspaceCapability< keys::UserSignature, >; +pub type SubspaceCapability = McSubspaceCapability; +pub type ReadCapability = McCapability; +pub type WriteCapability = McCapability; + pub type McAuthorisationToken = meadowcap::McAuthorisationToken< MAX_COMPONENT_LENGTH, MAX_COMPONENT_COUNT, @@ -78,18 +82,18 @@ impl ReadAuthorisation { } pub fn new_owned( - namespace_secret: NamespaceSecretKey, + namespace_secret: &NamespaceSecretKey, user_key: UserId, ) -> anyhow::Result { let read_cap = McCapability::new_owned( namespace_secret.public_key().id(), - &namespace_secret, + namespace_secret, user_key, AccessMode::Read, )?; let subspace_cap = meadowcap::McSubspaceCapability::new( namespace_secret.public_key().id(), - &namespace_secret, + namespace_secret, user_key, )?; Ok(Self::new(read_cap, Some(subspace_cap))) diff --git a/iroh-willow/src/proto/wgps.rs b/iroh-willow/src/proto/wgps.rs index f16a7323af..f60cbf56ac 100644 --- a/iroh-willow/src/proto/wgps.rs +++ b/iroh-willow/src/proto/wgps.rs @@ -1,8 +1,10 @@ +pub mod challenge; pub mod channels; pub mod fingerprint; pub mod handles; pub mod messages; +pub use challenge::*; pub use channels::*; pub use fingerprint::*; pub use handles::*; diff --git a/iroh-willow/src/proto/challenge.rs b/iroh-willow/src/proto/wgps/challenge.rs similarity index 96% rename from iroh-willow/src/proto/challenge.rs rename to iroh-willow/src/proto/wgps/challenge.rs index e2a5b4e397..3768c0b8a7 100644 --- 
a/iroh-willow/src/proto/challenge.rs +++ b/iroh-willow/src/proto/wgps/challenge.rs @@ -4,7 +4,7 @@ use rand::Rng; use rand_core::CryptoRngCore; use serde::{Deserialize, Serialize}; -use super::data_model::DIGEST_LENGTH; +use crate::proto::data_model::DIGEST_LENGTH; pub const CHALLENGE_LENGTH: usize = 32; pub const CHALLENGE_HASH_LENGTH: usize = DIGEST_LENGTH; diff --git a/iroh-willow/src/proto/wgps/messages.rs b/iroh-willow/src/proto/wgps/messages.rs index 9835052e5d..2fc1a41f91 100644 --- a/iroh-willow/src/proto/wgps/messages.rs +++ b/iroh-willow/src/proto/wgps/messages.rs @@ -4,13 +4,13 @@ use serde::{Deserialize, Serialize}; use crate::{ proto::{ - challenge::AccessChallenge, data_model::serde_encoding::SerdeEntry, grouping::{ serde_encoding::{SerdeAreaOfInterest, SerdeRange3d}, Area, }, meadowcap::{self}, + wgps::AccessChallenge, willow::Entry, }, util::codec::{DecodeOutcome, Decoder, Encoder}, diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index ad4708f7cd..0529fe5d6a 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -1,20 +1,17 @@ -use std::{ - collections::{hash_map, BTreeSet, HashMap, HashSet}, - sync::Arc, -}; +use std::sync::Arc; use channels::ChannelSenders; use tokio::sync::mpsc; use tokio_util::sync::CancellationToken; use crate::{ - auth::CapSelector, - proto::{grouping::AreaOfInterest, sync::ReadAuthorisation}, + interest::Interests, session::{error::ChannelReceiverDropped, intents::Intent}, }; mod aoi_finder; mod capabilities; +mod challenge; pub mod channels; mod data; pub mod error; @@ -26,6 +23,7 @@ mod resource; mod run; mod static_tokens; +pub(crate) use self::challenge::InitialTransmission; pub(crate) use self::channels::Channels; pub(crate) use self::error::Error; pub(crate) use self::run::run_session; diff --git a/iroh-willow/src/session/aoi_finder.rs b/iroh-willow/src/session/aoi_finder.rs index 39536aeb6f..3b1a49bfa4 100644 --- a/iroh-willow/src/session/aoi_finder.rs +++ 
b/iroh-willow/src/session/aoi_finder.rs @@ -4,13 +4,13 @@ use futures_lite::{Stream, StreamExt}; use genawaiter::rc::Co; use crate::{ - auth::InterestMap, + interest::InterestMap, proto::{ grouping::{Area, AreaOfInterest}, keys::NamespaceId, + meadowcap::{ReadAuthorisation, ReadCapability}, sync::{ - AreaOfInterestHandle, CapabilityHandle, IntersectionHandle, ReadAuthorisation, - ReadCapability, SetupBindAreaOfInterest, + AreaOfInterestHandle, CapabilityHandle, IntersectionHandle, SetupBindAreaOfInterest, }, }, session::{ @@ -154,7 +154,7 @@ impl IntersectionFinder { self.co .yield_(Output::SignAndSendCapability { handle, - capability: authorisation.read_cap().clone(), + capability: authorisation.read_cap().clone().into(), }) .await; } @@ -185,7 +185,7 @@ impl AoiResources { self.bind_validated(co, Scope::Ours, namespace, aoi.clone()) .await; let msg = SetupBindAreaOfInterest { - area_of_interest: aoi, + area_of_interest: aoi.into(), authorisation, }; co.yield_(Output::SendMessage(msg)).await; diff --git a/iroh-willow/src/session/capabilities.rs b/iroh-willow/src/session/capabilities.rs index c1cdff26f9..eb4bb6f873 100644 --- a/iroh-willow/src/session/capabilities.rs +++ b/iroh-willow/src/session/capabilities.rs @@ -7,15 +7,14 @@ use std::{ use crate::{ proto::{ - challenge::ChallengeState, keys::UserSignature, + meadowcap::{ReadCapability, SubspaceCapability}, sync::{ AccessChallenge, CapabilityHandle, ChallengeHash, CommitmentReveal, IntersectionHandle, - PaiReplySubspaceCapability, ReadCapability, SetupBindReadCapability, - SubspaceCapability, + PaiReplySubspaceCapability, SetupBindReadCapability, }, }, - session::{resource::ResourceMap, Error, Role}, + session::{challenge::ChallengeState, resource::ResourceMap, Error, Role}, store::traits::SecretStorage, }; @@ -72,9 +71,9 @@ impl Capabilities { ) -> Result { let inner = self.0.borrow(); let signable = inner.challenge.signable()?; - let signature = secret_store.sign_user(&capability.receiver().id(), &signable)?; 
+ let signature = secret_store.sign_user(&capability.receiver(), &signable)?; Ok(SetupBindReadCapability { - capability, + capability: capability.into(), handle: intersection_handle, signature, }) @@ -89,9 +88,12 @@ impl Capabilities { capability: ReadCapability, signature: UserSignature, ) -> Result<(), Error> { - capability.validate()?; + // TODO(Frando): I *think* meadowcap caps are always validated (no way to construct invalid ones). + // capability.validate()?; let mut inner = self.0.borrow_mut(); - inner.challenge.verify(capability.receiver(), &signature)?; + // TODO(Frando): We should somehow remove the `Id`/`PublicKey` split. + let receiver_key = capability.receiver().into_public_key()?; + inner.challenge.verify(&receiver_key, &signature)?; inner.theirs.bind(capability); Ok(()) } @@ -110,11 +112,14 @@ impl Capabilities { capability: &SubspaceCapability, signature: &UserSignature, ) -> Result<(), Error> { - capability.validate()?; + // TODO(Frando): I *think* meadowcap caps are always validated (no way to construct invalid ones). + // capability.validate()?; + // TODO(Frando): We should somehow remove the `Id`/`PublicKey` split. 
+ let receiver_key = capability.receiver().into_public_key()?; self.0 .borrow_mut() .challenge - .verify(capability.receiver(), signature)?; + .verify(&receiver_key, signature)?; Ok(()) } @@ -148,10 +153,10 @@ impl Capabilities { ) -> Result { let inner = self.0.borrow(); let signable = inner.challenge.signable()?; - let signature = secrets.sign_user(&cap.receiver().id(), &signable)?; + let signature = secrets.sign_user(&cap.receiver(), &signable)?; let message = PaiReplySubspaceCapability { handle, - capability: cap.clone(), + capability: cap.clone().into(), signature, }; Ok(message) diff --git a/iroh-willow/src/session/challenge.rs b/iroh-willow/src/session/challenge.rs index 4d95da33ec..c3664128ef 100644 --- a/iroh-willow/src/session/challenge.rs +++ b/iroh-willow/src/session/challenge.rs @@ -1,3 +1,9 @@ +use super::{Error, Role}; +use crate::proto::{ + keys::{UserPublicKey, UserSecretKey, UserSignature}, + wgps::challenge::{AccessChallenge, AccessChallengeBytes, ChallengeHash}, +}; + /// Data from the initial transmission /// /// This happens before the session is initialized. 
diff --git a/iroh-willow/src/session/data.rs b/iroh-willow/src/session/data.rs index f9c02b8d23..2aaea50404 100644 --- a/iroh-willow/src/session/data.rs +++ b/iroh-willow/src/session/data.rs @@ -4,8 +4,8 @@ use tokio_stream::wrappers::ReceiverStream; use crate::{ proto::{ - sync::{DataMessage, DataSendEntry, DataSendPayload}, - willow::AuthorisedEntry, + data_model::AuthorisedEntry, + wgps::{DataMessage, DataSendEntry, DataSendPayload, StaticToken}, }, session::{ channels::ChannelSenders, payload::DEFAULT_CHUNK_SIZE, static_tokens::StaticTokens, Error, @@ -85,17 +85,18 @@ impl DataSender { } async fn send_entry(&mut self, authorised_entry: AuthorisedEntry) -> Result<(), Error> { - let (entry, token) = authorised_entry.into_parts(); - let (static_token, dynamic_token) = token.into_parts(); + let AuthorisedEntry(entry, token) = authorised_entry; + let static_token: StaticToken = token.capability.into(); + let dynamic_token = token.signature; // TODO: partial payloads // let available = entry.payload_length; let static_token_handle = self .static_tokens .bind_and_send_ours(static_token, &self.send) .await?; - let digest = entry.payload_digest; + let digest = *entry.payload_digest(); let msg = DataSendEntry { - entry, + entry: entry.into(), static_token_handle, dynamic_token, offset: 0, @@ -150,7 +151,7 @@ impl DataReceiver { let authorised_entry = self .static_tokens .authorise_entry_eventually( - message.entry, + message.entry.into(), message.static_token_handle, message.dynamic_token, ) @@ -162,10 +163,10 @@ impl DataReceiver { channel: EntryChannel::Data, }, )?; - let entry = authorised_entry.into_entry(); + let entry = authorised_entry.0; // TODO: handle offset self.current_payload - .set(entry.payload_digest, entry.payload_length)?; + .set(*entry.payload_digest(), entry.payload_length())?; Ok(()) } diff --git a/iroh-willow/src/session/error.rs b/iroh-willow/src/session/error.rs index 9588e616ba..9eff5f0c31 100644 --- a/iroh-willow/src/session/error.rs +++ 
b/iroh-willow/src/session/error.rs @@ -18,7 +18,7 @@ pub enum Error { #[error("local store failed: {0}")] Store(#[from] anyhow::Error), #[error("authentication error: {0}")] - Auth(#[from] crate::auth::AuthError), + Auth(#[from] crate::store::auth::AuthError), #[error("payload store failed: {0}")] PayloadStore(std::io::Error), #[error("payload digest does not match expected digest")] @@ -119,11 +119,11 @@ impl From for Error { Self::UnauthorisedEntryReceived } } -impl From for Error { - fn from(_value: meadowcap::InvalidCapability) -> Self { - Self::InvalidCapability - } -} +// impl From for Error { +// fn from(_value: meadowcap::InvalidCapability) -> Self { +// Self::InvalidCapability +// } +// } impl From for Error { fn from(_value: SignatureError) -> Self { @@ -131,11 +131,11 @@ impl From for Error { } } -impl From for Error { - fn from(_value: meadowcap::InvalidParams) -> Self { - Self::InvalidParameters("") - } -} +// impl From for Error { +// fn from(_value: meadowcap::InvalidParams) -> Self { +// Self::InvalidParameters("") +// } +// } impl From for Error { fn from(value: MissingResource) -> Self { diff --git a/iroh-willow/src/session/intents.rs b/iroh-willow/src/session/intents.rs index 7b76a67c1b..903dfa500e 100644 --- a/iroh-willow/src/session/intents.rs +++ b/iroh-willow/src/session/intents.rs @@ -17,13 +17,13 @@ use tokio_util::sync::PollSender; use tracing::{debug, trace, warn}; use crate::{ - auth::{Auth, InterestMap}, + interest::InterestMap, proto::{ grouping::{Area, AreaOfInterest}, keys::NamespaceId, }, session::{error::ChannelReceiverDropped, Error, Interests, SessionInit, SessionMode}, - store::traits::Storage, + store::{auth::Auth, traits::Storage}, util::gen_stream::GenStream, }; @@ -526,7 +526,11 @@ impl IntentInfo { fn matches_area(&self, namespace: &NamespaceId, area: &Area) -> bool { self.interests .get(namespace) - .map(|interests| interests.iter().any(|x| x.area.has_intersection(area))) + .map(|interests| { + interests + .iter() + 
.any(|x| x.area.intersection(area).is_some()) + }) .unwrap_or(false) } @@ -534,7 +538,10 @@ impl IntentInfo { let mut namespace_complete = false; let mut matches = false; if let Some(interests) = self.interests.get_mut(namespace) { - if interests.iter().any(|x| x.area.has_intersection(area)) { + if interests + .iter() + .any(|x| x.area.intersection(area).is_some()) + { matches = true; interests.retain(|x| !area.includes_area(&x.area)); if interests.is_empty() { diff --git a/iroh-willow/src/session/pai_finder.rs b/iroh-willow/src/session/pai_finder.rs index 8f956e5d74..86615b7774 100644 --- a/iroh-willow/src/session/pai_finder.rs +++ b/iroh-willow/src/session/pai_finder.rs @@ -17,11 +17,12 @@ use tracing::{debug, trace}; use crate::{ proto::{ - grouping::SubspaceArea, + grouping::AreaSubspace, + meadowcap::{ReadAuthorisation, SubspaceCapability}, pai::{Fragment, FragmentKind, FragmentSet, PaiScheme, PsiGroup, PsiScalar}, sync::{ IntersectionHandle, IntersectionMessage, Message, PaiBindFragment, PaiReplyFragment, - PaiRequestSubspaceCapability, ReadAuthorisation, SubspaceCapability, + PaiRequestSubspaceCapability, }, willow::{NamespaceId, Path}, }, @@ -387,7 +388,7 @@ pub struct LocalFragmentInfo { path: Path, // will be needed for spec-compliant encodings of read capabilities #[allow(dead_code)] - subspace: SubspaceArea, + subspace: AreaSubspace, } impl LocalFragmentInfo { @@ -504,13 +505,14 @@ mod tests { use crate::{ proto::{ - grouping::{Area, SubspaceArea}, - keys::{NamespaceKind, NamespaceSecretKey, UserPublicKey, UserSecretKey}, - sync::{ + grouping::{Area, AreaSubspace}, + keys::{NamespaceKind, NamespaceSecretKey, UserId, UserPublicKey, UserSecretKey}, + meadowcap::ReadAuthorisation, + wgps::{ IntersectionMessage, Message, PaiBindFragment, PaiReplyFragment, - PaiRequestSubspaceCapability, ReadAuthorisation, + PaiRequestSubspaceCapability, }, - willow::Path, + willow::{Path, PathExt}, }, session::{pai_finder::PaiIntersection, Error}, }; @@ -529,8 +531,8 @@ mod 
tests { let (_, alfie_public) = keypair(&mut rng); let (_, betty_public) = keypair(&mut rng); - let auth_alfie = ReadAuthorisation::new_owned(&namespace_secret, alfie_public); - let auth_betty = ReadAuthorisation::new_owned(&namespace_secret, betty_public); + let auth_alfie = ReadAuthorisation::new_owned(&namespace_secret, alfie_public).unwrap(); + let auth_betty = ReadAuthorisation::new_owned(&namespace_secret, betty_public).unwrap(); let (alfie, betty) = Handle::create_two(); @@ -559,15 +561,15 @@ mod tests { let namespace = NamespaceSecretKey::generate(&mut rng, NamespaceKind::Owned); let (root_secret, root_public) = keypair(&mut rng); - let root_auth = ReadAuthorisation::new_owned(&namespace, root_public); + let root_auth = ReadAuthorisation::new_owned(&namespace, root_public).unwrap(); let (_, alfie_public) = keypair(&mut rng); let (_, betty_public) = keypair(&mut rng); let (_, gemma_public) = keypair(&mut rng); let alfie_area = Area::new( - SubspaceArea::Id(gemma_public.id()), - Path::empty(), + AreaSubspace::Id(gemma_public), + Path::new_empty(), Default::default(), ); let alfie_auth = root_auth @@ -576,7 +578,7 @@ mod tests { assert!(alfie_auth.subspace_cap().is_none()); let betty_area = Area::new( - SubspaceArea::Any, + AreaSubspace::Any, Path::new(&[b"chess"]).unwrap(), Default::default(), ); @@ -613,9 +615,9 @@ mod tests { }; assert_eq!(&cap, betty_auth.subspace_cap().unwrap()); - let namespace = cap.granted_namespace().id(); + let namespace = cap.granted_namespace(); alfie - .input(Input::ReceivedVerifiedSubspaceCapReply(handle, namespace)) + .input(Input::ReceivedVerifiedSubspaceCapReply(handle, *namespace)) .await; let next = alfie.next_intersection().await; @@ -631,10 +633,10 @@ mod tests { betty.join().await; } - fn keypair(rng: &mut R) -> (UserSecretKey, UserPublicKey) { + fn keypair(rng: &mut R) -> (UserSecretKey, UserId) { let secret = UserSecretKey::generate(rng); let public = secret.public_key(); - (secret, public) + (secret, public.id()) } 
async fn transfer + Into>(from: &Handle, to: &Handle) { diff --git a/iroh-willow/src/session/payload.rs b/iroh-willow/src/session/payload.rs index c29c6045f6..dae6e8c17c 100644 --- a/iroh-willow/src/session/payload.rs +++ b/iroh-willow/src/session/payload.rs @@ -23,7 +23,7 @@ pub async fn send_payload_chunked( map: impl Fn(Bytes) -> Message, ) -> Result { let payload_entry = payload_store - .get(&digest) + .get(&digest.0) .await .map_err(Error::PayloadStore)?; if let Some(entry) = payload_entry { @@ -129,7 +129,7 @@ impl CurrentPayload { .ok_or_else(|| Error::InvalidMessageInCurrentState)?; drop(writer.sender); let (tag, len) = writer.fut.await.map_err(Error::PayloadStore)?; - if *tag.hash() != state.payload_digest { + if *tag.hash() != state.payload_digest.0 { return Err(Error::PayloadDigestMismatch); } if len != state.expected_length { diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index 28082d8f05..754acce9fe 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -26,7 +26,7 @@ pub enum Output { use crate::{ proto::{ - grouping::{AreaOfInterest, ThreeDRange}, + grouping::{AreaExt, AreaOfInterest, ThreeDRange}, keys::NamespaceId, sync::{ AreaOfInterestHandle, Fingerprint, IsHandle, LengthyEntry, @@ -149,13 +149,13 @@ impl Reconciler { .shared .static_tokens .authorise_entry_eventually( - message.entry.entry, + message.entry.entry.into(), message.static_token_handle, message.dynamic_token, ) .await?; self.current_entry.received_entry( - authorised_entry.entry().payload_digest, + *authorised_entry.entry().payload_digest(), message.entry.available, )?; self.shared.store.entries().ingest( @@ -517,7 +517,7 @@ impl Target { ) -> anyhow::Result<()> { self.mark_our_next_range_pending(); let msg = ReconciliationSendFingerprint { - range, + range: range.into(), fingerprint, sender_handle: self.intersection.our_handle, receiver_handle: self.intersection.their_handle, @@ -540,7 +540,7 @@ 
impl Target { None => self.snapshot.count(self.namespace(), range)?, }; let msg = ReconciliationAnnounceEntries { - range: range.clone(), + range: range.clone().into(), count: our_entry_count, want_response, will_sort: false, // todo: sorted? @@ -559,18 +559,20 @@ impl Target { { let authorised_entry = authorised_entry?; let (entry, token) = authorised_entry.into_parts(); - let (static_token, dynamic_token) = token.into_parts(); + + let static_token = token.capability.into(); + let dynamic_token = token.signature; // TODO: partial payloads - let available = entry.payload_length; + let available = entry.payload_length(); let static_token_handle = shared .static_tokens .bind_and_send_ours(static_token, &shared.send) .await?; - let digest = entry.payload_digest; + let digest = *entry.payload_digest(); let msg = ReconciliationSendEntry { entry: LengthyEntry::new(entry, available), static_token_handle, - dynamic_token, + dynamic_token: dynamic_token.into(), }; shared.send.send(msg).await?; diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index ae23a8a8a8..9b4e88a1e8 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -10,7 +10,10 @@ use tracing::{debug, error_span, trace, warn, Instrument, Span}; use crate::{ net::ConnHandle, - proto::sync::{ControlIssueGuarantee, LogicalChannel, Message, SetupBindAreaOfInterest}, + proto::{ + sync::{ControlIssueGuarantee, LogicalChannel, Message, SetupBindAreaOfInterest}, + willow::Unauthorised, + }, session::{ aoi_finder::{self, IntersectionFinder}, capabilities::Capabilities, @@ -283,7 +286,7 @@ pub(crate) async fn run_session( let caps_recv_loop = with_span(error_span!("caps_recv"), async { while let Some(message) = capability_recv.try_next().await? 
{ let handle = message.handle; - caps.validate_and_bind_theirs(message.capability, message.signature)?; + caps.validate_and_bind_theirs(message.capability.0, message.signature)?; pai_inbox .send(pai::Input::ReceivedReadCapForIntersection(handle)) .await?; @@ -318,9 +321,12 @@ pub(crate) async fn run_session( area_of_interest, authorisation, } = message; + let area_of_interest = area_of_interest.0; let cap = caps.get_theirs_eventually(authorisation).await; - cap.try_granted_area(&area_of_interest.area)?; - let namespace = cap.granted_namespace().id(); + if !cap.granted_area().includes_area(&area_of_interest.area) { + return Err(Unauthorised.into()); + } + let namespace = *cap.granted_namespace(); intersection_inbox .send(aoi_finder::Input::ReceivedValidatedAoi { namespace, @@ -432,7 +438,7 @@ async fn control_loop( pai_inbox .send(pai::Input::ReceivedVerifiedSubspaceCapReply( msg.handle, - msg.capability.granted_namespace().id(), + *msg.capability.granted_namespace(), )) .await?; } diff --git a/iroh-willow/src/session/static_tokens.rs b/iroh-willow/src/session/static_tokens.rs index db954df86a..7856f36220 100644 --- a/iroh-willow/src/session/static_tokens.rs +++ b/iroh-willow/src/session/static_tokens.rs @@ -8,7 +8,7 @@ use std::{ use crate::{ proto::{ sync::{DynamicToken, SetupBindStaticToken, StaticToken, StaticTokenHandle}, - willow::{AuthorisedEntry, Entry}, + willow::{AuthorisationToken, AuthorisedEntry, Entry}, }, session::{channels::ChannelSenders, resource::ResourceMap, Error}, }; @@ -54,8 +54,12 @@ impl StaticTokens { }) .await; - let authorised_entry = - AuthorisedEntry::try_from_parts(entry, static_token.clone(), dynamic_token)?; + let token = AuthorisationToken { + signature: dynamic_token, + capability: static_token.into(), + }; + + let authorised_entry = AuthorisedEntry::try_authorise(entry, token)?; Ok(authorised_entry) } diff --git a/iroh-willow/src/store/auth.rs b/iroh-willow/src/store/auth.rs index 64112412f7..b74d9129f6 100644 --- 
a/iroh-willow/src/store/auth.rs +++ b/iroh-willow/src/store/auth.rs @@ -7,8 +7,8 @@ use tracing::debug; use crate::{ interest::{ - AreaOfInterestSelector, CapSelector, CapabilityPack, InterestMap, Interests, - InvalidCapabilityPack, + AreaOfInterestSelector, CapSelector, CapabilityPack, DelegateTo, InterestMap, Interests, + InvalidCapabilityPack, RestrictArea, }, proto::{ data_model::WriteCapability, @@ -19,36 +19,6 @@ use crate::{ store::traits::{CapsStorage, SecretStorage, SecretStoreError, Storage}, }; -#[derive(Debug, Clone)] -pub struct DelegateTo { - pub user: UserId, - pub restrict_area: RestrictArea, -} - -impl DelegateTo { - pub fn new(user: UserId, restrict_area: RestrictArea) -> Self { - Self { - user, - restrict_area, - } - } -} - -#[derive(Debug, Clone)] -pub enum RestrictArea { - None, - Restrict(Area), -} - -impl RestrictArea { - pub fn with_default(self, default: Area) -> Area { - match self { - RestrictArea::None => default.clone(), - RestrictArea::Restrict(area) => area, - } - } -} - #[derive(Debug, Clone)] pub struct Auth { secrets: S::Secrets, @@ -84,6 +54,7 @@ impl Auth { caps: impl IntoIterator, ) -> Result<(), AuthError> { for cap in caps.into_iter() { + tracing::debug!("import cap {cap:?}"); cap.validate()?; // Only allow importing caps we can use. // TODO: Is this what we want? @@ -92,6 +63,7 @@ impl Auth { return Err(AuthError::MissingUserSecret(user_id)); } self.caps.insert(cap)?; + tracing::debug!("imported"); } Ok(()) } @@ -114,7 +86,7 @@ impl Auth { .list_read_caps()? 
.map(|auth| { let area = auth.read_cap().granted_area(); - let aoi = AreaOfInterest::new(area); + let aoi = AreaOfInterest::new(area, 0, 0); (auth, HashSet::from_iter([aoi])) }) .collect::>(); @@ -129,7 +101,7 @@ impl Auth { match aoi_selector { AreaOfInterestSelector::Widest => { let area = cap.read_cap().granted_area(); - let aoi = AreaOfInterest::new(area); + let aoi = AreaOfInterest::new(area, 0, 0); entry.insert(aoi); } AreaOfInterestSelector::Exact(aois) => { @@ -195,7 +167,7 @@ impl Auth { .secrets .get_namespace(&namespace_key) .ok_or(AuthError::MissingNamespaceSecret(namespace_key))?; - McCapability::new_owned(namespace_key, &namespace_secret, user_key, AccessMode::Read)? + McCapability::new_owned(namespace_key, &namespace_secret, user_key, AccessMode::Write)? }; let pack = CapabilityPack::Write(cap.into()); Ok(pack) diff --git a/iroh-willow/src/store/memory.rs b/iroh-willow/src/store/memory.rs index 7b3dba404d..a630745ebd 100644 --- a/iroh-willow/src/store/memory.rs +++ b/iroh-willow/src/store/memory.rs @@ -10,7 +10,7 @@ use crate::{ data_model::{AuthorisedEntry, Entry, EntryExt, WriteCapability}, grouping::{Range, RangeEnd, ThreeDRange}, keys::{NamespaceId, NamespaceSecretKey, UserId, UserSecretKey}, - meadowcap::{self, is_wider_than, ReadAuthorisation}, + meadowcap::{self, is_wider_than, ReadAuthorisation, AccessMode}, wgps::Fingerprint, }, store::traits::{self, RangeSplit, SplitAction, SplitOpts}, diff --git a/iroh-willow/tests/basic.rs b/iroh-willow/tests/basic.rs index f0679deed2..cc509a1375 100644 --- a/iroh-willow/tests/basic.rs +++ b/iroh-willow/tests/basic.rs @@ -1,427 +1,424 @@ -// use std::time::Duration; - -// use anyhow::Result; -// use futures_concurrency::future::TryJoin; -// use futures_lite::StreamExt; - -// use iroh_willow::{ -// proto::{grouping::Area, willow::Path}, -// session::{ -// intents::{Completion, EventKind}, -// Interests, SessionInit, SessionMode, -// }, -// }; - -// use self::util::{create_rng, insert, setup_and_delegate, 
spawn_two, Peer}; - -// #[tokio::test(flavor = "multi_thread")] -// async fn peer_manager_two_intents() -> Result<()> { -// iroh_test::logging::setup_multithreaded(); -// let mut rng = create_rng("peer_manager_two_intents"); - -// let [alfie, betty] = spawn_two(&mut rng).await?; -// let (namespace, _alfie_user, betty_user) = setup_and_delegate(&alfie, &betty).await?; -// let betty_node_id = betty.node_id(); - -// insert(&betty, namespace, betty_user, &[b"foo", b"1"], "foo 1").await?; -// insert(&betty, namespace, betty_user, &[b"bar", b"2"], "bar 2").await?; -// insert(&betty, namespace, betty_user, &[b"bar", b"3"], "bar 3").await?; - -// let task_foo_path = tokio::task::spawn({ -// let alfie = alfie.clone(); -// async move { -// let path = Path::new(&[b"foo"]).unwrap(); - -// let init = SessionInit::new( -// Interests::builder().add_area(namespace, [Area::path(path.clone())]), -// SessionMode::ReconcileOnce, -// ); -// let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); - -// assert_eq!( -// intent.next().await.unwrap(), -// EventKind::CapabilityIntersection { -// namespace, -// area: Area::full(), -// } -// ); - -// assert_eq!( -// intent.next().await.unwrap(), -// EventKind::InterestIntersection { -// namespace, -// area: Area::path(path.clone()).into() -// } -// ); - -// assert_eq!( -// intent.next().await.unwrap(), -// EventKind::Reconciled { -// namespace, -// area: Area::path(path.clone()).into() -// } -// ); - -// assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); - -// assert!(intent.next().await.is_none()); -// } -// }); - -// let task_bar_path = tokio::task::spawn({ -// let alfie = alfie.clone(); -// async move { -// let path = Path::new(&[b"bar"]).unwrap(); - -// let interests = Interests::builder().add_area(namespace, [Area::path(path.clone())]); -// let init = SessionInit::new(interests, SessionMode::ReconcileOnce); - -// let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); - -// 
assert_eq!( -// intent.next().await.unwrap(), -// EventKind::CapabilityIntersection { -// namespace, -// area: Area::full(), -// } -// ); - -// assert_eq!( -// intent.next().await.unwrap(), -// EventKind::InterestIntersection { -// namespace, -// area: Area::path(path.clone()).into() -// } -// ); - -// assert_eq!( -// intent.next().await.unwrap(), -// EventKind::Reconciled { -// namespace, -// area: Area::path(path.clone()).into() -// } -// ); - -// assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); - -// assert!(intent.next().await.is_none()); -// } -// }); - -// task_foo_path.await.unwrap(); -// task_bar_path.await.unwrap(); - -// // tokio::time::sleep(std::time::Duration::from_secs(1)).await; - -// [alfie, betty].map(Peer::shutdown).try_join().await?; - -// Ok(()) -// } - -// #[tokio::test(flavor = "multi_thread")] -// async fn peer_manager_update_intent() -> Result<()> { -// iroh_test::logging::setup_multithreaded(); -// let mut rng = create_rng("peer_manager_update_intent"); - -// let [alfie, betty] = spawn_two(&mut rng).await?; -// let (namespace, _alfie_user, betty_user) = setup_and_delegate(&alfie, &betty).await?; -// let betty_node_id = betty.node_id(); - -// insert(&betty, namespace, betty_user, &[b"foo"], "foo 1").await?; -// insert(&betty, namespace, betty_user, &[b"bar"], "bar 1").await?; - -// let path = Path::new(&[b"foo"]).unwrap(); -// let interests = Interests::builder().add_area(namespace, [Area::path(path.clone())]); -// let init = SessionInit::new(interests, SessionMode::Live); -// let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); - -// assert_eq!( -// intent.next().await.unwrap(), -// EventKind::CapabilityIntersection { -// namespace, -// area: Area::full(), -// } -// ); -// assert_eq!( -// intent.next().await.unwrap(), -// EventKind::InterestIntersection { -// namespace, -// area: Area::path(path.clone()).into() -// } -// ); -// assert_eq!( -// intent.next().await.unwrap(), -// EventKind::Reconciled { 
-// namespace, -// area: Area::path(path.clone()).into() -// } -// ); -// assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); - -// let path = Path::new(&[b"bar"]).unwrap(); -// let interests = Interests::builder().add_area(namespace, [Area::path(path.clone())]); -// intent.add_interests(interests).await?; - -// assert_eq!( -// intent.next().await.unwrap(), -// EventKind::InterestIntersection { -// namespace, -// area: Area::path(path.clone()).into() -// } -// ); -// assert_eq!( -// intent.next().await.unwrap(), -// EventKind::Reconciled { -// namespace, -// area: Area::path(path.clone()).into() -// } -// ); - -// assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); - -// intent.close().await; - -// assert!(intent.next().await.is_none(),); - -// [alfie, betty].map(Peer::shutdown).try_join().await?; -// Ok(()) -// } - -// /// Test immediate shutdown. -// // TODO: This does not really test much. Used it for log reading of graceful connection termination. -// // Not sure where we should expose whether connections closed gracefully or not? -// #[tokio::test(flavor = "multi_thread")] -// async fn peer_manager_shutdown_immediate() -> Result<()> { -// iroh_test::logging::setup_multithreaded(); -// let mut rng = create_rng("peer_manager_shutdown_immediate"); - -// let [alfie, betty] = spawn_two(&mut rng).await?; -// let (_namespace, _alfie_user, _betty_user) = setup_and_delegate(&alfie, &betty).await?; -// let betty_node_id = betty.node_id(); -// let mut intent = alfie -// .sync_with_peer(betty_node_id, SessionInit::reconcile_once(Interests::all())) -// .await?; -// let completion = intent.complete().await?; -// assert_eq!(completion, Completion::Complete); -// [alfie, betty].map(Peer::shutdown).try_join().await?; -// Ok(()) -// } - -// /// Test shutdown after a timeout. -// // TODO: This does not really test much. Used it for log reading of graceful connection termination. 
-// // Not sure where we should expose whether connections closed gracefully or not? -// #[tokio::test(flavor = "multi_thread")] -// async fn peer_manager_shutdown_timeout() -> Result<()> { -// iroh_test::logging::setup_multithreaded(); -// let mut rng = create_rng("peer_manager_shutdown_timeout"); - -// let [alfie, betty] = spawn_two(&mut rng).await?; -// let (_namespace, _alfie_user, _betty_user) = setup_and_delegate(&alfie, &betty).await?; -// let betty_node_id = betty.node_id(); -// let mut intent = alfie -// .sync_with_peer(betty_node_id, SessionInit::reconcile_once(Interests::all())) -// .await?; -// let completion = intent.complete().await?; -// assert_eq!(completion, Completion::Complete); -// tokio::time::sleep(Duration::from_secs(1)).await; -// [alfie, betty].map(Peer::shutdown).try_join().await?; -// Ok(()) -// } - -// #[tokio::test(flavor = "multi_thread")] -// async fn peer_manager_twoway_loop() -> Result<()> { -// iroh_test::logging::setup_multithreaded(); -// let mut rng = create_rng("peer_manager_twoway_loop"); - -// let [alfie, betty] = spawn_two(&mut rng).await?; -// let (namespace, alfie_user, betty_user) = setup_and_delegate(&alfie, &betty).await?; -// insert(&alfie, namespace, alfie_user, &[b"foo"], "foo 1").await?; -// insert(&betty, namespace, betty_user, &[b"bar"], "bar 1").await?; -// let alfie_node_id = alfie.node_id(); -// let betty_node_id = betty.node_id(); -// for _i in 0..20 { -// let alfie = alfie.clone(); -// let betty = betty.clone(); -// let task_alfie = tokio::task::spawn(async move { -// let mut intent = alfie -// .sync_with_peer(betty_node_id, SessionInit::reconcile_once(Interests::all())) -// .await -// .unwrap(); -// let completion = intent.complete().await.expect("failed to complete intent"); -// assert_eq!(completion, Completion::Complete); -// }); - -// let task_betty = tokio::task::spawn(async move { -// let mut intent = betty -// .sync_with_peer(alfie_node_id, SessionInit::reconcile_once(Interests::all())) -// .await -// 
.unwrap(); -// let completion = intent.complete().await.expect("failed to complete intent"); -// assert_eq!(completion, Completion::Complete); -// }); -// task_alfie.await.unwrap(); -// task_betty.await.unwrap(); -// } -// [alfie, betty].map(Peer::shutdown).try_join().await?; -// Ok(()) -// } - -// mod util { -// use std::sync::{Arc, Mutex}; - -// use anyhow::Result; -// use bytes::Bytes; -// use futures_concurrency::future::TryJoin; -// use iroh_net::{Endpoint, NodeId}; -// use rand::SeedableRng; -// use rand_chacha::ChaCha12Rng; -// use rand_core::CryptoRngCore; -// use tokio::task::JoinHandle; - -// use iroh_willow::{ -// auth::{CapSelector, DelegateTo, RestrictArea}, -// engine::{AcceptOpts, Engine}, -// form::EntryForm, -// net::ALPN, -// proto::{ -// keys::{NamespaceId, NamespaceKind, UserId}, -// meadowcap::AccessMode, -// willow::Path, -// }, -// }; - -// pub fn create_rng(seed: &str) -> ChaCha12Rng { -// let seed = iroh_base::hash::Hash::new(seed); -// ChaCha12Rng::from_seed(*(seed.as_bytes())) -// } - -// #[derive(Debug, Clone)] -// pub struct Peer { -// endpoint: Endpoint, -// engine: Engine, -// accept_task: Arc>>>>, -// } - -// impl Peer { -// pub async fn spawn( -// secret_key: iroh_net::key::SecretKey, -// accept_opts: AcceptOpts, -// ) -> Result { -// let endpoint = Endpoint::builder() -// .secret_key(secret_key) -// .alpns(vec![ALPN.to_vec()]) -// .bind(0) -// .await?; -// let payloads = iroh_blobs::store::mem::Store::default(); -// let create_store = move || iroh_willow::store::memory::Store::new(payloads); -// let engine = Engine::spawn(endpoint.clone(), create_store, accept_opts); -// let accept_task = tokio::task::spawn({ -// let engine = engine.clone(); -// let endpoint = endpoint.clone(); -// async move { -// while let Some(mut conn) = endpoint.accept().await { -// let Ok(alpn) = conn.alpn().await else { -// continue; -// }; -// if alpn != ALPN { -// continue; -// } -// let Ok(conn) = conn.await else { -// continue; -// }; -// 
engine.handle_connection(conn).await?; -// } -// Result::Ok(()) -// } -// }); -// Ok(Self { -// endpoint, -// engine, -// accept_task: Arc::new(Mutex::new(Some(accept_task))), -// }) -// } - -// pub async fn shutdown(self) -> Result<()> { -// let accept_task = self.accept_task.lock().unwrap().take(); -// if let Some(accept_task) = accept_task { -// accept_task.abort(); -// match accept_task.await { -// Err(err) if err.is_cancelled() => {} -// Ok(Ok(())) => {} -// Err(err) => Err(err)?, -// Ok(Err(err)) => Err(err)?, -// } -// } -// self.engine.shutdown().await?; -// self.endpoint.close(0u8.into(), b"").await?; -// Ok(()) -// } - -// pub fn node_id(&self) -> NodeId { -// self.endpoint.node_id() -// } -// } - -// impl std::ops::Deref for Peer { -// type Target = Engine; -// fn deref(&self) -> &Self::Target { -// &self.engine -// } -// } - -// pub async fn spawn_two(rng: &mut impl CryptoRngCore) -> Result<[Peer; 2]> { -// let peers = [ -// iroh_net::key::SecretKey::generate_with_rng(rng), -// iroh_net::key::SecretKey::generate_with_rng(rng), -// ] -// .map(|secret_key| Peer::spawn(secret_key, Default::default())) -// .try_join() -// .await?; - -// peers[0] -// .endpoint -// .add_node_addr(peers[1].endpoint.node_addr().await?)?; - -// peers[1] -// .endpoint -// .add_node_addr(peers[0].endpoint.node_addr().await?)?; - -// Ok(peers) -// } - -// pub async fn setup_and_delegate( -// alfie: &Engine, -// betty: &Engine, -// ) -> Result<(NamespaceId, UserId, UserId)> { -// let user_alfie = alfie.create_user().await?; -// let user_betty = betty.create_user().await?; - -// let namespace_id = alfie -// .create_namespace(NamespaceKind::Owned, user_alfie) -// .await?; - -// let cap_for_betty = alfie -// .delegate_caps( -// CapSelector::widest(namespace_id), -// AccessMode::ReadWrite, -// DelegateTo::new(user_betty, RestrictArea::None), -// ) -// .await?; - -// betty.import_caps(cap_for_betty).await?; -// Ok((namespace_id, user_alfie, user_betty)) -// } - -// pub async fn insert( 
-// handle: &Engine, -// namespace_id: NamespaceId, -// user: UserId, -// path: &[&[u8]], -// bytes: impl Into, -// ) -> Result<()> { -// let path = Path::new(path)?; -// let entry = EntryForm::new_bytes(namespace_id, path, bytes); -// handle.insert(entry, user).await?; -// Ok(()) -// } -// } +use std::time::Duration; + +use anyhow::Result; +use futures_concurrency::future::TryJoin; +use futures_lite::StreamExt; + +use iroh_willow::{ + interest::{Interests, IntoAreaOfInterest}, proto::{grouping::{Area, AreaExt}, willow::{Path, PathExt}}, session::{ + intents::{Completion, EventKind}, + SessionInit, SessionMode, + } +}; + +use self::util::{create_rng, insert, setup_and_delegate, spawn_two, Peer}; + +#[tokio::test(flavor = "multi_thread")] +async fn peer_manager_two_intents() -> Result<()> { + iroh_test::logging::setup_multithreaded(); + let mut rng = create_rng("peer_manager_two_intents"); + + let [alfie, betty] = spawn_two(&mut rng).await?; + let (namespace, _alfie_user, betty_user) = setup_and_delegate(&alfie, &betty).await?; + let betty_node_id = betty.node_id(); + + insert(&betty, namespace, betty_user, &[b"foo", b"1"], "foo 1").await?; + insert(&betty, namespace, betty_user, &[b"bar", b"2"], "bar 2").await?; + insert(&betty, namespace, betty_user, &[b"bar", b"3"], "bar 3").await?; + + let task_foo_path = tokio::task::spawn({ + let alfie = alfie.clone(); + async move { + let path = Path::new(&[b"foo"]).unwrap(); + + let init = SessionInit::new( + Interests::builder().add_area(namespace, [Area::new_path(path.clone())]), + SessionMode::ReconcileOnce, + ); + let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); + + assert_eq!( + intent.next().await.unwrap(), + EventKind::CapabilityIntersection { + namespace, + area: Area::new_full(), + } + ); + + assert_eq!( + intent.next().await.unwrap(), + EventKind::InterestIntersection { + namespace, + area: Area::new_path(path.clone()).into_area_of_interest() + } + ); + + assert_eq!( + 
intent.next().await.unwrap(), + EventKind::Reconciled { + namespace, + area: Area::new_path(path.clone()).into_area_of_interest() + } + ); + + assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); + + assert!(intent.next().await.is_none()); + } + }); + + let task_bar_path = tokio::task::spawn({ + let alfie = alfie.clone(); + async move { + let path = Path::new(&[b"bar"]).unwrap(); + + let interests = Interests::builder().add_area(namespace, [Area::new_path(path.clone())]); + let init = SessionInit::new(interests, SessionMode::ReconcileOnce); + + let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); + + assert_eq!( + intent.next().await.unwrap(), + EventKind::CapabilityIntersection { + namespace, + area: Area::new_full(), + } + ); + + assert_eq!( + intent.next().await.unwrap(), + EventKind::InterestIntersection { + namespace, + area: Area::new_path(path.clone()).into_area_of_interest() + } + ); + + assert_eq!( + intent.next().await.unwrap(), + EventKind::Reconciled { + namespace, + area: Area::new_path(path.clone()).into_area_of_interest() + } + ); + + assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); + + assert!(intent.next().await.is_none()); + } + }); + + task_foo_path.await.unwrap(); + task_bar_path.await.unwrap(); + + // tokio::time::sleep(std::time::Duration::from_secs(1)).await; + + [alfie, betty].map(Peer::shutdown).try_join().await?; + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread")] +async fn peer_manager_update_intent() -> Result<()> { + iroh_test::logging::setup_multithreaded(); + let mut rng = create_rng("peer_manager_update_intent"); + + let [alfie, betty] = spawn_two(&mut rng).await?; + let (namespace, _alfie_user, betty_user) = setup_and_delegate(&alfie, &betty).await?; + let betty_node_id = betty.node_id(); + + insert(&betty, namespace, betty_user, &[b"foo"], "foo 1").await?; + insert(&betty, namespace, betty_user, &[b"bar"], "bar 1").await?; + + let path = Path::new(&[b"foo"]).unwrap(); + 
let interests = Interests::builder().add_area(namespace, [Area::new_path(path.clone())]); + let init = SessionInit::new(interests, SessionMode::Live); + let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); + + assert_eq!( + intent.next().await.unwrap(), + EventKind::CapabilityIntersection { + namespace, + area: Area::new_full(), + } + ); + assert_eq!( + intent.next().await.unwrap(), + EventKind::InterestIntersection { + namespace, + area: Area::new_path(path.clone()).into_area_of_interest() + } + ); + assert_eq!( + intent.next().await.unwrap(), + EventKind::Reconciled { + namespace, + area: Area::new_path(path.clone()).into_area_of_interest() + } + ); + assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); + + let path = Path::new(&[b"bar"]).unwrap(); + let interests = Interests::builder().add_area(namespace, [Area::new_path(path.clone())]); + intent.add_interests(interests).await?; + + assert_eq!( + intent.next().await.unwrap(), + EventKind::InterestIntersection { + namespace, + area: Area::new_path(path.clone()).into_area_of_interest() + } + ); + assert_eq!( + intent.next().await.unwrap(), + EventKind::Reconciled { + namespace, + area: Area::new_path(path.clone()).into_area_of_interest() + } + ); + + assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); + + intent.close().await; + + assert!(intent.next().await.is_none(),); + + [alfie, betty].map(Peer::shutdown).try_join().await?; + Ok(()) +} + +/// Test immediate shutdown. +// TODO: This does not really test much. Used it for log reading of graceful connection termination. +// Not sure where we should expose whether connections closed gracefully or not? 
+#[tokio::test(flavor = "multi_thread")] +async fn peer_manager_shutdown_immediate() -> Result<()> { + iroh_test::logging::setup_multithreaded(); + let mut rng = create_rng("peer_manager_shutdown_immediate"); + + let [alfie, betty] = spawn_two(&mut rng).await?; + let (_namespace, _alfie_user, _betty_user) = setup_and_delegate(&alfie, &betty).await?; + let betty_node_id = betty.node_id(); + let mut intent = alfie + .sync_with_peer(betty_node_id, SessionInit::reconcile_once(Interests::all())) + .await?; + let completion = intent.complete().await?; + assert_eq!(completion, Completion::Complete); + [alfie, betty].map(Peer::shutdown).try_join().await?; + Ok(()) +} + +/// Test shutdown after a timeout. +// TODO: This does not really test much. Used it for log reading of graceful connection termination. +// Not sure where we should expose whether connections closed gracefully or not? +#[tokio::test(flavor = "multi_thread")] +async fn peer_manager_shutdown_timeout() -> Result<()> { + iroh_test::logging::setup_multithreaded(); + let mut rng = create_rng("peer_manager_shutdown_timeout"); + + let [alfie, betty] = spawn_two(&mut rng).await?; + let (_namespace, _alfie_user, _betty_user) = setup_and_delegate(&alfie, &betty).await?; + let betty_node_id = betty.node_id(); + let mut intent = alfie + .sync_with_peer(betty_node_id, SessionInit::reconcile_once(Interests::all())) + .await?; + let completion = intent.complete().await?; + assert_eq!(completion, Completion::Complete); + tokio::time::sleep(Duration::from_secs(1)).await; + [alfie, betty].map(Peer::shutdown).try_join().await?; + Ok(()) +} + +#[tokio::test(flavor = "multi_thread")] +async fn peer_manager_twoway_loop() -> Result<()> { + iroh_test::logging::setup_multithreaded(); + let mut rng = create_rng("peer_manager_twoway_loop"); + + let [alfie, betty] = spawn_two(&mut rng).await?; + let (namespace, alfie_user, betty_user) = setup_and_delegate(&alfie, &betty).await?; + insert(&alfie, namespace, alfie_user, &[b"foo"], "foo 
1").await?; + insert(&betty, namespace, betty_user, &[b"bar"], "bar 1").await?; + let alfie_node_id = alfie.node_id(); + let betty_node_id = betty.node_id(); + let rounds = 20; + for i in 0..rounds { + println!("\n\nROUND {i} of {rounds}\n\n"); + let alfie = alfie.clone(); + let betty = betty.clone(); + let task_alfie = tokio::task::spawn(async move { + let mut intent = alfie + .sync_with_peer(betty_node_id, SessionInit::reconcile_once(Interests::all())) + .await + .unwrap(); + let completion = intent.complete().await.expect("failed to complete intent"); + assert_eq!(completion, Completion::Complete); + }); + + let task_betty = tokio::task::spawn(async move { + let mut intent = betty + .sync_with_peer(alfie_node_id, SessionInit::reconcile_once(Interests::all())) + .await + .unwrap(); + let completion = intent.complete().await.expect("failed to complete intent"); + assert_eq!(completion, Completion::Complete); + }); + task_alfie.await.unwrap(); + task_betty.await.unwrap(); + } + [alfie, betty].map(Peer::shutdown).try_join().await?; + Ok(()) +} + +mod util { + use std::sync::{Arc, Mutex}; + + use anyhow::Result; + use bytes::Bytes; + use futures_concurrency::future::TryJoin; + use iroh_net::{Endpoint, NodeId}; + use rand::SeedableRng; + use rand_chacha::ChaCha12Rng; + use rand_core::CryptoRngCore; + use tokio::task::JoinHandle; + + use iroh_willow::{ + engine::{AcceptOpts, Engine}, form::EntryForm, interest::{CapSelector, DelegateTo, RestrictArea}, net::ALPN, proto::{ + keys::{NamespaceId, NamespaceKind, UserId}, + meadowcap::AccessMode, + willow::{Path, PathExt}, + } + }; + + pub fn create_rng(seed: &str) -> ChaCha12Rng { + let seed = iroh_base::hash::Hash::new(seed); + ChaCha12Rng::from_seed(*(seed.as_bytes())) + } + + #[derive(Debug, Clone)] + pub struct Peer { + endpoint: Endpoint, + engine: Engine, + accept_task: Arc>>>>, + } + + impl Peer { + pub async fn spawn( + secret_key: iroh_net::key::SecretKey, + accept_opts: AcceptOpts, + ) -> Result { + let endpoint = 
Endpoint::builder() + .secret_key(secret_key) + .alpns(vec![ALPN.to_vec()]) + .bind(0) + .await?; + let payloads = iroh_blobs::store::mem::Store::default(); + let create_store = move || iroh_willow::store::memory::Store::new(payloads); + let engine = Engine::spawn(endpoint.clone(), create_store, accept_opts); + let accept_task = tokio::task::spawn({ + let engine = engine.clone(); + let endpoint = endpoint.clone(); + async move { + while let Some(mut conn) = endpoint.accept().await { + let Ok(alpn) = conn.alpn().await else { + continue; + }; + if alpn != ALPN { + continue; + } + let Ok(conn) = conn.await else { + continue; + }; + engine.handle_connection(conn).await?; + } + Result::Ok(()) + } + }); + Ok(Self { + endpoint, + engine, + accept_task: Arc::new(Mutex::new(Some(accept_task))), + }) + } + + pub async fn shutdown(self) -> Result<()> { + let accept_task = self.accept_task.lock().unwrap().take(); + if let Some(accept_task) = accept_task { + accept_task.abort(); + match accept_task.await { + Err(err) if err.is_cancelled() => {} + Ok(Ok(())) => {} + Err(err) => Err(err)?, + Ok(Err(err)) => Err(err)?, + } + } + self.engine.shutdown().await?; + self.endpoint.close(0u8.into(), b"").await?; + Ok(()) + } + + pub fn node_id(&self) -> NodeId { + self.endpoint.node_id() + } + } + + impl std::ops::Deref for Peer { + type Target = Engine; + fn deref(&self) -> &Self::Target { + &self.engine + } + } + + pub async fn spawn_two(rng: &mut impl CryptoRngCore) -> Result<[Peer; 2]> { + let peers = [ + iroh_net::key::SecretKey::generate_with_rng(rng), + iroh_net::key::SecretKey::generate_with_rng(rng), + ] + .map(|secret_key| Peer::spawn(secret_key, Default::default())) + .try_join() + .await?; + + peers[0] + .endpoint + .add_node_addr(peers[1].endpoint.node_addr().await?)?; + + peers[1] + .endpoint + .add_node_addr(peers[0].endpoint.node_addr().await?)?; + + Ok(peers) + } + + pub async fn setup_and_delegate( + alfie: &Engine, + betty: &Engine, + ) -> Result<(NamespaceId, UserId, 
UserId)> { + let user_alfie = alfie.create_user().await?; + let user_betty = betty.create_user().await?; + + let namespace_id = alfie + .create_namespace(NamespaceKind::Owned, user_alfie) + .await?; + + let cap_for_betty = alfie + .delegate_caps( + CapSelector::widest(namespace_id), + AccessMode::Write, + DelegateTo::new(user_betty, RestrictArea::None), + ) + .await?; + + betty.import_caps(cap_for_betty).await?; + Ok((namespace_id, user_alfie, user_betty)) + } + + pub async fn insert( + handle: &Engine, + namespace_id: NamespaceId, + user: UserId, + path: &[&[u8]], + bytes: impl Into, + ) -> Result<()> { + let path = Path::new(path)?; + let entry = EntryForm::new_bytes(namespace_id, path, bytes); + handle.insert(entry, user).await?; + Ok(()) + } +} From 0b5456d496f69e143297dfb8935392a8c824f129 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Mon, 12 Aug 2024 17:10:39 +0200 Subject: [PATCH 122/198] fix: cargo fix --- iroh-willow/src/proto/data_model.rs | 1 - iroh-willow/src/session/error.rs | 2 +- iroh-willow/src/session/pai_finder.rs | 2 +- iroh-willow/src/store/auth.rs | 2 +- iroh-willow/src/store/memory.rs | 2 +- 5 files changed, 4 insertions(+), 5 deletions(-) diff --git a/iroh-willow/src/proto/data_model.rs b/iroh-willow/src/proto/data_model.rs index ba1de841d6..1561a4eaa0 100644 --- a/iroh-willow/src/proto/data_model.rs +++ b/iroh-willow/src/proto/data_model.rs @@ -3,7 +3,6 @@ use ufotofu::sync::{consumer::IntoVec, producer::FromSlice}; use willow_data_model::{AuthorisationToken as _, InvalidPathError}; use willow_encoding::sync::{Decodable, Encodable}; -use crate::proto::keys::UserSignature; use super::{ keys, diff --git a/iroh-willow/src/session/error.rs b/iroh-willow/src/session/error.rs index 9eff5f0c31..28f3129231 100644 --- a/iroh-willow/src/session/error.rs +++ b/iroh-willow/src/session/error.rs @@ -2,7 +2,7 @@ use ed25519_dalek::SignatureError; use crate::{ proto::{ - meadowcap::{self, UserId}, + meadowcap::{UserId}, 
sync::ResourceHandle, willow::Unauthorised, }, diff --git a/iroh-willow/src/session/pai_finder.rs b/iroh-willow/src/session/pai_finder.rs index 86615b7774..1a63609f01 100644 --- a/iroh-willow/src/session/pai_finder.rs +++ b/iroh-willow/src/session/pai_finder.rs @@ -506,7 +506,7 @@ mod tests { use crate::{ proto::{ grouping::{Area, AreaSubspace}, - keys::{NamespaceKind, NamespaceSecretKey, UserId, UserPublicKey, UserSecretKey}, + keys::{NamespaceKind, NamespaceSecretKey, UserId, UserSecretKey}, meadowcap::ReadAuthorisation, wgps::{ IntersectionMessage, Message, PaiBindFragment, PaiReplyFragment, diff --git a/iroh-willow/src/store/auth.rs b/iroh-willow/src/store/auth.rs index b74d9129f6..2dd8f86380 100644 --- a/iroh-willow/src/store/auth.rs +++ b/iroh-willow/src/store/auth.rs @@ -12,7 +12,7 @@ use crate::{ }, proto::{ data_model::WriteCapability, - grouping::{Area, AreaOfInterest, AreaOfInterestExt}, + grouping::{AreaOfInterest}, keys::{NamespaceId, UserId}, meadowcap::{AccessMode, FailedDelegationError, McCapability, ReadAuthorisation}, }, diff --git a/iroh-willow/src/store/memory.rs b/iroh-willow/src/store/memory.rs index a630745ebd..7b3dba404d 100644 --- a/iroh-willow/src/store/memory.rs +++ b/iroh-willow/src/store/memory.rs @@ -10,7 +10,7 @@ use crate::{ data_model::{AuthorisedEntry, Entry, EntryExt, WriteCapability}, grouping::{Range, RangeEnd, ThreeDRange}, keys::{NamespaceId, NamespaceSecretKey, UserId, UserSecretKey}, - meadowcap::{self, is_wider_than, ReadAuthorisation, AccessMode}, + meadowcap::{self, is_wider_than, ReadAuthorisation}, wgps::Fingerprint, }, store::traits::{self, RangeSplit, SplitAction, SplitOpts}, From 1eed65dded8d6127165784924d3518956ea9abc7 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Mon, 12 Aug 2024 17:12:39 +0200 Subject: [PATCH 123/198] chore: clippy fix --- iroh-willow/src/engine/peer_manager.rs | 8 ++++---- iroh-willow/src/proto/data_model.rs | 2 +- iroh-willow/src/proto/grouping.rs | 26 ++++++++++++------------- 
iroh-willow/src/proto/keys.rs | 5 +---- iroh-willow/src/proto/meadowcap.rs | 4 ++-- iroh-willow/src/proto/pai.rs | 1 - iroh-willow/src/session/aoi_finder.rs | 2 +- iroh-willow/src/session/capabilities.rs | 4 ++-- iroh-willow/src/session/intents.rs | 3 +-- iroh-willow/src/session/pai_finder.rs | 2 ++ iroh-willow/src/session/reconciler.rs | 4 ++-- iroh-willow/src/store/auth.rs | 4 ++-- iroh-willow/src/store/entry.rs | 2 +- iroh-willow/src/store/memory.rs | 4 ++-- 14 files changed, 33 insertions(+), 38 deletions(-) diff --git a/iroh-willow/src/engine/peer_manager.rs b/iroh-willow/src/engine/peer_manager.rs index 4ebe426bdc..957e934a9e 100644 --- a/iroh-willow/src/engine/peer_manager.rs +++ b/iroh-willow/src/engine/peer_manager.rs @@ -287,7 +287,7 @@ impl PeerManager { channel_streams, }) }; - let abort_handle = spawn_conn_task(&mut self.conn_tasks, &peer_info, fut); + let abort_handle = spawn_conn_task(&mut self.conn_tasks, peer_info, fut); peer_info.abort_handle = Some(abort_handle); } } @@ -329,7 +329,7 @@ impl PeerManager { channel_streams, }) }; - let abort_handle = spawn_conn_task(&mut self.conn_tasks, &peer_info, fut); + let abort_handle = spawn_conn_task(&mut self.conn_tasks, peer_info, fut); peer_info.abort_handle = Some(abort_handle); peer_info.state = PeerState::Pending { intents: vec![intent], @@ -518,7 +518,7 @@ impl PeerManager { let session_handle = self.actor.init_session(conn_handle, intents).await?; let fut = fut.map_ok(move |()| ConnStep::Done { conn }); - let abort_handle = spawn_conn_task(&mut self.conn_tasks, &peer_info, fut); + let abort_handle = spawn_conn_task(&mut self.conn_tasks, peer_info, fut); let SessionHandle { cancel_token, @@ -542,7 +542,7 @@ impl PeerManager { let error = terminate_gracefully(&conn, me, peer, we_cancelled).await?; Ok(ConnStep::Closed { conn, error }) }; - let abort_handle = spawn_conn_task(&mut self.conn_tasks, &peer_info, fut); + let abort_handle = spawn_conn_task(&mut self.conn_tasks, peer_info, fut); if let 
PeerState::Closing { .. } = &peer_info.state { peer_info.abort_handle = Some(abort_handle); } else { diff --git a/iroh-willow/src/proto/data_model.rs b/iroh-willow/src/proto/data_model.rs index 1561a4eaa0..be208457a7 100644 --- a/iroh-willow/src/proto/data_model.rs +++ b/iroh-willow/src/proto/data_model.rs @@ -112,7 +112,7 @@ impl PathExt for Path { fn new(slices: &[&[u8]]) -> Result { let component_count = slices.len(); let total_len = slices.iter().map(|x| x.len()).sum::(); - let iter = slices.iter().map(|c| Component::new(c)).flatten(); + let iter = slices.iter().filter_map(|c| Component::new(c)); // TODO: Avoid this alloc by adding willow_data_model::Path::try_new_from_iter or such. let mut iter = iter.collect::>().into_iter(); let path = willow_data_model::Path::new_from_iter(total_len, &mut iter)?; diff --git a/iroh-willow/src/proto/grouping.rs b/iroh-willow/src/proto/grouping.rs index cadf82452f..aa92ad833a 100644 --- a/iroh-willow/src/proto/grouping.rs +++ b/iroh-willow/src/proto/grouping.rs @@ -70,11 +70,11 @@ pub type AreaOfInterest = willow_data_model::grouping::AreaOfInterest< >; pub trait AreaOfInterestExt { - fn new(area: Area) -> AreaOfInterest; + fn with_area(area: Area) -> AreaOfInterest; } impl AreaOfInterestExt for AreaOfInterest { - fn new(area: Area) -> AreaOfInterest { + fn with_area(area: Area) -> AreaOfInterest { AreaOfInterest { area, max_count: 0, @@ -86,7 +86,7 @@ impl AreaOfInterestExt for AreaOfInterest { pub trait AreaExt { fn includes_point(&self, point: &Point) -> bool; fn new_path(path: Path) -> Area; - fn into_range(&self) -> Range3d; + fn to_range(&self) -> Range3d; } impl AreaExt for Area { @@ -98,7 +98,7 @@ impl AreaExt for Area { Self::new(AreaSubspace::Any, path, Range::full()) } - fn into_range(&self) -> Range3d { + fn to_range(&self) -> Range3d { let subspaces = match self.subspace() { AreaSubspace::Id(id) => match id.successor() { None => Range::new_open(*id), @@ -111,7 +111,7 @@ impl AreaExt for Area { None => 
Range::new_open(path.clone()), Some(end) => Range::new_closed(path.clone(), end).expect("successor is bigger"), }; - Range3d::new(subspaces, path_range, self.times().clone()) + Range3d::new(subspaces, path_range, *self.times()) } } @@ -154,7 +154,7 @@ impl Point { } pub fn from_entry(entry: &Entry) -> Self { Self { - path: entry.path().clone().into(), + path: entry.path().clone(), timestamp: entry.timestamp(), subspace_id: *entry.subspace_id(), } @@ -206,10 +206,9 @@ pub mod serde_encoding { Deserialize::deserialize(deserializer)?; let decoded_area = { let mut producer = FromSlice::new(&encoded_area); - let decoded = - willow_data_model::grouping::Area::relative_decode(&relative, &mut producer) - .map_err(|err| serde::de::Error::custom(format!("{err}")))?; - decoded + + willow_data_model::grouping::Area::relative_decode(&relative, &mut producer) + .map_err(|err| serde::de::Error::custom(format!("{err}")))? }; let aoi = willow_data_model::grouping::AreaOfInterest { area: decoded_area, @@ -254,10 +253,9 @@ pub mod serde_encoding { let encoded_range: Vec = Deserialize::deserialize(deserializer)?; let decoded_range = { let mut producer = FromSlice::new(&encoded_range); - let decoded = - willow_data_model::grouping::Range3d::relative_decode(&relative, &mut producer) - .map_err(|err| serde::de::Error::custom(format!("{err}")))?; - decoded + + willow_data_model::grouping::Range3d::relative_decode(&relative, &mut producer) + .map_err(|err| serde::de::Error::custom(format!("{err}")))? 
}; Ok(Self(decoded_range)) } diff --git a/iroh-willow/src/proto/keys.rs b/iroh-willow/src/proto/keys.rs index c78c2ded96..30c348042d 100644 --- a/iroh-willow/src/proto/keys.rs +++ b/iroh-willow/src/proto/keys.rs @@ -575,10 +575,7 @@ mod willow_impls { impl willow_data_model::SubspaceId for UserId { fn successor(&self) -> Option { - match increment_by_one(self.as_bytes()) { - Some(bytes) => Some(Self::from_bytes_unchecked(bytes)), - None => None, - } + increment_by_one(self.as_bytes()).map(Self::from_bytes_unchecked) } } diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index b9d1381383..89e3de5dde 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -210,7 +210,7 @@ pub mod serde_encoding { let decoded = { let mut producer = FromSlice::new(&data); let decoded = McCapability::relative_decode(&relative, &mut producer) - .map_err(|e| serde::de::Error::custom(e))?; + .map_err(serde::de::Error::custom)?; Self(decoded) }; Ok(decoded) @@ -242,7 +242,7 @@ pub mod serde_encoding { let decoded = { let mut producer = FromSlice::new(&data); let decoded = McSubspaceCapability::decode(&mut producer) - .map_err(|e| serde::de::Error::custom(e))?; + .map_err(serde::de::Error::custom)?; Self(decoded) }; Ok(decoded) diff --git a/iroh-willow/src/proto/pai.rs b/iroh-willow/src/proto/pai.rs index 600f4704b4..7d7ad15a0c 100644 --- a/iroh-willow/src/proto/pai.rs +++ b/iroh-willow/src/proto/pai.rs @@ -136,7 +136,6 @@ impl FragmentKit { FragmentKit::Complete(namespace_id, path) => { let pairs = path .all_prefixes() - .into_iter() .map(|prefix| (namespace_id, prefix)) .collect(); FragmentSet::Complete(pairs) diff --git a/iroh-willow/src/session/aoi_finder.rs b/iroh-willow/src/session/aoi_finder.rs index 3b1a49bfa4..67d70deda6 100644 --- a/iroh-willow/src/session/aoi_finder.rs +++ b/iroh-willow/src/session/aoi_finder.rs @@ -154,7 +154,7 @@ impl IntersectionFinder { self.co .yield_(Output::SignAndSendCapability { handle, 
- capability: authorisation.read_cap().clone().into(), + capability: authorisation.read_cap().clone(), }) .await; } diff --git a/iroh-willow/src/session/capabilities.rs b/iroh-willow/src/session/capabilities.rs index eb4bb6f873..9a9d8bd0b3 100644 --- a/iroh-willow/src/session/capabilities.rs +++ b/iroh-willow/src/session/capabilities.rs @@ -71,7 +71,7 @@ impl Capabilities { ) -> Result { let inner = self.0.borrow(); let signable = inner.challenge.signable()?; - let signature = secret_store.sign_user(&capability.receiver(), &signable)?; + let signature = secret_store.sign_user(capability.receiver(), &signable)?; Ok(SetupBindReadCapability { capability: capability.into(), handle: intersection_handle, @@ -153,7 +153,7 @@ impl Capabilities { ) -> Result { let inner = self.0.borrow(); let signable = inner.challenge.signable()?; - let signature = secrets.sign_user(&cap.receiver(), &signable)?; + let signature = secrets.sign_user(cap.receiver(), &signable)?; let message = PaiReplySubspaceCapability { handle, capability: cap.clone().into(), diff --git a/iroh-willow/src/session/intents.rs b/iroh-willow/src/session/intents.rs index 903dfa500e..834fc5c9e6 100644 --- a/iroh-willow/src/session/intents.rs +++ b/iroh-willow/src/session/intents.rs @@ -302,8 +302,7 @@ impl IntentDispatcher { let event_tx = info.event_tx; let update_rx = self.intent_update_rx.remove(&id); let update_rx = update_rx - .map(|stream| stream.into_inner()) - .flatten() + .and_then(|stream| stream.into_inner()) .map(|stream| stream.into_inner()); let channels = match (event_tx, update_rx) { (Some(event_tx), Some(update_rx)) => Some(IntentChannels { diff --git a/iroh-willow/src/session/pai_finder.rs b/iroh-willow/src/session/pai_finder.rs index 1a63609f01..f96cbd499c 100644 --- a/iroh-willow/src/session/pai_finder.rs +++ b/iroh-willow/src/session/pai_finder.rs @@ -3,8 +3,10 @@ //! As defined by the willow spec: [Private Area Intersection](https://willowprotocol.org/specs/pai/index.html) //! //! 
Partly ported from the implementation in earthstar and willow: +//! //! * https://github.com/earthstar-project/willow-js/blob/0db4b9ec7710fb992ab75a17bd8557040d9a1062/src/wgps/pai/pai_finder.ts //! * https://github.com/earthstar-project/earthstar/blob/16d6d4028c22fdbb72f7395013b29be7dcd9217a/src/schemes/schemes.ts#L662 +//! //! Licensed under LGPL and ported into this MIT/Apache codebase with explicit permission //! from the original author (gwil). diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index 754acce9fe..b413a08110 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -409,7 +409,7 @@ impl Target { } async fn initiate(&mut self, shared: &Shared) -> Result<(), Error> { - let range = self.intersection.area().into_range(); + let range = self.intersection.area().to_range(); let fingerprint = self.snapshot.fingerprint(self.namespace(), &range)?; self.send_fingerprint(shared, range, fingerprint, None) .await?; @@ -572,7 +572,7 @@ impl Target { let msg = ReconciliationSendEntry { entry: LengthyEntry::new(entry, available), static_token_handle, - dynamic_token: dynamic_token.into(), + dynamic_token, }; shared.send.send(msg).await?; diff --git a/iroh-willow/src/store/auth.rs b/iroh-willow/src/store/auth.rs index 2dd8f86380..87eeae48a8 100644 --- a/iroh-willow/src/store/auth.rs +++ b/iroh-willow/src/store/auth.rs @@ -210,7 +210,7 @@ impl Auth { let user_id = read_cap.receiver(); let user_secret = self .secrets - .get_user(&user_id) + .get_user(user_id) .ok_or(AuthError::MissingUserSecret(*user_id))?; let area = restrict_area.with_default(read_cap.granted_area()); let new_read_cap = read_cap.delegate(&user_secret, &to, &area)?; @@ -242,7 +242,7 @@ impl Auth { let cap = self.get_write_cap(from)?.ok_or(AuthError::NoCapability)?; let user_secret = self .secrets - .get_user(&cap.receiver()) + .get_user(cap.receiver()) .ok_or(AuthError::MissingUserSecret(*cap.receiver()))?; let area = 
restrict_area.with_default(cap.granted_area()); let new_cap = cap.delegate(&user_secret, &to, &area)?; diff --git a/iroh-willow/src/store/entry.rs b/iroh-willow/src/store/entry.rs index 45984591b9..58f215f954 100644 --- a/iroh-willow/src/store/entry.rs +++ b/iroh-willow/src/store/entry.rs @@ -148,7 +148,7 @@ impl Broadcaster { } fn broadcast(&mut self, entry: &AuthorisedEntry, origin: EntryOrigin) { - let Some(sessions) = self.watched_areas.get_mut(&entry.namespace_id()) else { + let Some(sessions) = self.watched_areas.get_mut(entry.namespace_id()) else { return; }; let mut dropped_receivers = vec![]; diff --git a/iroh-willow/src/store/memory.rs b/iroh-willow/src/store/memory.rs index 7b3dba404d..36a38a0b33 100644 --- a/iroh-willow/src/store/memory.rs +++ b/iroh-willow/src/store/memory.rs @@ -219,14 +219,14 @@ impl traits::EntryStorage for Rc> { return Ok(false); } if existing.subspace_id() == new.subspace_id() - && existing.path().is_prefix_of(&new.path()) + && existing.path().is_prefix_of(new.path()) && existing.is_newer_than(new) { // we cannot insert the entry, a newer entry exists return Ok(false); } if new.subspace_id() == existing.subspace_id() - && new.path().is_prefix_of(&existing.path()) + && new.path().is_prefix_of(existing.path()) && new.is_newer_than(existing) { to_remove.push(i); From 33f2fcbbf35d4839393fbc4e03cf79b294465676 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Mon, 12 Aug 2024 17:18:27 +0200 Subject: [PATCH 124/198] cleanup --- iroh-willow/src/net.rs | 10 ++--- iroh-willow/src/proto/data_model.rs | 4 +- iroh-willow/src/proto/grouping.rs | 62 +-------------------------- iroh-willow/src/session/pai_finder.rs | 2 +- iroh-willow/tests/basic.rs | 10 ++--- 5 files changed, 15 insertions(+), 73 deletions(-) diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 3242d1a63a..8d230e1fc5 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -481,7 +481,7 @@ mod tests { namespace_id, user_alfie, n_alfie, - |n| 
Path::new(&[b"alfie", n.to_string().as_bytes()]), + |n| Path::from_bytes(&[b"alfie", n.to_string().as_bytes()]), |n| format!("alfie{n}"), &mut expected_entries, ) @@ -492,7 +492,7 @@ mod tests { namespace_id, user_betty, n_betty, - |n| Path::new(&[b"betty", n.to_string().as_bytes()]), + |n| Path::from_bytes(&[b"betty", n.to_string().as_bytes()]), |n| format!("betty{n}"), &mut expected_entries, ) @@ -618,7 +618,7 @@ mod tests { namespace_id, user_alfie, n_init, - |n| Path::new(&[b"alfie-init", n.to_string().as_bytes()]), + |n| Path::from_bytes(&[b"alfie-init", n.to_string().as_bytes()]), |n| format!("alfie{n}"), &mut expected_entries, ) @@ -629,7 +629,7 @@ mod tests { namespace_id, user_betty, n_init, - |n| Path::new(&[b"betty-init", n.to_string().as_bytes()]), + |n| Path::from_bytes(&[b"betty-init", n.to_string().as_bytes()]), |n| format!("betty{n}"), &mut expected_entries, ) @@ -652,7 +652,7 @@ mod tests { let handle_alfie = handle_alfie.clone(); let count = 3; let content_fn = |i: usize| format!("alfie live {i}"); - let path_fn = |i: usize| Path::new(&[b"alfie-live", i.to_string().as_bytes()]); + let path_fn = |i: usize| Path::from_bytes(&[b"alfie-live", i.to_string().as_bytes()]); let mut track_entries = vec![]; async move { diff --git a/iroh-willow/src/proto/data_model.rs b/iroh-willow/src/proto/data_model.rs index be208457a7..02223b14e9 100644 --- a/iroh-willow/src/proto/data_model.rs +++ b/iroh-willow/src/proto/data_model.rs @@ -105,11 +105,11 @@ impl From for InvalidPathError2 { } pub trait PathExt { - fn new(slices: &[&[u8]]) -> Result; + fn from_bytes(slices: &[&[u8]]) -> Result; } impl PathExt for Path { - fn new(slices: &[&[u8]]) -> Result { + fn from_bytes(slices: &[&[u8]]) -> Result { let component_count = slices.len(); let total_len = slices.iter().map(|x| x.len()).sum::(); let iter = slices.iter().filter_map(|c| Component::new(c)); diff --git a/iroh-willow/src/proto/grouping.rs b/iroh-willow/src/proto/grouping.rs index aa92ad833a..259f9f6f42 100644 
--- a/iroh-willow/src/proto/grouping.rs +++ b/iroh-willow/src/proto/grouping.rs @@ -5,33 +5,6 @@ use super::data_model::{ Entry, Path, SubspaceId, Timestamp, MAX_COMPONENT_COUNT, MAX_COMPONENT_LENGTH, MAX_PATH_LENGTH, }; -// /// A three-dimensional range that includes every [`Entry`] included in all three of its ranges. -// #[derive( -// Debug, Clone, Hash, Eq, PartialEq, derive_more::From, derive_more::Into, derive_more::Deref, -// )] -// pub struct Three3Range( -// willow_data_model::grouping::Range3d< -// MAX_COMPONENT_LENGTH, -// MAX_COMPONENT_COUNT, -// MAX_PATH_LENGTH, -// SubspaceId, -// >, -// ); - -/// A grouping of entries. -/// [Definition](https://willowprotocol.org/specs/grouping-entries/index.html#areas). -// #[derive( -// Debug, Clone, Eq, PartialEq, Hash, derive_more::From, derive_more::Into, derive_more::Deref, -// )] -// pub struct Area( -// willow_data_model::grouping::Area< -// MAX_COMPONENT_LENGTH, -// MAX_COMPONENT_COUNT, -// MAX_PATH_LENGTH, -// SubspaceId, -// >, -// ); - pub type Range3d = willow_data_model::grouping::Range3d< MAX_COMPONENT_LENGTH, MAX_COMPONENT_COUNT, @@ -50,18 +23,6 @@ pub type Area = willow_data_model::grouping::Area< pub type AreaSubspace = willow_data_model::grouping::AreaSubspace; -/// A grouping of [`crate::Entry`]s that are among the newest in some [store](https://willowprotocol.org/specs/data-model/index.html#store). -/// -/// [Definition](https://willowprotocol.org/specs/grouping-entries/index.html#aois). -// #[derive(Debug, Clone, Eq, PartialEq, derive_more::From, derive_more::Into, derive_more::Deref)] -// pub struct AreaOfInterest( -// willow_data_model::grouping::AreaOfInterest< -// MAX_COMPONENT_LENGTH, -// MAX_COMPONENT_COUNT, -// MAX_PATH_LENGTH, -// SubspaceId, -// >, -// ); pub type AreaOfInterest = willow_data_model::grouping::AreaOfInterest< MAX_COMPONENT_LENGTH, MAX_COMPONENT_COUNT, @@ -115,25 +76,6 @@ impl AreaExt for Area { } } -// impl Area { -// /// Create a new [`Area`]. 
-// pub fn new(subspace: AreaSubspace, path: Path, times: Range) -> Self { -// Self(willow_data_model::grouping::Area::new( -// subspace, -// path.into(), -// times, -// )) -// } - -// pub fn includes_point(&self, point: &Point) -> bool { -// self.includes_area(&point.into_area()) -// } - -// pub fn path(path: Path) -> Self { -// Self::new(AreaSubspace::Any, path, Range::full()) -// } -// } - /// A single point in the 3D range space. /// /// I.e. an entry. @@ -273,9 +215,9 @@ mod tests { #[test] fn area_eq() { - let p1 = Path::new(&[b"foo", b"bar"]).unwrap(); + let p1 = Path::from_bytes(&[b"foo", b"bar"]).unwrap(); let a1 = Area::new_path(p1); - let p2 = Path::new(&[b"foo", b"bar"]).unwrap(); + let p2 = Path::from_bytes(&[b"foo", b"bar"]).unwrap(); let a2 = Area::new_path(p2); assert_eq!(a1, a2); let mut set = HashSet::new(); diff --git a/iroh-willow/src/session/pai_finder.rs b/iroh-willow/src/session/pai_finder.rs index f96cbd499c..a9d302610f 100644 --- a/iroh-willow/src/session/pai_finder.rs +++ b/iroh-willow/src/session/pai_finder.rs @@ -581,7 +581,7 @@ mod tests { let betty_area = Area::new( AreaSubspace::Any, - Path::new(&[b"chess"]).unwrap(), + Path::from_bytes(&[b"chess"]).unwrap(), Default::default(), ); let betty_auth = root_auth diff --git a/iroh-willow/tests/basic.rs b/iroh-willow/tests/basic.rs index cc509a1375..e2d5c723f3 100644 --- a/iroh-willow/tests/basic.rs +++ b/iroh-willow/tests/basic.rs @@ -29,7 +29,7 @@ async fn peer_manager_two_intents() -> Result<()> { let task_foo_path = tokio::task::spawn({ let alfie = alfie.clone(); async move { - let path = Path::new(&[b"foo"]).unwrap(); + let path = Path::from_bytes(&[b"foo"]).unwrap(); let init = SessionInit::new( Interests::builder().add_area(namespace, [Area::new_path(path.clone())]), @@ -70,7 +70,7 @@ async fn peer_manager_two_intents() -> Result<()> { let task_bar_path = tokio::task::spawn({ let alfie = alfie.clone(); async move { - let path = Path::new(&[b"bar"]).unwrap(); + let path = 
Path::from_bytes(&[b"bar"]).unwrap(); let interests = Interests::builder().add_area(namespace, [Area::new_path(path.clone())]); let init = SessionInit::new(interests, SessionMode::ReconcileOnce); @@ -129,7 +129,7 @@ async fn peer_manager_update_intent() -> Result<()> { insert(&betty, namespace, betty_user, &[b"foo"], "foo 1").await?; insert(&betty, namespace, betty_user, &[b"bar"], "bar 1").await?; - let path = Path::new(&[b"foo"]).unwrap(); + let path = Path::from_bytes(&[b"foo"]).unwrap(); let interests = Interests::builder().add_area(namespace, [Area::new_path(path.clone())]); let init = SessionInit::new(interests, SessionMode::Live); let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); @@ -157,7 +157,7 @@ async fn peer_manager_update_intent() -> Result<()> { ); assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); - let path = Path::new(&[b"bar"]).unwrap(); + let path = Path::from_bytes(&[b"bar"]).unwrap(); let interests = Interests::builder().add_area(namespace, [Area::new_path(path.clone())]); intent.add_interests(interests).await?; @@ -416,7 +416,7 @@ mod util { path: &[&[u8]], bytes: impl Into, ) -> Result<()> { - let path = Path::new(path)?; + let path = Path::from_bytes(path)?; let entry = EntryForm::new_bytes(namespace_id, path, bytes); handle.insert(entry, user).await?; Ok(()) From c2d0015491f1f04ae60d3c6b4455a8dd0728363b Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Mon, 12 Aug 2024 17:26:12 +0200 Subject: [PATCH 125/198] fix: cleanup imports --- iroh-willow/src/engine/actor.rs | 2 +- iroh-willow/src/form.rs | 2 +- iroh-willow/src/net.rs | 4 +-- iroh-willow/src/proto.rs | 2 -- iroh-willow/src/proto/meadowcap.rs | 2 +- iroh-willow/src/proto/wgps/messages.rs | 2 +- iroh-willow/src/session/aoi_finder.rs | 2 +- iroh-willow/src/session/capabilities.rs | 2 +- iroh-willow/src/session/channels.rs | 2 +- iroh-willow/src/session/error.rs | 4 +-- iroh-willow/src/session/pai_finder.rs | 6 ++--- 
iroh-willow/src/session/payload.rs | 2 +- iroh-willow/src/session/reconciler.rs | 33 ++++++++++++------------ iroh-willow/src/session/resource.rs | 2 +- iroh-willow/src/session/run.rs | 4 +-- iroh-willow/src/session/static_tokens.rs | 4 +-- iroh-willow/src/store.rs | 2 +- iroh-willow/src/store/entry.rs | 2 +- iroh-willow/src/store/traits.rs | 4 +-- 19 files changed, 41 insertions(+), 42 deletions(-) diff --git a/iroh-willow/src/engine/actor.rs b/iroh-willow/src/engine/actor.rs index 863444b64d..5ecf3d0de9 100644 --- a/iroh-willow/src/engine/actor.rs +++ b/iroh-willow/src/engine/actor.rs @@ -18,7 +18,7 @@ use crate::{ grouping::ThreeDRange, keys::{NamespaceId, NamespaceKind, UserId, UserSecretKey}, meadowcap::{self, AccessMode}, - willow::{AuthorisedEntry, Entry}, + data_model::{AuthorisedEntry, Entry}, }, session::{intents::Intent, run_session, Error, EventSender, SessionHandle}, store::{ diff --git a/iroh-willow/src/form.rs b/iroh-willow/src/form.rs index 83c0d084e9..80a8081d48 100644 --- a/iroh-willow/src/form.rs +++ b/iroh-willow/src/form.rs @@ -17,7 +17,7 @@ use tokio::io::AsyncRead; use crate::proto::{ data_model::SerdeWriteCapability, keys::UserId, - willow::{Entry, NamespaceId, Path, SubspaceId, Timestamp}, + data_model::{Entry, NamespaceId, Path, SubspaceId, Timestamp}, }; /// Sources where payload data can come from. 
diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 8d230e1fc5..af701f405b 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -9,7 +9,7 @@ use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tracing::{debug, trace}; use crate::{ - proto::sync::{ + proto::wgps::{ AccessChallenge, ChallengeHash, Channel, LogicalChannel, Message, CHALLENGE_HASH_LENGTH, MAX_PAYLOAD_SIZE_POWER, }, @@ -404,7 +404,7 @@ mod tests { grouping::ThreeDRange, keys::{NamespaceId, NamespaceKind, UserId}, meadowcap::AccessMode, - sync::AccessChallenge, + wgps::AccessChallenge, }, session::{intents::Intent, Role, SessionHandle, SessionInit, SessionMode}, }; diff --git a/iroh-willow/src/proto.rs b/iroh-willow/src/proto.rs index 0fca113b11..da67bdad63 100644 --- a/iroh-willow/src/proto.rs +++ b/iroh-willow/src/proto.rs @@ -3,6 +3,4 @@ pub mod grouping; pub mod keys; pub mod meadowcap; pub mod pai; -pub mod sync; pub mod wgps; -pub use self::data_model as willow; diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index 89e3de5dde..79c1d5a67d 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -266,7 +266,7 @@ pub fn is_wider_than(a: &McCapability, b: &McCapability) -> bool { // use super::{ // grouping::{Area, AreaInArea}, // keys::{self, NamespaceSecretKey, UserSecretKey, PUBLIC_KEY_LENGTH, SIGNATURE_LENGTH}, -// willow::{AuthorisedEntry, Entry, Unauthorised}, +// data_model::{AuthorisedEntry, Entry, Unauthorised}, // }; // pub type UserPublicKey = keys::UserPublicKey; diff --git a/iroh-willow/src/proto/wgps/messages.rs b/iroh-willow/src/proto/wgps/messages.rs index 2fc1a41f91..a08ede6f3f 100644 --- a/iroh-willow/src/proto/wgps/messages.rs +++ b/iroh-willow/src/proto/wgps/messages.rs @@ -11,7 +11,7 @@ use crate::{ }, meadowcap::{self}, wgps::AccessChallenge, - willow::Entry, + data_model::Entry, }, util::codec::{DecodeOutcome, Decoder, Encoder}, }; diff --git a/iroh-willow/src/session/aoi_finder.rs 
b/iroh-willow/src/session/aoi_finder.rs index 67d70deda6..63ad667bd0 100644 --- a/iroh-willow/src/session/aoi_finder.rs +++ b/iroh-willow/src/session/aoi_finder.rs @@ -9,7 +9,7 @@ use crate::{ grouping::{Area, AreaOfInterest}, keys::NamespaceId, meadowcap::{ReadAuthorisation, ReadCapability}, - sync::{ + wgps::{ AreaOfInterestHandle, CapabilityHandle, IntersectionHandle, SetupBindAreaOfInterest, }, }, diff --git a/iroh-willow/src/session/capabilities.rs b/iroh-willow/src/session/capabilities.rs index 9a9d8bd0b3..ef19cf6c80 100644 --- a/iroh-willow/src/session/capabilities.rs +++ b/iroh-willow/src/session/capabilities.rs @@ -9,7 +9,7 @@ use crate::{ proto::{ keys::UserSignature, meadowcap::{ReadCapability, SubspaceCapability}, - sync::{ + wgps::{ AccessChallenge, CapabilityHandle, ChallengeHash, CommitmentReveal, IntersectionHandle, PaiReplySubspaceCapability, SetupBindReadCapability, }, diff --git a/iroh-willow/src/session/channels.rs b/iroh-willow/src/session/channels.rs index 694fbe5ae7..874e53ade0 100644 --- a/iroh-willow/src/session/channels.rs +++ b/iroh-willow/src/session/channels.rs @@ -9,7 +9,7 @@ use futures_lite::Stream; use tracing::trace; use crate::{ - proto::sync::{ + proto::wgps::{ Channel, DataMessage, IntersectionMessage, LogicalChannel, Message, ReconciliationMessage, SetupBindAreaOfInterest, SetupBindReadCapability, SetupBindStaticToken, }, diff --git a/iroh-willow/src/session/error.rs b/iroh-willow/src/session/error.rs index 28f3129231..5919225402 100644 --- a/iroh-willow/src/session/error.rs +++ b/iroh-willow/src/session/error.rs @@ -3,8 +3,8 @@ use ed25519_dalek::SignatureError; use crate::{ proto::{ meadowcap::{UserId}, - sync::ResourceHandle, - willow::Unauthorised, + wgps::ResourceHandle, + data_model::Unauthorised, }, session::{pai_finder::PaiError, resource::MissingResource}, store::traits::SecretStoreError, diff --git a/iroh-willow/src/session/pai_finder.rs b/iroh-willow/src/session/pai_finder.rs index a9d302610f..ffe2894968 100644 --- 
a/iroh-willow/src/session/pai_finder.rs +++ b/iroh-willow/src/session/pai_finder.rs @@ -22,11 +22,11 @@ use crate::{ grouping::AreaSubspace, meadowcap::{ReadAuthorisation, SubspaceCapability}, pai::{Fragment, FragmentKind, FragmentSet, PaiScheme, PsiGroup, PsiScalar}, - sync::{ + wgps::{ IntersectionHandle, IntersectionMessage, Message, PaiBindFragment, PaiReplyFragment, PaiRequestSubspaceCapability, }, - willow::{NamespaceId, Path}, + data_model::{NamespaceId, Path}, }, session::{ resource::{MissingResource, ResourceMap}, @@ -514,7 +514,7 @@ mod tests { IntersectionMessage, Message, PaiBindFragment, PaiReplyFragment, PaiRequestSubspaceCapability, }, - willow::{Path, PathExt}, + data_model::{Path, PathExt}, }, session::{pai_finder::PaiIntersection, Error}, }; diff --git a/iroh-willow/src/session/payload.rs b/iroh-willow/src/session/payload.rs index dae6e8c17c..5b20c7b4bc 100644 --- a/iroh-willow/src/session/payload.rs +++ b/iroh-willow/src/session/payload.rs @@ -7,7 +7,7 @@ use iroh_blobs::{ }; use crate::{ - proto::{sync::Message, willow::PayloadDigest}, + proto::{wgps::Message, data_model::PayloadDigest}, session::channels::ChannelSenders, }; diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index b413a08110..751e8e9cb7 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -10,31 +10,17 @@ use iroh_blobs::store::Store as PayloadStore; use tokio_stream::wrappers::ReceiverStream; use tracing::{debug, trace}; -#[derive(Debug)] -pub enum Input { - AoiIntersection(AoiIntersection), -} - -#[derive(Debug)] -pub enum Output { - ReconciledArea { - namespace: NamespaceId, - area: AreaOfInterest, - }, - ReconciledAll, -} - use crate::{ proto::{ grouping::{AreaExt, AreaOfInterest, ThreeDRange}, keys::NamespaceId, - sync::{ + wgps::{ AreaOfInterestHandle, Fingerprint, IsHandle, LengthyEntry, ReconciliationAnnounceEntries, ReconciliationMessage, ReconciliationSendEntry, 
ReconciliationSendFingerprint, ReconciliationSendPayload, ReconciliationTerminatePayload, }, - willow::PayloadDigest, + data_model::PayloadDigest, }, session::{ aoi_finder::AoiIntersection, @@ -51,6 +37,21 @@ use crate::{ util::{gen_stream::GenStream, stream::Cancelable}, }; +#[derive(Debug)] +pub enum Input { + AoiIntersection(AoiIntersection), +} + +#[derive(Debug)] +pub enum Output { + ReconciledArea { + namespace: NamespaceId, + area: AreaOfInterest, + }, + ReconciledAll, +} + + #[derive(derive_more::Debug)] pub struct Reconciler { shared: Shared, diff --git a/iroh-willow/src/session/resource.rs b/iroh-willow/src/session/resource.rs index 0523472236..f5a1c84bee 100644 --- a/iroh-willow/src/session/resource.rs +++ b/iroh-willow/src/session/resource.rs @@ -3,7 +3,7 @@ use std::{ task::{Context, Poll, Waker}, }; -use crate::proto::sync::{IsHandle, ResourceHandle}; +use crate::proto::wgps::{IsHandle, ResourceHandle}; use super::Error; diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 9b4e88a1e8..30b4346591 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -11,8 +11,8 @@ use tracing::{debug, error_span, trace, warn, Instrument, Span}; use crate::{ net::ConnHandle, proto::{ - sync::{ControlIssueGuarantee, LogicalChannel, Message, SetupBindAreaOfInterest}, - willow::Unauthorised, + wgps::{ControlIssueGuarantee, LogicalChannel, Message, SetupBindAreaOfInterest}, + data_model::Unauthorised, }, session::{ aoi_finder::{self, IntersectionFinder}, diff --git a/iroh-willow/src/session/static_tokens.rs b/iroh-willow/src/session/static_tokens.rs index 7856f36220..15a3b8e50b 100644 --- a/iroh-willow/src/session/static_tokens.rs +++ b/iroh-willow/src/session/static_tokens.rs @@ -7,8 +7,8 @@ use std::{ use crate::{ proto::{ - sync::{DynamicToken, SetupBindStaticToken, StaticToken, StaticTokenHandle}, - willow::{AuthorisationToken, AuthorisedEntry, Entry}, + wgps::{DynamicToken, SetupBindStaticToken, StaticToken, 
StaticTokenHandle}, + data_model::{AuthorisationToken, AuthorisedEntry, Entry}, }, session::{channels::ChannelSenders, resource::ResourceMap, Error}, }; diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index d12779cd8e..93df0ccbd7 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -7,7 +7,7 @@ use crate::{ proto::{ data_model::{AuthorisedEntry, PayloadDigest}, keys::{NamespaceId, NamespaceKind, NamespaceSecretKey, UserId}, - willow::Entry, + data_model::Entry, }, store::traits::SecretStorage, util::time::system_time_now, diff --git a/iroh-willow/src/store/entry.rs b/iroh-willow/src/store/entry.rs index 58f215f954..01c495663f 100644 --- a/iroh-willow/src/store/entry.rs +++ b/iroh-willow/src/store/entry.rs @@ -6,7 +6,7 @@ use tokio::sync::broadcast; use crate::proto::{ grouping::Area, - willow::{AuthorisedEntry, NamespaceId}, + data_model::{AuthorisedEntry, NamespaceId}, }; pub type SessionId = u64; diff --git a/iroh-willow/src/store/traits.rs b/iroh-willow/src/store/traits.rs index 28bbd6d90f..7ed3d68290 100644 --- a/iroh-willow/src/store/traits.rs +++ b/iroh-willow/src/store/traits.rs @@ -8,8 +8,8 @@ use crate::{ grouping::ThreeDRange, keys::{NamespaceSecretKey, NamespaceSignature, UserId, UserSecretKey, UserSignature}, meadowcap::{self, ReadAuthorisation}, - sync::Fingerprint, - willow::{AuthorisedEntry, Entry, NamespaceId, WriteCapability}, + wgps::Fingerprint, + data_model::{AuthorisedEntry, Entry, NamespaceId, WriteCapability}, }, }; From c09f9305715cc88cf536dbc61c7f1fb0e0ce436d Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Mon, 12 Aug 2024 17:58:08 +0200 Subject: [PATCH 126/198] chore: change cargo patch --- Cargo.toml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 2827b99e6c..02836da9de 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -43,9 +43,9 @@ missing_debug_implementations = "warn" unused-async = "warn" [patch.crates-io] -willow-data-model = { 
path = "../willow-rs/data-model" } -willow-encoding = { path = "../willow-rs/encoding" } -meadowcap = { path = "../willow-rs/meadowcap" } -# willow-data-model = { git = "https://github.com/Frando/willow-rs.git", branch = "iroh" } -# willow-encoding = { git = "https://github.com/Frando/willow-rs.git", branch = "iroh" } -# meadowcap = { git = "https://github.com/Frando/willow-rs.git", branch = "iroh" } +# willow-data-model = { path = "../willow-rs/data-model" } +# willow-encoding = { path = "../willow-rs/encoding" } +# meadowcap = { path = "../willow-rs/meadowcap" } +willow-data-model = { git = "https://github.com/Frando/willow-rs.git", branch = "iroh" } +willow-encoding = { git = "https://github.com/Frando/willow-rs.git", branch = "iroh" } +meadowcap = { git = "https://github.com/Frando/willow-rs.git", branch = "iroh" } From 95b3a19ccbaca961134e3b132bb04d61cdb6918c Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Mon, 12 Aug 2024 17:59:26 +0200 Subject: [PATCH 127/198] chore: cleanup --- Cargo.lock | 3 +++ iroh-willow/src/engine/actor.rs | 8 ++++---- iroh-willow/src/form.rs | 2 +- iroh-willow/src/net.rs | 4 ++-- iroh-willow/src/proto/data_model.rs | 1 - iroh-willow/src/proto/grouping.rs | 10 ++++------ iroh-willow/src/proto/wgps/messages.rs | 2 +- iroh-willow/src/session/error.rs | 6 +----- iroh-willow/src/session/pai_finder.rs | 4 ++-- iroh-willow/src/session/payload.rs | 2 +- iroh-willow/src/session/reconciler.rs | 9 ++++----- iroh-willow/src/session/run.rs | 2 +- iroh-willow/src/session/static_tokens.rs | 2 +- iroh-willow/src/store.rs | 2 +- iroh-willow/src/store/auth.rs | 9 +++++++-- iroh-willow/src/store/entry.rs | 2 +- iroh-willow/src/store/memory.rs | 22 +++++++++++----------- iroh-willow/src/store/traits.rs | 16 ++++++++-------- iroh-willow/tests/basic.rs | 20 +++++++++++++++----- 19 files changed, 68 insertions(+), 58 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7d9779ed00..be5b667d25 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ 
-3238,6 +3238,7 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" [[package]] name = "meadowcap" version = "0.1.0" +source = "git+https://github.com/Frando/willow-rs.git?branch=iroh#75369f396bfb448f9ba6d0bb88333704d8109578" dependencies = [ "either", "signature", @@ -6637,6 +6638,7 @@ checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" [[package]] name = "willow-data-model" version = "0.1.0" +source = "git+https://github.com/Frando/willow-rs.git?branch=iroh#75369f396bfb448f9ba6d0bb88333704d8109578" dependencies = [ "bytes", "either", @@ -6648,6 +6650,7 @@ dependencies = [ [[package]] name = "willow-encoding" version = "0.1.0" +source = "git+https://github.com/Frando/willow-rs.git?branch=iroh#75369f396bfb448f9ba6d0bb88333704d8109578" dependencies = [ "either", "syncify", diff --git a/iroh-willow/src/engine/actor.rs b/iroh-willow/src/engine/actor.rs index 5ecf3d0de9..de5eba52fc 100644 --- a/iroh-willow/src/engine/actor.rs +++ b/iroh-willow/src/engine/actor.rs @@ -15,10 +15,10 @@ use crate::{ interest::{CapSelector, CapabilityPack, DelegateTo, InterestMap, Interests}, net::ConnHandle, proto::{ - grouping::ThreeDRange, + data_model::{AuthorisedEntry, Entry}, + grouping::Range3d, keys::{NamespaceId, NamespaceKind, UserId, UserSecretKey}, meadowcap::{self, AccessMode}, - data_model::{AuthorisedEntry, Entry}, }, session::{intents::Intent, run_session, Error, EventSender, SessionHandle}, store::{ @@ -123,7 +123,7 @@ impl ActorHandle { pub async fn get_entries( &self, namespace: NamespaceId, - range: ThreeDRange, + range: Range3d, ) -> Result>> { let (tx, rx) = flume::bounded(1024); self.send(Input::GetEntries { @@ -230,7 +230,7 @@ pub enum Input { }, GetEntries { namespace: NamespaceId, - range: ThreeDRange, + range: Range3d, #[debug(skip)] reply: flume::Sender>, }, diff --git a/iroh-willow/src/form.rs b/iroh-willow/src/form.rs index 80a8081d48..3866a7dd7a 100644 --- a/iroh-willow/src/form.rs +++ 
b/iroh-willow/src/form.rs @@ -16,8 +16,8 @@ use tokio::io::AsyncRead; use crate::proto::{ data_model::SerdeWriteCapability, - keys::UserId, data_model::{Entry, NamespaceId, Path, SubspaceId, Timestamp}, + keys::UserId, }; /// Sources where payload data can come from. diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index af701f405b..04ecdb636a 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -401,7 +401,7 @@ mod tests { net::{terminate_gracefully, ConnHandle}, proto::{ data_model::{Entry, InvalidPathError2, Path, PathExt}, - grouping::ThreeDRange, + grouping::Range3d, keys::{NamespaceId, NamespaceKind, UserId}, meadowcap::AccessMode, wgps::AccessChallenge, @@ -774,7 +774,7 @@ mod tests { async fn get_entries(store: &ActorHandle, namespace: NamespaceId) -> Result> { let entries: Result> = store - .get_entries(namespace, ThreeDRange::new_full()) + .get_entries(namespace, Range3d::new_full()) .await? .try_collect() .await; diff --git a/iroh-willow/src/proto/data_model.rs b/iroh-willow/src/proto/data_model.rs index 02223b14e9..55df4bf3ea 100644 --- a/iroh-willow/src/proto/data_model.rs +++ b/iroh-willow/src/proto/data_model.rs @@ -3,7 +3,6 @@ use ufotofu::sync::{consumer::IntoVec, producer::FromSlice}; use willow_data_model::{AuthorisationToken as _, InvalidPathError}; use willow_encoding::sync::{Decodable, Encodable}; - use super::{ keys, meadowcap::{self}, diff --git a/iroh-willow/src/proto/grouping.rs b/iroh-willow/src/proto/grouping.rs index 259f9f6f42..96384ee4a0 100644 --- a/iroh-willow/src/proto/grouping.rs +++ b/iroh-willow/src/proto/grouping.rs @@ -12,8 +12,6 @@ pub type Range3d = willow_data_model::grouping::Range3d< SubspaceId, >; -pub type ThreeDRange = Range3d; - pub type Area = willow_data_model::grouping::Area< MAX_COMPONENT_LENGTH, MAX_COMPONENT_COUNT, @@ -148,9 +146,9 @@ pub mod serde_encoding { Deserialize::deserialize(deserializer)?; let decoded_area = { let mut producer = FromSlice::new(&encoded_area); - + 
willow_data_model::grouping::Area::relative_decode(&relative, &mut producer) - .map_err(|err| serde::de::Error::custom(format!("{err}")))? + .map_err(|err| serde::de::Error::custom(format!("{err}")))? }; let aoi = willow_data_model::grouping::AreaOfInterest { area: decoded_area, @@ -195,9 +193,9 @@ pub mod serde_encoding { let encoded_range: Vec = Deserialize::deserialize(deserializer)?; let decoded_range = { let mut producer = FromSlice::new(&encoded_range); - + willow_data_model::grouping::Range3d::relative_decode(&relative, &mut producer) - .map_err(|err| serde::de::Error::custom(format!("{err}")))? + .map_err(|err| serde::de::Error::custom(format!("{err}")))? }; Ok(Self(decoded_range)) } diff --git a/iroh-willow/src/proto/wgps/messages.rs b/iroh-willow/src/proto/wgps/messages.rs index a08ede6f3f..d54d26b2a2 100644 --- a/iroh-willow/src/proto/wgps/messages.rs +++ b/iroh-willow/src/proto/wgps/messages.rs @@ -5,13 +5,13 @@ use serde::{Deserialize, Serialize}; use crate::{ proto::{ data_model::serde_encoding::SerdeEntry, + data_model::Entry, grouping::{ serde_encoding::{SerdeAreaOfInterest, SerdeRange3d}, Area, }, meadowcap::{self}, wgps::AccessChallenge, - data_model::Entry, }, util::codec::{DecodeOutcome, Decoder, Encoder}, }; diff --git a/iroh-willow/src/session/error.rs b/iroh-willow/src/session/error.rs index 5919225402..2448e2c264 100644 --- a/iroh-willow/src/session/error.rs +++ b/iroh-willow/src/session/error.rs @@ -1,11 +1,7 @@ use ed25519_dalek::SignatureError; use crate::{ - proto::{ - meadowcap::{UserId}, - wgps::ResourceHandle, - data_model::Unauthorised, - }, + proto::{data_model::Unauthorised, meadowcap::UserId, wgps::ResourceHandle}, session::{pai_finder::PaiError, resource::MissingResource}, store::traits::SecretStoreError, util::channel::{ReadError, WriteError}, diff --git a/iroh-willow/src/session/pai_finder.rs b/iroh-willow/src/session/pai_finder.rs index ffe2894968..f337939380 100644 --- a/iroh-willow/src/session/pai_finder.rs +++ 
b/iroh-willow/src/session/pai_finder.rs @@ -19,6 +19,7 @@ use tracing::{debug, trace}; use crate::{ proto::{ + data_model::{NamespaceId, Path}, grouping::AreaSubspace, meadowcap::{ReadAuthorisation, SubspaceCapability}, pai::{Fragment, FragmentKind, FragmentSet, PaiScheme, PsiGroup, PsiScalar}, @@ -26,7 +27,6 @@ use crate::{ IntersectionHandle, IntersectionMessage, Message, PaiBindFragment, PaiReplyFragment, PaiRequestSubspaceCapability, }, - data_model::{NamespaceId, Path}, }, session::{ resource::{MissingResource, ResourceMap}, @@ -507,6 +507,7 @@ mod tests { use crate::{ proto::{ + data_model::{Path, PathExt}, grouping::{Area, AreaSubspace}, keys::{NamespaceKind, NamespaceSecretKey, UserId, UserSecretKey}, meadowcap::ReadAuthorisation, @@ -514,7 +515,6 @@ mod tests { IntersectionMessage, Message, PaiBindFragment, PaiReplyFragment, PaiRequestSubspaceCapability, }, - data_model::{Path, PathExt}, }, session::{pai_finder::PaiIntersection, Error}, }; diff --git a/iroh-willow/src/session/payload.rs b/iroh-willow/src/session/payload.rs index 5b20c7b4bc..6c6565c072 100644 --- a/iroh-willow/src/session/payload.rs +++ b/iroh-willow/src/session/payload.rs @@ -7,7 +7,7 @@ use iroh_blobs::{ }; use crate::{ - proto::{wgps::Message, data_model::PayloadDigest}, + proto::{data_model::PayloadDigest, wgps::Message}, session::channels::ChannelSenders, }; diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index 751e8e9cb7..173940fd50 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -12,7 +12,8 @@ use tracing::{debug, trace}; use crate::{ proto::{ - grouping::{AreaExt, AreaOfInterest, ThreeDRange}, + data_model::PayloadDigest, + grouping::{AreaExt, AreaOfInterest, Range3d}, keys::NamespaceId, wgps::{ AreaOfInterestHandle, Fingerprint, IsHandle, LengthyEntry, @@ -20,7 +21,6 @@ use crate::{ ReconciliationSendFingerprint, ReconciliationSendPayload, ReconciliationTerminatePayload, }, - 
data_model::PayloadDigest, }, session::{ aoi_finder::AoiIntersection, @@ -51,7 +51,6 @@ pub enum Output { ReconciledAll, } - #[derive(derive_more::Debug)] pub struct Reconciler { shared: Shared, @@ -512,7 +511,7 @@ impl Target { async fn send_fingerprint( &mut self, shared: &Shared, - range: ThreeDRange, + range: Range3d, fingerprint: Fingerprint, covers: Option, ) -> anyhow::Result<()> { @@ -531,7 +530,7 @@ impl Target { async fn announce_and_send_entries( &mut self, shared: &Shared, - range: &ThreeDRange, + range: &Range3d, want_response: bool, covers: Option, our_entry_count: Option, diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 30b4346591..a0d9199f28 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -11,8 +11,8 @@ use tracing::{debug, error_span, trace, warn, Instrument, Span}; use crate::{ net::ConnHandle, proto::{ - wgps::{ControlIssueGuarantee, LogicalChannel, Message, SetupBindAreaOfInterest}, data_model::Unauthorised, + wgps::{ControlIssueGuarantee, LogicalChannel, Message, SetupBindAreaOfInterest}, }, session::{ aoi_finder::{self, IntersectionFinder}, diff --git a/iroh-willow/src/session/static_tokens.rs b/iroh-willow/src/session/static_tokens.rs index 15a3b8e50b..16d5cc3053 100644 --- a/iroh-willow/src/session/static_tokens.rs +++ b/iroh-willow/src/session/static_tokens.rs @@ -7,8 +7,8 @@ use std::{ use crate::{ proto::{ - wgps::{DynamicToken, SetupBindStaticToken, StaticToken, StaticTokenHandle}, data_model::{AuthorisationToken, AuthorisedEntry, Entry}, + wgps::{DynamicToken, SetupBindStaticToken, StaticToken, StaticTokenHandle}, }, session::{channels::ChannelSenders, resource::ResourceMap, Error}, }; diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index 93df0ccbd7..ad88acb4cb 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -5,9 +5,9 @@ use crate::{ form::{AuthForm, EntryForm, EntryOrForm, SubspaceForm, TimestampForm}, interest::{CapSelector, 
ReceiverSelector}, proto::{ + data_model::Entry, data_model::{AuthorisedEntry, PayloadDigest}, keys::{NamespaceId, NamespaceKind, NamespaceSecretKey, UserId}, - data_model::Entry, }, store::traits::SecretStorage, util::time::system_time_now, diff --git a/iroh-willow/src/store/auth.rs b/iroh-willow/src/store/auth.rs index 87eeae48a8..5ef9c465fa 100644 --- a/iroh-willow/src/store/auth.rs +++ b/iroh-willow/src/store/auth.rs @@ -12,7 +12,7 @@ use crate::{ }, proto::{ data_model::WriteCapability, - grouping::{AreaOfInterest}, + grouping::AreaOfInterest, keys::{NamespaceId, UserId}, meadowcap::{AccessMode, FailedDelegationError, McCapability, ReadAuthorisation}, }, @@ -167,7 +167,12 @@ impl Auth { .secrets .get_namespace(&namespace_key) .ok_or(AuthError::MissingNamespaceSecret(namespace_key))?; - McCapability::new_owned(namespace_key, &namespace_secret, user_key, AccessMode::Write)? + McCapability::new_owned( + namespace_key, + &namespace_secret, + user_key, + AccessMode::Write, + )? }; let pack = CapabilityPack::Write(cap.into()); Ok(pack) diff --git a/iroh-willow/src/store/entry.rs b/iroh-willow/src/store/entry.rs index 01c495663f..f5b3e21c16 100644 --- a/iroh-willow/src/store/entry.rs +++ b/iroh-willow/src/store/entry.rs @@ -5,8 +5,8 @@ use std::{ use tokio::sync::broadcast; use crate::proto::{ - grouping::Area, data_model::{AuthorisedEntry, NamespaceId}, + grouping::Area, }; pub type SessionId = u64; diff --git a/iroh-willow/src/store/memory.rs b/iroh-willow/src/store/memory.rs index 36a38a0b33..6db5e50754 100644 --- a/iroh-willow/src/store/memory.rs +++ b/iroh-willow/src/store/memory.rs @@ -8,7 +8,7 @@ use crate::{ interest::{CapSelector, CapabilityPack}, proto::{ data_model::{AuthorisedEntry, Entry, EntryExt, WriteCapability}, - grouping::{Range, RangeEnd, ThreeDRange}, + grouping::{Range, Range3d, RangeEnd}, keys::{NamespaceId, NamespaceSecretKey, UserId, UserSecretKey}, meadowcap::{self, is_wider_than, ReadAuthorisation}, wgps::Fingerprint, @@ -94,7 +94,7 @@ pub 
struct EntryStore { // impl + 'static> ReadonlyStore for T { impl traits::EntryReader for Rc> { - fn fingerprint(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result { + fn fingerprint(&self, namespace: NamespaceId, range: &Range3d) -> Result { let mut fingerprint = Fingerprint::default(); for entry in self.get_entries(namespace, range) { let entry = entry?; @@ -106,7 +106,7 @@ impl traits::EntryReader for Rc> { fn split_range( &self, namespace: NamespaceId, - range: &ThreeDRange, + range: &Range3d, config: &SplitOpts, ) -> Result>> { let count = self.get_entries(namespace, range).count(); @@ -127,12 +127,12 @@ impl traits::EntryReader for Rc> { let mut ranges = vec![]; // split in two halves by subspace if *mid.subspace_id() != range.subspaces().start { - ranges.push(ThreeDRange::new( + ranges.push(Range3d::new( Range::new_closed(range.subspaces().start, *mid.subspace_id()).unwrap(), range.paths().clone(), *range.times(), )); - ranges.push(ThreeDRange::new( + ranges.push(Range3d::new( Range::new(*mid.subspace_id(), range.subspaces().end), range.paths().clone(), *range.times(), @@ -140,7 +140,7 @@ impl traits::EntryReader for Rc> { } // split by path else if *mid.path() != range.paths().start { - ranges.push(ThreeDRange::new( + ranges.push(Range3d::new( *range.subspaces(), Range::new( range.paths().start.clone(), @@ -148,19 +148,19 @@ impl traits::EntryReader for Rc> { ), *range.times(), )); - ranges.push(ThreeDRange::new( + ranges.push(Range3d::new( *range.subspaces(), Range::new(mid.path().clone(), range.paths().end.clone()), *range.times(), )); // split by time } else { - ranges.push(ThreeDRange::new( + ranges.push(Range3d::new( *range.subspaces(), range.paths().clone(), Range::new(range.times().start, RangeEnd::Closed(mid.timestamp())), )); - ranges.push(ThreeDRange::new( + ranges.push(Range3d::new( *range.subspaces(), range.paths().clone(), Range::new(mid.timestamp(), range.times().end), @@ -174,14 +174,14 @@ impl traits::EntryReader for Rc> { 
Ok(out.into_iter()) } - fn count(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result { + fn count(&self, namespace: NamespaceId, range: &Range3d) -> Result { Ok(self.get_entries(namespace, range).count() as u64) } fn get_entries_with_authorisation<'a>( &'a self, namespace: NamespaceId, - range: &ThreeDRange, + range: &Range3d, ) -> impl Iterator> + 'a { let slf = self.borrow(); slf.entries diff --git a/iroh-willow/src/store/traits.rs b/iroh-willow/src/store/traits.rs index 7ed3d68290..be5235a59f 100644 --- a/iroh-willow/src/store/traits.rs +++ b/iroh-willow/src/store/traits.rs @@ -5,11 +5,11 @@ use anyhow::Result; use crate::{ interest::{CapSelector, CapabilityPack}, proto::{ - grouping::ThreeDRange, + data_model::{AuthorisedEntry, Entry, NamespaceId, WriteCapability}, + grouping::Range3d, keys::{NamespaceSecretKey, NamespaceSignature, UserId, UserSecretKey, UserSignature}, meadowcap::{self, ReadAuthorisation}, wgps::Fingerprint, - data_model::{AuthorisedEntry, Entry, NamespaceId, WriteCapability}, }, }; @@ -79,27 +79,27 @@ pub trait EntryStorage: EntryReader + Clone + Debug + 'static { } pub trait EntryReader: Debug + 'static { - fn fingerprint(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result; + fn fingerprint(&self, namespace: NamespaceId, range: &Range3d) -> Result; fn split_range( &self, namespace: NamespaceId, - range: &ThreeDRange, + range: &Range3d, config: &SplitOpts, ) -> Result>>; - fn count(&self, namespace: NamespaceId, range: &ThreeDRange) -> Result; + fn count(&self, namespace: NamespaceId, range: &Range3d) -> Result; fn get_entries_with_authorisation<'a>( &'a self, namespace: NamespaceId, - range: &ThreeDRange, + range: &Range3d, ) -> impl Iterator> + 'a; fn get_entries( &self, namespace: NamespaceId, - range: &ThreeDRange, + range: &Range3d, ) -> impl Iterator> { self.get_entries_with_authorisation(namespace, range) .map(|e| e.map(|e| e.0)) @@ -120,7 +120,7 @@ pub enum KeyScope { User, } -pub type RangeSplit = (ThreeDRange, 
SplitAction); +pub type RangeSplit = (Range3d, SplitAction); #[derive(Debug)] pub enum SplitAction { diff --git a/iroh-willow/tests/basic.rs b/iroh-willow/tests/basic.rs index e2d5c723f3..5fecda1135 100644 --- a/iroh-willow/tests/basic.rs +++ b/iroh-willow/tests/basic.rs @@ -5,10 +5,15 @@ use futures_concurrency::future::TryJoin; use futures_lite::StreamExt; use iroh_willow::{ - interest::{Interests, IntoAreaOfInterest}, proto::{grouping::{Area, AreaExt}, willow::{Path, PathExt}}, session::{ + interest::{Interests, IntoAreaOfInterest}, + proto::{ + grouping::{Area, AreaExt}, + willow::{Path, PathExt}, + }, + session::{ intents::{Completion, EventKind}, SessionInit, SessionMode, - } + }, }; use self::util::{create_rng, insert, setup_and_delegate, spawn_two, Peer}; @@ -72,7 +77,8 @@ async fn peer_manager_two_intents() -> Result<()> { async move { let path = Path::from_bytes(&[b"bar"]).unwrap(); - let interests = Interests::builder().add_area(namespace, [Area::new_path(path.clone())]); + let interests = + Interests::builder().add_area(namespace, [Area::new_path(path.clone())]); let init = SessionInit::new(interests, SessionMode::ReconcileOnce); let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); @@ -280,11 +286,15 @@ mod util { use tokio::task::JoinHandle; use iroh_willow::{ - engine::{AcceptOpts, Engine}, form::EntryForm, interest::{CapSelector, DelegateTo, RestrictArea}, net::ALPN, proto::{ + engine::{AcceptOpts, Engine}, + form::EntryForm, + interest::{CapSelector, DelegateTo, RestrictArea}, + net::ALPN, + proto::{ keys::{NamespaceId, NamespaceKind, UserId}, meadowcap::AccessMode, willow::{Path, PathExt}, - } + }, }; pub fn create_rng(seed: &str) -> ChaCha12Rng { From 8a104f161b412bbf449269e12ece9072a172b750 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 13 Aug 2024 00:32:26 +0200 Subject: [PATCH 128/198] refactor: further cleanup --- Cargo.lock | 3 - iroh-willow/src/proto/data_model.rs | 194 ++---- 
iroh-willow/src/proto/grouping.rs | 80 +-- iroh-willow/src/proto/meadowcap.rs | 830 +---------------------- iroh-willow/src/proto/sync.rs | 1 - iroh-willow/src/session/data.rs | 4 +- iroh-willow/src/session/error.rs | 11 +- iroh-willow/src/session/run.rs | 7 +- iroh-willow/src/session/static_tokens.rs | 9 +- iroh-willow/src/store.rs | 11 +- iroh-willow/src/store/entry.rs | 2 +- iroh-willow/src/store/memory.rs | 7 +- iroh-willow/src/store/traits.rs | 2 +- iroh-willow/src/util.rs | 1 + iroh-willow/src/util/codec2.rs | 35 + iroh-willow/tests/basic.rs | 4 +- 16 files changed, 185 insertions(+), 1016 deletions(-) delete mode 100644 iroh-willow/src/proto/sync.rs create mode 100644 iroh-willow/src/util/codec2.rs diff --git a/Cargo.lock b/Cargo.lock index be5b667d25..7d9779ed00 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3238,7 +3238,6 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" [[package]] name = "meadowcap" version = "0.1.0" -source = "git+https://github.com/Frando/willow-rs.git?branch=iroh#75369f396bfb448f9ba6d0bb88333704d8109578" dependencies = [ "either", "signature", @@ -6638,7 +6637,6 @@ checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" [[package]] name = "willow-data-model" version = "0.1.0" -source = "git+https://github.com/Frando/willow-rs.git?branch=iroh#75369f396bfb448f9ba6d0bb88333704d8109578" dependencies = [ "bytes", "either", @@ -6650,7 +6648,6 @@ dependencies = [ [[package]] name = "willow-encoding" version = "0.1.0" -source = "git+https://github.com/Frando/willow-rs.git?branch=iroh#75369f396bfb448f9ba6d0bb88333704d8109578" dependencies = [ "either", "syncify", diff --git a/iroh-willow/src/proto/data_model.rs b/iroh-willow/src/proto/data_model.rs index 55df4bf3ea..5941cbe671 100644 --- a/iroh-willow/src/proto/data_model.rs +++ b/iroh-willow/src/proto/data_model.rs @@ -1,6 +1,5 @@ use iroh_base::hash::Hash; use ufotofu::sync::{consumer::IntoVec, producer::FromSlice}; -use 
willow_data_model::{AuthorisationToken as _, InvalidPathError}; use willow_encoding::sync::{Decodable, Encodable}; use super::{ @@ -8,6 +7,9 @@ use super::{ meadowcap::{self}, }; +pub use willow_data_model::InvalidPathError; +pub use willow_data_model::UnauthorisedWriteError; + /// A type for identifying namespaces. pub type NamespaceId = keys::NamespaceId; @@ -17,7 +19,7 @@ pub type SubspaceId = keys::UserId; /// The capability type needed to authorize writes. pub type WriteCapability = meadowcap::McCapability; -/// The capability type needed to authorize writes (serializable). +/// The capability type needed to authorize writes (serde serializable). pub type SerdeWriteCapability = meadowcap::serde_encoding::SerdeMcCapability; /// A Timestamp is a 64-bit unsigned integer, that is, a natural number between zero (inclusive) and 2^64 - 1 (exclusive). @@ -39,8 +41,12 @@ pub const MAX_PATH_LENGTH: usize = 4096; /// The byte length of a [`PayloadDigest`]. pub const DIGEST_LENGTH: usize = 32; +/// See [`willow_data_model::Component`]. pub type Component<'a> = willow_data_model::Component<'a, MAX_COMPONENT_LENGTH>; +/// A payload digest used in entries. +/// +/// This wraps a [`Hash`] blake3 hash. #[derive( Debug, Clone, @@ -62,24 +68,35 @@ impl Default for PayloadDigest { } } -// #[derive( -// Debug, -// Clone, -// Hash, -// Eq, -// PartialEq, -// Ord, -// PartialOrd, -// derive_more::From, -// derive_more::Into, -// derive_more::Deref, -// )] -// pub struct Path( -// willow_data_model::Path, -// ); +impl willow_data_model::PayloadDigest for PayloadDigest {} +/// See [`willow_data_model::Path`]. pub type Path = willow_data_model::Path; +/// Extension methods for [`Path`]. +pub trait PathExt { + /// Creates a new path from a slice of bytes. 
+ fn from_bytes(slices: &[&[u8]]) -> Result; +} + +impl PathExt for Path { + fn from_bytes(slices: &[&[u8]]) -> Result { + let component_count = slices.len(); + let total_len = slices.iter().map(|x| x.len()).sum::(); + let iter = slices.iter().filter_map(|c| Component::new(c)); + // TODO: Avoid this alloc by adding willow_data_model::Path::try_new_from_iter or such. + let mut iter = iter.collect::>().into_iter(); + let path = willow_data_model::Path::new_from_iter(total_len, &mut iter)?; + if path.get_component_count() != component_count { + Err(InvalidPathError2::ComponentTooLong( + path.get_component_count(), + )) + } else { + Ok(path) + } + } +} + #[derive(Debug, thiserror::Error)] /// An error arising from trying to construct a invalid [`Path`] from valid components. pub enum InvalidPathError2 { @@ -103,28 +120,11 @@ impl From for InvalidPathError2 { } } -pub trait PathExt { - fn from_bytes(slices: &[&[u8]]) -> Result; -} - -impl PathExt for Path { - fn from_bytes(slices: &[&[u8]]) -> Result { - let component_count = slices.len(); - let total_len = slices.iter().map(|x| x.len()).sum::(); - let iter = slices.iter().filter_map(|c| Component::new(c)); - // TODO: Avoid this alloc by adding willow_data_model::Path::try_new_from_iter or such. - let mut iter = iter.collect::>().into_iter(); - let path = willow_data_model::Path::new_from_iter(total_len, &mut iter)?; - if path.get_component_count() != component_count { - Err(InvalidPathError2::ComponentTooLong( - path.get_component_count(), - )) - } else { - Ok(path) - } - } -} - +/// An entry in a willow store. +/// +/// Contains the metadata associated with each [`PayloadDigest`]. +/// +/// See [`willow_data_model::Entry`]. pub type Entry = willow_data_model::Entry< MAX_COMPONENT_LENGTH, MAX_COMPONENT_COUNT, @@ -134,10 +134,16 @@ pub type Entry = willow_data_model::Entry< PayloadDigest, >; +/// Extension methods for [`Entry`]. pub trait EntryExt { + /// Encodes the entry into a bytestring. 
fn encode_to_vec(&self) -> Vec; + + /// Decodes an entry from a bytestring. fn decode_from_slice(bytes: &[u8]) -> anyhow::Result; - fn as_set_sort_tuple(&self) -> (&NamespaceId, &SubspaceId, &Path); + + /// Returns a tuple of namespace, subspace and path. + fn as_sortable_tuple(&self) -> (&NamespaceId, &SubspaceId, &Path); } impl EntryExt for Entry { @@ -152,80 +158,25 @@ impl EntryExt for Entry { Ok(entry) } - fn as_set_sort_tuple(&self) -> (&NamespaceId, &SubspaceId, &Path) { + fn as_sortable_tuple(&self) -> (&NamespaceId, &SubspaceId, &Path) { (self.namespace_id(), self.subspace_id(), self.path()) } } -#[derive(Debug, Clone)] -pub struct AuthorisedEntry(pub Entry, pub AuthorisationToken); - -impl std::ops::Deref for AuthorisedEntry { - type Target = Entry; - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl AuthorisedEntry { - pub fn entry(&self) -> &Entry { - &self.0 - } - - pub fn try_authorise(entry: Entry, token: AuthorisationToken) -> Result { - if token.is_authorised_write(&entry) { - Ok(AuthorisedEntry(entry, token)) - } else { - Err(Unauthorised) - } - } - - pub fn into_parts(self) -> (Entry, AuthorisationToken) { - (self.0, self.1) - } -} - -/// Error returned for entries that are not authorised. +/// An entry in a willow store. /// -/// See [`is_authorised_write`] for details. 
-#[derive(Debug, thiserror::Error)] -#[error("Entry is not authorised")] -pub struct Unauthorised; - -// #[derive(Debug, Clone, derive_more::From, derive_more::Into, derive_more::Deref)] -// pub type AuthorisedEntry = -// willow_data_model::AuthorisedEntry< -// MAX_COMPONENT_LENGTH, -// MAX_COMPONENT_COUNT, -// MAX_PATH_LENGTH, -// NamespaceId, -// SubspaceId, -// PayloadDigest, -// AuthorisationToken, -// >; - -// pub type Path = willow_data_model::Path; - -// pub type Entry = willow_data_model::Entry< -// MAX_COMPONENT_LENGTH, -// MAX_COMPONENT_COUNT, -// MAX_PATH_LENGTH, -// NamespaceId, -// SubspaceId, -// PayloadDigest, -// >; - -// pub type AuthorisedEntry = willow_data_model::AuthorisedEntry< -// MAX_COMPONENT_LENGTH, -// MAX_COMPONENT_COUNT, -// MAX_PATH_LENGTH, -// NamespaceId, -// SubspaceId, -// PayloadDigest, -// AuthorisationToken, -// >; - -impl willow_data_model::PayloadDigest for PayloadDigest {} +/// Contains the metadata associated with each [`PayloadDigest`]. +/// +/// See [`willow_data_model::Entry`]. +pub type AuthorisedEntry = willow_data_model::AuthorisedEntry< + MAX_COMPONENT_LENGTH, + MAX_COMPONENT_COUNT, + MAX_PATH_LENGTH, + NamespaceId, + SubspaceId, + PayloadDigest, + AuthorisationToken, +>; use syncify::syncify; use syncify::syncify_replace; @@ -268,23 +219,19 @@ mod encoding { } pub mod serde_encoding { - use serde::{Deserialize, Deserializer, Serialize}; - use ufotofu::sync::{consumer::IntoVec, producer::FromSlice}; - use willow_encoding::sync::{Decodable, Encodable}; + use serde::{de, Deserialize, Deserializer, Serialize}; + + use crate::util::codec2::{from_bytes, to_vec}; use super::*; + /// [`Entry`] wrapper that can be serialized with [`serde`]. 
#[derive(Debug, Clone, derive_more::From, derive_more::Into, derive_more::Deref)] pub struct SerdeEntry(pub Entry); impl Serialize for SerdeEntry { fn serialize(&self, serializer: S) -> Result { - let encoded = { - let mut consumer = IntoVec::::new(); - self.0.encode(&mut consumer).expect("encoding not to fail"); - consumer.into_vec() - }; - encoded.serialize(serializer) + to_vec(&self.0).serialize(serializer) } } @@ -293,14 +240,9 @@ pub mod serde_encoding { where D: Deserializer<'de>, { - let data: Vec = Deserialize::deserialize(deserializer)?; - let decoded = { - let mut producer = FromSlice::new(&data); - let decoded = willow_data_model::Entry::decode(&mut producer) - .map_err(serde::de::Error::custom)?; - Self(decoded) - }; - Ok(decoded) + let bytes: Vec = Deserialize::deserialize(deserializer)?; + let decoded = from_bytes(&bytes).map_err(de::Error::custom)?; + Ok(Self(decoded)) } } } diff --git a/iroh-willow/src/proto/grouping.rs b/iroh-willow/src/proto/grouping.rs index 96384ee4a0..4f393ec126 100644 --- a/iroh-willow/src/proto/grouping.rs +++ b/iroh-willow/src/proto/grouping.rs @@ -5,6 +5,7 @@ use super::data_model::{ Entry, Path, SubspaceId, Timestamp, MAX_COMPONENT_COUNT, MAX_COMPONENT_LENGTH, MAX_PATH_LENGTH, }; +/// See [`willow_data_model::grouping::Range3d`]. pub type Range3d = willow_data_model::grouping::Range3d< MAX_COMPONENT_LENGTH, MAX_COMPONENT_COUNT, @@ -12,6 +13,7 @@ pub type Range3d = willow_data_model::grouping::Range3d< SubspaceId, >; +/// See [`willow_data_model::grouping::Area`]. pub type Area = willow_data_model::grouping::Area< MAX_COMPONENT_LENGTH, MAX_COMPONENT_COUNT, @@ -19,8 +21,10 @@ pub type Area = willow_data_model::grouping::Area< SubspaceId, >; +/// See [`willow_data_model::grouping::AreaSubspace`]. pub type AreaSubspace = willow_data_model::grouping::AreaSubspace; +/// See [`willow_data_model::grouping::AreaOfInterest`]. 
pub type AreaOfInterest = willow_data_model::grouping::AreaOfInterest< MAX_COMPONENT_LENGTH, MAX_COMPONENT_COUNT, @@ -28,7 +32,9 @@ pub type AreaOfInterest = willow_data_model::grouping::AreaOfInterest< SubspaceId, >; +/// Extension methods for [`AreaOfInterest`]. pub trait AreaOfInterestExt { + /// Creates a new area of interest with the specified area and no other limits. fn with_area(area: Area) -> AreaOfInterest; } @@ -42,9 +48,15 @@ impl AreaOfInterestExt for AreaOfInterest { } } +/// Extension methods for [`Area`]. pub trait AreaExt { + /// Returns `true` if the area contains `point`. fn includes_point(&self, point: &Point) -> bool; + + /// Creates a new area with `path` as prefix and no constraints on subspace or timestamp. fn new_path(path: Path) -> Area; + + /// Converts the area into a [`Range3d`]. fn to_range(&self) -> Range3d; } @@ -101,18 +113,15 @@ impl Point { } pub fn into_area(&self) -> Area { - let times = Range { - start: self.timestamp, - end: RangeEnd::Closed(self.timestamp + 1), - }; + let times = Range::new_closed(self.timestamp, self.timestamp + 1).expect("verified"); Area::new(AreaSubspace::Id(self.subspace_id), self.path.clone(), times) } } pub mod serde_encoding { - use serde::{Deserialize, Deserializer, Serialize}; - use ufotofu::sync::{consumer::IntoVec, producer::FromSlice}; - use willow_encoding::sync::{RelativeDecodable, RelativeEncodable}; + use serde::{de, Deserialize, Deserializer, Serialize}; + + use crate::util::codec2::{from_bytes_relative, to_vec_relative}; use super::*; @@ -123,15 +132,8 @@ pub mod serde_encoding { impl Serialize for SerdeAreaOfInterest { fn serialize(&self, serializer: S) -> Result { - let relative = Area::new_full(); - let encoded_area = { - let mut consumer = IntoVec::::new(); - self.0 - .area - .relative_encode(&relative, &mut consumer) - .expect("encoding not to fail"); - consumer.into_vec() - }; + let previous = Area::new_full(); + let encoded_area = to_vec_relative(&previous, &self.0.area); 
(encoded_area, self.0.max_count, self.0.max_size).serialize(serializer) } } @@ -144,18 +146,8 @@ pub mod serde_encoding { let relative = Area::new_full(); let (encoded_area, max_count, max_size): (Vec, u64, u64) = Deserialize::deserialize(deserializer)?; - let decoded_area = { - let mut producer = FromSlice::new(&encoded_area); - - willow_data_model::grouping::Area::relative_decode(&relative, &mut producer) - .map_err(|err| serde::de::Error::custom(format!("{err}")))? - }; - let aoi = willow_data_model::grouping::AreaOfInterest { - area: decoded_area, - max_count, - max_size, - }; - Ok(Self(aoi)) + let area = from_bytes_relative(&relative, &encoded_area).map_err(de::Error::custom)?; + Ok(Self(AreaOfInterest::new(area, max_count, max_size))) } } @@ -164,19 +156,8 @@ pub mod serde_encoding { impl Serialize for SerdeRange3d { fn serialize(&self, serializer: S) -> Result { - let relative = Range3d::new( - Default::default(), - Range::new_open(Path::new_empty()), - Default::default(), - ); - let encoded = { - let mut consumer = IntoVec::::new(); - self.0 - .relative_encode(&relative, &mut consumer) - .expect("encoding not to fail"); - consumer.into_vec() - }; - encoded.serialize(serializer) + let previous = Range3d::new_full(); + to_vec_relative(&previous, &self.0).serialize(serializer) } } @@ -185,19 +166,10 @@ pub mod serde_encoding { where D: Deserializer<'de>, { - let relative = Range3d::new( - Default::default(), - Range::new_open(Path::new_empty()), - Default::default(), - ); - let encoded_range: Vec = Deserialize::deserialize(deserializer)?; - let decoded_range = { - let mut producer = FromSlice::new(&encoded_range); - - willow_data_model::grouping::Range3d::relative_decode(&relative, &mut producer) - .map_err(|err| serde::de::Error::custom(format!("{err}")))? 
- }; - Ok(Self(decoded_range)) + let previous = Range3d::new_full(); + let bytes: Vec = Deserialize::deserialize(deserializer)?; + let decoded = from_bytes_relative(&previous, &bytes).map_err(de::Error::custom)?; + Ok(Self(decoded)) } } } diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index 79c1d5a67d..3249e66854 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -72,7 +72,6 @@ pub type FailedDelegationError = meadowcap::FailedDelegationError< >; /// Represents an authorisation to read an area of data in a Namespace. -// TODO: Move somewhere else? #[derive(Debug, Clone, Hash, Eq, PartialEq)] pub struct ReadAuthorisation(McCapability, Option); @@ -130,12 +129,20 @@ impl ReadAuthorisation { } } +/// Returns `true` if `self` covers a larger area than `other`, +/// or if covers the same area and has less delegations. +pub fn is_wider_than(a: &McCapability, b: &McCapability) -> bool { + (a.granted_area().includes_area(&b.granted_area())) + || (a.granted_area() == b.granted_area() && a.delegations().len() < b.delegations().len()) +} + pub mod serde_encoding { - use serde::{Deserialize, Deserializer}; - use ufotofu::sync::{consumer::IntoVec, producer::FromSlice}; - use willow_encoding::sync::{Decodable, Encodable, RelativeDecodable, RelativeEncodable}; + use serde::{de, Deserialize, Deserializer}; - use crate::proto::grouping::Area; + use crate::{ + proto::grouping::Area, + util::codec2::{from_bytes, from_bytes_relative, to_vec, to_vec_relative}, + }; use super::*; @@ -146,21 +153,8 @@ pub mod serde_encoding { impl Serialize for SerdeReadAuthorisation { fn serialize(&self, serializer: S) -> Result { - let relative = Area::new_full(); - let encoded_cap = { - let mut consumer = IntoVec::::new(); - self.0 - .0 - .relative_encode(&relative, &mut consumer) - .expect("encoding not to fail"); - consumer.into_vec() - }; - - let encoded_subspace_cap = self.0 .1.as_ref().map(|cap| { - let mut consumer = 
IntoVec::::new(); - cap.encode(&mut consumer).expect("encoding not to fail"); - consumer.into_vec() - }); + let encoded_cap = to_vec_relative(&Area::new_full(), &self.0 .0); + let encoded_subspace_cap = self.0 .1.as_ref().map(to_vec); (encoded_cap, encoded_subspace_cap).serialize(serializer) } } @@ -170,10 +164,8 @@ pub mod serde_encoding { where D: Deserializer<'de>, { - let (read_cap, subspace_cap) = - <(SerdeMcCapability, Option)>::deserialize( - deserializer, - )?; + let (read_cap, subspace_cap): (SerdeMcCapability, Option) = + Deserialize::deserialize(deserializer)?; Ok(Self(ReadAuthorisation( read_cap.into(), subspace_cap.map(Into::into), @@ -188,15 +180,8 @@ pub mod serde_encoding { impl Serialize for SerdeMcCapability { fn serialize(&self, serializer: S) -> Result { - let relative = Area::new_full(); - let encoded = { - let mut consumer = IntoVec::::new(); - self.0 - .relative_encode(&relative, &mut consumer) - .expect("encoding not to fail"); - consumer.into_vec() - }; - encoded.serialize(serializer) + let previous = Area::new_full(); + to_vec_relative(&previous, &self.0).serialize(serializer) } } @@ -205,31 +190,19 @@ pub mod serde_encoding { where D: Deserializer<'de>, { - let relative = Area::new_full(); - let data: Vec = Deserialize::deserialize(deserializer)?; - let decoded = { - let mut producer = FromSlice::new(&data); - let decoded = McCapability::relative_decode(&relative, &mut producer) - .map_err(serde::de::Error::custom)?; - Self(decoded) - }; - Ok(decoded) + let previous = Area::new_full(); + let bytes: Vec = Deserialize::deserialize(deserializer)?; + let decoded = from_bytes_relative(&previous, &bytes).map_err(de::Error::custom)?; + Ok(Self(decoded)) } } - #[derive( - Debug, Clone, Eq, PartialEq, Hash, derive_more::From, derive_more::Into, derive_more::Deref, - )] + #[derive(Debug, Clone, derive_more::From, derive_more::Into, derive_more::Deref)] pub struct SerdeMcSubspaceCapability(pub McSubspaceCapability); impl Serialize for 
SerdeMcSubspaceCapability { fn serialize(&self, serializer: S) -> Result { - let encoded = { - let mut consumer = IntoVec::::new(); - self.0.encode(&mut consumer).expect("encoding not to fail"); - consumer.into_vec() - }; - encoded.serialize(serializer) + to_vec(&self.0).serialize(serializer) } } @@ -238,758 +211,9 @@ pub mod serde_encoding { where D: Deserializer<'de>, { - let data: Vec = Deserialize::deserialize(deserializer)?; - let decoded = { - let mut producer = FromSlice::new(&data); - let decoded = McSubspaceCapability::decode(&mut producer) - .map_err(serde::de::Error::custom)?; - Self(decoded) - }; - Ok(decoded) + let bytes: Vec = Deserialize::deserialize(deserializer)?; + let decoded = from_bytes(&bytes).map_err(de::Error::custom)?; + Ok(Self(decoded)) } } } - -/// Returns `true` if `self` covers a larger area than `other`, -/// or if covers the same area and has less delegations. -pub fn is_wider_than(a: &McCapability, b: &McCapability) -> bool { - (a.granted_area().includes_area(&b.granted_area())) - || (a.granted_area() == b.granted_area() && a.delegations().len() < b.delegations().len()) -} - -// use std::{io::Write, sync::Arc}; - -// use serde::{Deserialize, Serialize}; - -// use crate::{proto::grouping::NotIncluded, util::codec::Encoder}; - -// use super::{ -// grouping::{Area, AreaInArea}, -// keys::{self, NamespaceSecretKey, UserSecretKey, PUBLIC_KEY_LENGTH, SIGNATURE_LENGTH}, -// data_model::{AuthorisedEntry, Entry, Unauthorised}, -// }; - -// pub type UserPublicKey = keys::UserPublicKey; -// pub type NamespacePublicKey = keys::NamespacePublicKey; -// pub type UserId = keys::UserId; -// pub type NamespaceId = keys::NamespaceId; -// pub type UserSignature = keys::UserSignature; -// pub type NamespaceSignature = keys::NamespaceSignature; - -// #[derive(Debug, derive_more::From)] -// pub enum SecretKey { -// User(UserSecretKey), -// Namespace(NamespaceSecretKey), -// } - -// pub fn is_authorised_write(entry: &Entry, token: 
&MeadowcapAuthorisationToken) -> bool { -// let (capability, signature) = token.as_parts(); - -// capability.is_valid() -// && capability.access_mode() == AccessMode::ReadWrite -// && capability.granted_area().includes_entry(entry) -// && capability -// .receiver() -// // TODO: This allocates each time, avoid -// .verify(&entry.encode().expect("encoding not to fail"), signature) -// .is_ok() -// } - -// pub fn create_token( -// entry: &Entry, -// capability: McCapability, -// secret_key: &UserSecretKey, -// ) -> MeadowcapAuthorisationToken { -// // TODO: This allocates each time, avoid -// let signable = entry.encode().expect("encoding not to fail"); -// let signature = secret_key.sign(&signable); -// MeadowcapAuthorisationToken::from_parts(capability, signature) -// } - -// pub fn attach_authorisation( -// entry: Entry, -// capability: McCapability, -// secret_key: &UserSecretKey, -// ) -> Result { -// if capability.access_mode() != AccessMode::ReadWrite -// || capability.granted_namespace().id() != entry.namespace_id -// || !capability.granted_area().includes_entry(&entry) -// || capability.receiver() != &secret_key.public_key() -// { -// return Err(InvalidParams); -// } -// let token = create_token(&entry, capability, secret_key); -// Ok(AuthorisedEntry::from_parts_unchecked(entry, token)) -// } - -// #[derive(Debug, thiserror::Error)] -// #[error("invalid parameters")] -// pub struct InvalidParams; - -// #[derive(Debug, thiserror::Error)] -// #[error("invalid capability")] -// pub struct InvalidCapability; - -// /// To be used as an AuthorisationToken for Willow. -// #[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)] -// pub struct MeadowcapAuthorisationToken { -// /// Certifies that an Entry may be written. -// pub capability: McCapability, -// /// Proves that the Entry was created by the receiver of the capability. 
-// pub signature: UserSignature, -// } - -// // TODO: We clone these a bunch where it wouldn't be needed if we could create a reference type to -// // which the [`MeadowcapAuthorisationToken`] would deref to, but I couldn't make it work nice -// // enough. -// // #[derive(Debug, Clone, Eq, PartialEq)] -// // pub struct MeadowcapAuthorisationTokenRef<'a> { -// // /// Certifies that an Entry may be written. -// // pub capability: &'a McCapability, -// // /// Proves that the Entry was created by the receiver of the capability. -// // pub signature: &'a UserSignature, -// // } - -// impl MeadowcapAuthorisationToken { -// pub fn from_parts(capability: McCapability, signature: UserSignature) -> Self { -// Self { -// capability, -// signature, -// } -// } -// pub fn as_parts(&self) -> (&McCapability, &UserSignature) { -// (&self.capability, &self.signature) -// } - -// pub fn into_parts(self) -> (McCapability, UserSignature) { -// (self.capability, self.signature) -// } -// } - -// impl From<(McCapability, UserSignature)> for MeadowcapAuthorisationToken { -// fn from((capability, signature): (McCapability, UserSignature)) -> Self { -// Self::from_parts(capability, signature) -// } -// } - -// #[derive(Debug, Clone, derive_more::Deref, derive_more::Into)] -// pub struct ValidatedCapability(McCapability); - -// impl ValidatedCapability { -// pub fn new(cap: McCapability) -> Result { -// if cap.is_valid() { -// Ok(Self(cap)) -// } else { -// Err(InvalidCapability) -// } -// } - -// pub fn is_valid(&self) -> bool { -// true -// } - -// pub fn new_unchecked(cap: McCapability) -> Self { -// Self(cap) -// } -// } - -// #[derive( -// Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash, Ord, PartialOrd, derive_more::From, -// )] -// pub enum McCapability { -// Communal(Arc), -// Owned(Arc), -// } - -// impl McCapability { -// pub fn new_owned( -// namespace_secret: &NamespaceSecretKey, -// user_key: UserPublicKey, -// access_mode: AccessMode, -// ) -> Self { -// 
McCapability::Owned(Arc::new(OwnedCapability::new( -// namespace_secret, -// user_key, -// access_mode, -// ))) -// } - -// pub fn new_communal( -// namespace_key: NamespacePublicKey, -// user_key: UserPublicKey, -// access_mode: AccessMode, -// ) -> Self { -// McCapability::Communal(Arc::new(CommunalCapability::new( -// namespace_key, -// user_key, -// access_mode, -// ))) -// } -// pub fn access_mode(&self) -> AccessMode { -// match self { -// Self::Communal(cap) => cap.access_mode, -// Self::Owned(cap) => cap.access_mode, -// } -// } -// pub fn receiver(&self) -> &UserPublicKey { -// match self { -// Self::Communal(cap) => cap.receiver(), -// Self::Owned(cap) => cap.receiver(), -// } -// } - -// pub fn granted_namespace(&self) -> &NamespacePublicKey { -// match self { -// Self::Communal(cap) => cap.granted_namespace(), -// Self::Owned(cap) => cap.granted_namespace(), -// } -// } - -// pub fn granted_area(&self) -> Area { -// match self { -// Self::Communal(cap) => cap.granted_area(), -// Self::Owned(cap) => cap.granted_area(), -// } -// } - -// pub fn try_granted_area(&self, area: &Area) -> Result<(), Unauthorised> { -// if !self.granted_area().includes_area(area) { -// Err(Unauthorised) -// } else { -// Ok(()) -// } -// } - -// pub fn is_valid(&self) -> bool { -// match self { -// Self::Communal(cap) => cap.is_valid(), -// Self::Owned(cap) => cap.is_valid(), -// } -// } -// // pub fn validate(&self) -> Result<(), InvalidCapability> { -// pub fn validate(&self) -> anyhow::Result<()> { -// match self { -// Self::Communal(cap) => cap.validate(), -// Self::Owned(cap) => cap.validate(), -// } -// } - -// pub fn delegations(&self) -> &[Delegation] { -// match self { -// Self::Communal(cap) => &cap.delegations, -// Self::Owned(cap) => &cap.delegations, -// } -// } - -// /// Returns `true` if `self` covers a larger area than `other`, -// /// or if covers the same area and has less delegations. 
-// pub fn is_wider_than(&self, other: &Self) -> bool { -// (self.granted_area().includes_area(&other.granted_area())) -// || (self.granted_area() == other.granted_area() -// && self.delegations().len() < other.delegations().len()) -// } - -// pub fn delegate( -// &self, -// user_secret: &UserSecretKey, -// new_user: UserPublicKey, -// new_area: Area, -// ) -> anyhow::Result { -// let cap = match self { -// Self::Communal(cap) => { -// Self::Communal(Arc::new(cap.delegate(user_secret, new_user, new_area)?)) -// } -// Self::Owned(cap) => { -// Self::Owned(Arc::new(cap.delegate(user_secret, new_user, new_area)?)) -// } -// }; -// Ok(cap) -// } -// } - -// impl Encoder for McCapability { -// // TODO: Use spec-compliant encoding instead of postcard. -// fn encoded_len(&self) -> usize { -// postcard::experimental::serialized_size(&self).unwrap() -// } - -// // TODO: Use spec-compliant encoding instead of postcard. -// fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { -// postcard::to_io(&self, out)?; -// Ok(()) -// } -// } - -// impl Encoder for McSubspaceCapability { -// // TODO: Use spec-compliant encoding instead of postcard. -// fn encoded_len(&self) -> usize { -// postcard::experimental::serialized_size(&self).unwrap() -// } - -// // TODO: Use spec-compliant encoding instead of postcard. -// fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { -// postcard::to_io(&self, out)?; -// Ok(()) -// } -// } - -// #[derive(Debug, Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Hash, Ord, PartialOrd)] -// pub enum AccessMode { -// ReadOnly, -// ReadWrite, -// } - -// /// A capability that authorizes reads or writes in communal namespaces. -// #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)] -// pub struct CommunalCapability { -// /// The kind of access this grants. -// access_mode: AccessMode, -// /// The namespace in which this grants access. 
-// namespace_key: NamespacePublicKey, -// /// The subspace for which and to whom this grants access. -// /// -// /// Remember that we assume SubspaceId and UserPublicKey to be the same types. -// user_key: UserPublicKey, -// /// Successive authorisations of new UserPublicKeys, each restricted to a particular Area. -// delegations: Vec, -// } - -// impl CommunalCapability { -// pub fn new( -// namespace_key: NamespacePublicKey, -// user_key: UserPublicKey, -// access_mode: AccessMode, -// ) -> Self { -// Self { -// access_mode, -// namespace_key, -// user_key, -// delegations: Default::default(), -// } -// } -// pub fn receiver(&self) -> &UserPublicKey { -// match self.delegations.last() { -// None => &self.user_key, -// Some(Delegation(_, user_key, _)) => user_key, -// } -// } - -// pub fn granted_namespace(&self) -> &NamespacePublicKey { -// &self.namespace_key -// } - -// pub fn granted_area(&self) -> Area { -// match self.delegations.last() { -// None => Area::subspace(self.user_key.into()), -// Some(Delegation(area, _, _)) => area.clone(), -// } -// } - -// pub fn is_valid(&self) -> bool { -// self.validate().is_ok() -// } - -// pub fn validate(&self) -> anyhow::Result<()> { -// if self.delegations.is_empty() { -// // communal capabilities without delegations are always valid -// Ok(()) -// } else { -// let mut prev = None; -// let mut prev_receiver = &self.user_key; -// for delegation in self.delegations.iter() { -// let Delegation(new_area, new_user, new_signature) = &delegation; -// let signable = self.handover(prev, new_area, new_user)?; -// prev_receiver.verify(&signable, new_signature)?; -// prev = Some((new_area, new_signature)); -// prev_receiver = new_user; -// } -// Ok(()) -// } -// } - -// pub fn delegate( -// &self, -// user_secret: &UserSecretKey, -// new_user: UserPublicKey, -// new_area: Area, -// ) -> anyhow::Result { -// if user_secret.public_key() != *self.receiver() { -// anyhow::bail!("Secret key does not match receiver of current 
capability"); -// } -// let prev = self -// .delegations -// .last() -// .map(|Delegation(area, _user_key, sig)| (area, sig)); -// let handover = self.handover(prev, &new_area, &new_user)?; -// let signature = user_secret.sign(&handover); -// let delegation = Delegation(new_area, new_user, signature); -// let mut cap = self.clone(); -// cap.delegations.push(delegation); -// Ok(cap) -// } - -// fn handover( -// &self, -// prev: Option<(&Area, &UserSignature)>, -// new_area: &Area, -// new_user: &UserPublicKey, -// ) -> anyhow::Result> { -// match prev { -// None => self.initial_handover(new_area, new_user), -// Some((prev_area, prev_signature)) => Handover::new( -// prev_area, -// PrevSignature::User(prev_signature), -// new_area, -// new_user, -// )? -// .encode(), -// } -// } - -// fn initial_handover( -// &self, -// new_area: &Area, -// new_user: &UserPublicKey, -// ) -> anyhow::Result> { -// let prev_area = Area::subspace(self.user_key.into()); -// let area_in_area = AreaInArea::new(new_area, &prev_area)?; -// let len = -// 1 + NamespacePublicKey::LENGTH + area_in_area.encoded_len() + UserPublicKey::LENGTH; -// let mut out = std::io::Cursor::new(vec![0u8; len]); -// let init = match self.access_mode { -// AccessMode::ReadOnly => 0x00, -// AccessMode::ReadWrite => 0x01, -// }; -// out.write_all(&[init])?; -// out.write_all(&self.namespace_key.to_bytes())?; -// area_in_area.encode_into(&mut out)?; -// out.write_all(&new_user.to_bytes())?; -// Ok(out.into_inner()) -// } -// } - -// #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, Hash, Ord, PartialOrd)] -// pub struct Delegation(Area, UserPublicKey, UserSignature); - -// /// A capability that authorizes reads or writes in owned namespaces. -// #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)] -// pub struct OwnedCapability { -// /// The kind of access this grants. -// access_mode: AccessMode, -// /// The namespace for which this grants access. 
-// namespace_key: NamespacePublicKey, -// /// The user to whom this grants access; granting access for the full namespace_key, not just to a subspace. -// user_key: UserPublicKey, -// /// Authorisation of the user_key by the namespace_key., -// initial_authorisation: NamespaceSignature, -// /// Successive authorisations of new UserPublicKeys, each restricted to a particular Area. -// delegations: Vec, -// } - -// impl OwnedCapability { -// pub fn new( -// namespace_secret_key: &NamespaceSecretKey, -// user_key: UserPublicKey, -// access_mode: AccessMode, -// ) -> Self { -// let namespace_key = namespace_secret_key.public_key(); -// let handover = Self::initial_handover(access_mode, &user_key); -// let initial_authorisation = namespace_secret_key.sign(&handover); -// Self { -// access_mode, -// namespace_key, -// user_key, -// initial_authorisation, -// delegations: Default::default(), -// } -// } - -// pub fn receiver(&self) -> &UserPublicKey { -// match self.delegations.last() { -// None => &self.user_key, -// Some(Delegation(_, user_key, _)) => user_key, -// } -// } - -// pub fn granted_namespace(&self) -> &NamespacePublicKey { -// &self.namespace_key -// } - -// pub fn granted_area(&self) -> Area { -// match self.delegations.last() { -// None => Area::full(), -// Some(Delegation(area, _, _)) => area.clone(), -// } -// } - -// pub fn is_valid(&self) -> bool { -// self.validate().is_ok() -// } - -// pub fn validate(&self) -> anyhow::Result<()> { -// // verify root authorisation -// let handover = Self::initial_handover(self.access_mode, &self.user_key); -// self.namespace_key -// .verify(&handover, &self.initial_authorisation)?; - -// // no delegations: done -// if self.delegations.is_empty() { -// return Ok(()); -// } - -// let initial_area = Area::full(); -// let mut prev = ( -// &initial_area, -// &self.user_key, -// PrevSignature::Namespace(&self.initial_authorisation), -// ); -// for delegation in self.delegations.iter() { -// let (prev_area, prev_user, 
prev_signature) = prev; -// let Delegation(new_area, new_user, new_signature) = delegation; -// let handover = -// Handover::new(prev_area, prev_signature, new_area, new_user)?.encode()?; -// prev_user.verify(&handover, new_signature)?; -// prev = (new_area, new_user, PrevSignature::User(new_signature)); -// } -// Ok(()) -// } - -// fn initial_handover( -// access_mode: AccessMode, -// user_key: &UserPublicKey, -// ) -> [u8; PUBLIC_KEY_LENGTH + 1] { -// let mut signable = [0u8; PUBLIC_KEY_LENGTH + 1]; -// // https://willowprotocol.org/specs/meadowcap/index.html#owned_cap_valid -// // An OwnedCapability with zero delegations is valid if initial_authorisation -// // is a NamespaceSignature issued by the namespace_key over -// // either the byte 0x02 (if access_mode is read) -// // or the byte 0x03 (if access_mode is write), -// // followed by the user_key (encoded via encode_user_pk). -// signable[0] = match access_mode { -// AccessMode::ReadOnly => 0x02, -// AccessMode::ReadWrite => 0x03, -// }; -// signable[1..].copy_from_slice(user_key.as_bytes()); -// signable -// } - -// pub fn delegate( -// &self, -// secret_key: &UserSecretKey, -// new_user: UserPublicKey, -// new_area: Area, -// ) -> anyhow::Result { -// if secret_key.public_key() != *self.receiver() { -// anyhow::bail!("Secret key does not match receiver of current capability"); -// } -// let prev_signature = match self.delegations.last() { -// None => PrevSignature::Namespace(&self.initial_authorisation), -// Some(Delegation(_, _, prev_signature)) => PrevSignature::User(prev_signature), -// }; -// let prev_area = self.granted_area(); -// let handover = Handover::new(&prev_area, prev_signature, &new_area, &new_user)?; -// let signable = handover.encode()?; -// let signature = secret_key.sign(&signable); -// let delegation = Delegation(new_area, new_user, signature); -// let mut cap = self.clone(); -// cap.delegations.push(delegation); -// Ok(cap) -// } -// } - -// #[derive(Debug)] -// enum PrevSignature<'a> 
{ -// User(&'a UserSignature), -// Namespace(&'a NamespaceSignature), -// } - -// impl<'a> PrevSignature<'a> { -// fn to_bytes(&self) -> [u8; SIGNATURE_LENGTH] { -// match self { -// Self::User(sig) => sig.to_bytes(), -// Self::Namespace(sig) => sig.to_bytes(), -// } -// } -// } - -// #[derive(Debug)] -// struct Handover<'a> { -// prev_signature: PrevSignature<'a>, -// new_user: &'a UserPublicKey, -// area_in_area: AreaInArea<'a>, -// } - -// impl<'a> Handover<'a> { -// fn new( -// prev_area: &'a Area, -// prev_signature: PrevSignature<'a>, -// new_area: &'a Area, -// new_user: &'a UserPublicKey, -// ) -> Result { -// let area_in_area = AreaInArea::new(new_area, prev_area)?; -// Ok(Self { -// area_in_area, -// prev_signature, -// new_user, -// }) -// } -// } - -// impl<'a> Encoder for Handover<'a> { -// fn encoded_len(&self) -> usize { -// self.area_in_area.encoded_len() + NamespaceSignature::LENGTH + UserId::LENGTH -// } -// fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { -// self.area_in_area.encode_into(out)?; -// out.write_all(&self.prev_signature.to_bytes())?; -// out.write_all(&self.new_user.to_bytes())?; -// Ok(()) -// } -// } - -// #[derive( -// Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash, derive_more::From, Ord, PartialOrd, -// )] -// /// A capability that certifies read access to arbitrary SubspaceIds at some unspecified Path. -// pub struct McSubspaceCapability { -// /// The namespace for which this grants access. -// pub namespace_key: NamespacePublicKey, - -// /// The user to whom this grants access. -// pub user_key: UserPublicKey, - -// /// Authorisation of the user_key by the namespace_key. -// pub initial_authorisation: NamespaceSignature, - -// /// Successive authorisations of new UserPublicKeys. 
-// pub delegations: Vec<(UserPublicKey, UserSignature)>, -// } - -// impl McSubspaceCapability { -// pub fn new(namespace_secret_key: &NamespaceSecretKey, user_key: UserPublicKey) -> Self { -// let namespace_key = namespace_secret_key.public_key(); -// let handover = Self::initial_handover(&user_key); -// let initial_authorisation = namespace_secret_key.sign(&handover); -// Self { -// namespace_key, -// user_key, -// initial_authorisation, -// delegations: Default::default(), -// } -// } -// pub fn receiver(&self) -> &UserPublicKey { -// &self.user_key -// } - -// pub fn granted_namespace(&self) -> &NamespacePublicKey { -// &self.namespace_key -// } - -// pub fn validate(&self) -> anyhow::Result<()> { -// let signable = Self::initial_handover(&self.user_key); -// self.namespace_key -// .verify(&signable, &self.initial_authorisation)?; - -// if self.delegations.is_empty() { -// return Ok(()); -// } - -// let mut prev = ( -// &self.user_key, -// PrevSignature::Namespace(&self.initial_authorisation), -// ); -// for delegation in &self.delegations { -// let (prev_user, prev_signature) = prev; -// let (new_user, new_signature) = delegation; -// let handover = Self::handover(prev_signature, new_user); -// prev_user.verify(&handover, new_signature)?; -// prev = (new_user, PrevSignature::User(new_signature)); -// } -// Ok(()) -// } - -// pub fn is_valid(&self) -> bool { -// self.validate().is_ok() -// } - -// pub fn delegate( -// &self, -// secret_key: &UserSecretKey, -// new_user: UserPublicKey, -// ) -> anyhow::Result { -// if secret_key.public_key() != *self.receiver() { -// anyhow::bail!("Secret key does not match receiver of current capability"); -// } -// let prev_signature = match self.delegations.last() { -// None => PrevSignature::Namespace(&self.initial_authorisation), -// Some((_, prev_signature)) => PrevSignature::User(prev_signature), -// }; -// let handover = Self::handover(prev_signature, &new_user); -// let signature = secret_key.sign(&handover); -// let 
delegation = (new_user, signature); -// let mut cap = self.clone(); -// cap.delegations.push(delegation); -// Ok(cap) -// } - -// fn handover( -// prev_signature: PrevSignature, -// new_user: &UserPublicKey, -// ) -> [u8; PUBLIC_KEY_LENGTH + SIGNATURE_LENGTH] { -// let mut out = [0u8; PUBLIC_KEY_LENGTH + SIGNATURE_LENGTH]; -// out[..SIGNATURE_LENGTH].copy_from_slice(&prev_signature.to_bytes()); -// out[SIGNATURE_LENGTH..].copy_from_slice(new_user.as_bytes()); -// out -// } - -// fn initial_handover(user_key: &UserPublicKey) -> [u8; PUBLIC_KEY_LENGTH + 1] { -// let mut signable = [0u8; PUBLIC_KEY_LENGTH + 1]; -// // A McSubspaceCapability with zero delegations is valid if initial_authorisation -// // is a NamespaceSignature issued by the namespace_key over the byte 0x02, -// // followed by the user_key (encoded via encode_user_pk). -// // via https://willowprotocol.org/specs/pai/index.html#subspace_cap_valid -// signable[0] = 0x02; -// signable[1..].copy_from_slice(user_key.as_bytes()); -// signable -// } -// } - -// #[cfg(test)] -// mod tests { -// use rand_core::SeedableRng; - -// use crate::proto::{ -// grouping::Area, -// keys::{NamespaceKind, NamespaceSecretKey, UserSecretKey}, -// }; - -// use super::{AccessMode, McCapability}; - -// #[test] -// fn delegate_owned() { -// let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(1); -// let namespace_secret = NamespaceSecretKey::generate(&mut rng, NamespaceKind::Owned); -// let alfie_secret = UserSecretKey::generate(&mut rng); -// let betty_secret = UserSecretKey::generate(&mut rng); -// let alfie_public = alfie_secret.public_key(); -// let betty_public = betty_secret.public_key(); -// let cap = McCapability::new_owned(&namespace_secret, alfie_public, AccessMode::ReadWrite); -// cap.validate().expect("cap to be valid"); -// let cap_betty = cap -// .delegate(&alfie_secret, betty_public, Area::full()) -// .expect("not to fail"); -// cap_betty.validate().expect("cap to be valid"); -// let conny_secret = 
UserSecretKey::generate(&mut rng); -// let conny_public = conny_secret.public_key(); -// let cap_conny = cap_betty -// .delegate( -// &betty_secret, -// conny_public, -// Area::subspace(conny_public.id()), -// ) -// .expect("not to fail"); -// cap_conny.validate().expect("cap to be valid"); -// assert_eq!(cap_conny.granted_area(), Area::subspace(conny_public.id())); -// assert_eq!(cap_conny.receiver(), &conny_public); -// } -// } diff --git a/iroh-willow/src/proto/sync.rs b/iroh-willow/src/proto/sync.rs deleted file mode 100644 index a15e7a0752..0000000000 --- a/iroh-willow/src/proto/sync.rs +++ /dev/null @@ -1 +0,0 @@ -pub use super::wgps::*; diff --git a/iroh-willow/src/session/data.rs b/iroh-willow/src/session/data.rs index 2aaea50404..892333cda1 100644 --- a/iroh-willow/src/session/data.rs +++ b/iroh-willow/src/session/data.rs @@ -85,7 +85,7 @@ impl DataSender { } async fn send_entry(&mut self, authorised_entry: AuthorisedEntry) -> Result<(), Error> { - let AuthorisedEntry(entry, token) = authorised_entry; + let (entry, token) = authorised_entry.into_parts(); let static_token: StaticToken = token.capability.into(); let dynamic_token = token.signature; // TODO: partial payloads @@ -163,7 +163,7 @@ impl DataReceiver { channel: EntryChannel::Data, }, )?; - let entry = authorised_entry.0; + let (entry, _token) = authorised_entry.into_parts(); // TODO: handle offset self.current_payload .set(*entry.payload_digest(), entry.payload_length())?; diff --git a/iroh-willow/src/session/error.rs b/iroh-willow/src/session/error.rs index 2448e2c264..0c4450b04d 100644 --- a/iroh-willow/src/session/error.rs +++ b/iroh-willow/src/session/error.rs @@ -1,7 +1,7 @@ use ed25519_dalek::SignatureError; use crate::{ - proto::{data_model::Unauthorised, meadowcap::UserId, wgps::ResourceHandle}, + proto::{data_model::UnauthorisedWriteError, meadowcap::UserId, wgps::ResourceHandle}, session::{pai_finder::PaiError, resource::MissingResource}, store::traits::SecretStoreError, 
util::channel::{ReadError, WriteError}, @@ -43,8 +43,10 @@ pub enum Error { AreaOfInterestNamespaceMismatch, #[error("our and their area of interests do not overlap")] AreaOfInterestDoesNotOverlap, + #[error("received an area of interest which is not authorised")] + UnauthorisedArea, #[error("received an entry which is not authorised")] - UnauthorisedEntryReceived, + UnauthorisedWrite(#[from] UnauthorisedWriteError), #[error("received an unsupported message type")] UnsupportedMessage, #[error("received a message that is intended for another channel")] @@ -110,11 +112,6 @@ impl PartialEq for Error { impl Eq for Error {} -impl From for Error { - fn from(_value: Unauthorised) -> Self { - Self::UnauthorisedEntryReceived - } -} // impl From for Error { // fn from(_value: meadowcap::InvalidCapability) -> Self { // Self::InvalidCapability diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index a0d9199f28..05035c432d 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -10,10 +10,7 @@ use tracing::{debug, error_span, trace, warn, Instrument, Span}; use crate::{ net::ConnHandle, - proto::{ - data_model::Unauthorised, - wgps::{ControlIssueGuarantee, LogicalChannel, Message, SetupBindAreaOfInterest}, - }, + proto::wgps::{ControlIssueGuarantee, LogicalChannel, Message, SetupBindAreaOfInterest}, session::{ aoi_finder::{self, IntersectionFinder}, capabilities::Capabilities, @@ -324,7 +321,7 @@ pub(crate) async fn run_session( let area_of_interest = area_of_interest.0; let cap = caps.get_theirs_eventually(authorisation).await; if !cap.granted_area().includes_area(&area_of_interest.area) { - return Err(Unauthorised.into()); + return Err(Error::UnauthorisedArea); } let namespace = *cap.granted_namespace(); intersection_inbox diff --git a/iroh-willow/src/session/static_tokens.rs b/iroh-willow/src/session/static_tokens.rs index 16d5cc3053..cf2a3b0fda 100644 --- a/iroh-willow/src/session/static_tokens.rs +++ 
b/iroh-willow/src/session/static_tokens.rs @@ -54,13 +54,8 @@ impl StaticTokens { }) .await; - let token = AuthorisationToken { - signature: dynamic_token, - capability: static_token.into(), - }; - - let authorised_entry = AuthorisedEntry::try_authorise(entry, token)?; - + let token = AuthorisationToken::new(static_token.0, dynamic_token); + let authorised_entry = AuthorisedEntry::new(entry, token)?; Ok(authorised_entry) } } diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index ad88acb4cb..0664ed5bf6 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -76,12 +76,19 @@ impl Store { .secrets() .get_user(&user_id) .context("Missing user keypair")?; + + // TODO(frando): This should use `authorisation_token_unchecked` if we uphold the invariant + // that `user_id` is a pubkey for `secret_key`. However, that is `unsafe` at the moment + // (but should not be, IMO). + // Not using the `_unchecked` variant has the cost of an additional signature verification, + // so significant. 
let token = capability.authorisation_token(&entry, secret_key)?; - let authorised_entry = AuthorisedEntry(entry, token); + let authorised_entry = AuthorisedEntry::new_unchecked(entry, token); let inserted = self .entries() .ingest(&authorised_entry, EntryOrigin::Local)?; - Ok((authorised_entry.0, inserted)) + let (entry, _token) = authorised_entry.into_parts(); + Ok((entry, inserted)) } pub fn create_namespace( diff --git a/iroh-willow/src/store/entry.rs b/iroh-willow/src/store/entry.rs index f5b3e21c16..dd92b12f67 100644 --- a/iroh-willow/src/store/entry.rs +++ b/iroh-willow/src/store/entry.rs @@ -148,7 +148,7 @@ impl Broadcaster { } fn broadcast(&mut self, entry: &AuthorisedEntry, origin: EntryOrigin) { - let Some(sessions) = self.watched_areas.get_mut(entry.namespace_id()) else { + let Some(sessions) = self.watched_areas.get_mut(entry.entry().namespace_id()) else { return; }; let mut dropped_receivers = vec![]; diff --git a/iroh-willow/src/store/memory.rs b/iroh-willow/src/store/memory.rs index 6db5e50754..fef65673fc 100644 --- a/iroh-willow/src/store/memory.rs +++ b/iroh-willow/src/store/memory.rs @@ -120,7 +120,7 @@ impl traits::EntryReader for Rc> { .filter_map(|e| e.ok()) .collect(); - entries.sort_by(|e1, e2| e1.as_set_sort_tuple().cmp(&e2.as_set_sort_tuple())); + entries.sort_by(|e1, e2| e1.as_sortable_tuple().cmp(&e2.as_sortable_tuple())); let split_index = count / 2; let mid = entries.get(split_index).expect("not empty"); @@ -210,7 +210,10 @@ impl traits::EntryStorage for Rc> { fn ingest_entry(&self, entry: &AuthorisedEntry) -> Result { let mut slf = self.borrow_mut(); - let entries = slf.entries.entry(*entry.namespace_id()).or_default(); + let entries = slf + .entries + .entry(*entry.entry().namespace_id()) + .or_default(); let new = entry.entry(); let mut to_remove = vec![]; for (i, existing) in entries.iter().enumerate() { diff --git a/iroh-willow/src/store/traits.rs b/iroh-willow/src/store/traits.rs index be5235a59f..caa4beb735 100644 --- 
a/iroh-willow/src/store/traits.rs +++ b/iroh-willow/src/store/traits.rs @@ -102,7 +102,7 @@ pub trait EntryReader: Debug + 'static { range: &Range3d, ) -> impl Iterator> { self.get_entries_with_authorisation(namespace, range) - .map(|e| e.map(|e| e.0)) + .map(|e| e.map(|e| e.into_parts().0)) } } diff --git a/iroh-willow/src/util.rs b/iroh-willow/src/util.rs index 948766804f..f83462e079 100644 --- a/iroh-willow/src/util.rs +++ b/iroh-willow/src/util.rs @@ -2,6 +2,7 @@ pub mod channel; pub mod codec; +pub mod codec2; pub mod gen_stream; pub mod queue; pub mod stream; diff --git a/iroh-willow/src/util/codec2.rs b/iroh-willow/src/util/codec2.rs new file mode 100644 index 0000000000..bbee8cf0f1 --- /dev/null +++ b/iroh-willow/src/util/codec2.rs @@ -0,0 +1,35 @@ +use std::convert::Infallible; + +use ufotofu::sync::{consumer::IntoVec, producer::FromSlice}; +use willow_encoding::{ + sync::{Decodable, Encodable, RelativeDecodable, RelativeEncodable}, + DecodeError, +}; + +pub fn from_bytes(data: &[u8]) -> Result> { + let mut producer = FromSlice::new(data); + let decoded = T::decode(&mut producer)?; + Ok(decoded) +} + +pub fn to_vec(item: &T) -> Vec { + let mut consumer = IntoVec::new(); + item.encode(&mut consumer).expect("infallible"); + consumer.into_vec() +} + +pub fn from_bytes_relative, U>( + previous: &U, + data: &[u8], +) -> Result> { + let mut producer = FromSlice::new(data); + let decoded = T::relative_decode(previous, &mut producer)?; + Ok(decoded) +} + +pub fn to_vec_relative, U>(previous: &U, item: &T) -> Vec { + let mut consumer = IntoVec::new(); + item.relative_encode(previous, &mut consumer) + .expect("infallible"); + consumer.into_vec() +} diff --git a/iroh-willow/tests/basic.rs b/iroh-willow/tests/basic.rs index 5fecda1135..41ab3d4f06 100644 --- a/iroh-willow/tests/basic.rs +++ b/iroh-willow/tests/basic.rs @@ -7,8 +7,8 @@ use futures_lite::StreamExt; use iroh_willow::{ interest::{Interests, IntoAreaOfInterest}, proto::{ + data_model::{Path, PathExt}, 
grouping::{Area, AreaExt}, - willow::{Path, PathExt}, }, session::{ intents::{Completion, EventKind}, @@ -291,9 +291,9 @@ mod util { interest::{CapSelector, DelegateTo, RestrictArea}, net::ALPN, proto::{ + data_model::{Path, PathExt}, keys::{NamespaceId, NamespaceKind, UserId}, meadowcap::AccessMode, - willow::{Path, PathExt}, }, }; From f43e0b4dab23d53cf01b3cf6faac69ccab4bc74c Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 13 Aug 2024 00:49:00 +0200 Subject: [PATCH 129/198] fix: use their_max_payload_size --- iroh-willow/src/session/challenge.rs | 12 ++++++------ iroh-willow/src/session/payload.rs | 6 ++++++ iroh-willow/src/session/reconciler.rs | 22 +++++++++++----------- iroh-willow/src/session/run.rs | 1 + 4 files changed, 24 insertions(+), 17 deletions(-) diff --git a/iroh-willow/src/session/challenge.rs b/iroh-willow/src/session/challenge.rs index c3664128ef..79ecd1ca66 100644 --- a/iroh-willow/src/session/challenge.rs +++ b/iroh-willow/src/session/challenge.rs @@ -1,6 +1,6 @@ use super::{Error, Role}; use crate::proto::{ - keys::{UserPublicKey, UserSecretKey, UserSignature}, + keys::{UserPublicKey, UserSignature}, wgps::challenge::{AccessChallenge, AccessChallengeBytes, ChallengeHash}, }; @@ -57,11 +57,11 @@ impl ChallengeState { matches!(self, Self::Revealed { .. 
}) } - pub fn sign(&self, secret_key: &UserSecretKey) -> Result { - let signable = self.signable()?; - let signature = secret_key.sign(&signable); - Ok(signature) - } + // pub fn sign(&self, secret_key: &UserSecretKey) -> Result { + // let signable = self.signable()?; + // let signature = secret_key.sign(&signable); + // Ok(signature) + // } pub fn signable(&self) -> Result<[u8; 32], Error> { let challenge = self.get_ours()?; diff --git a/iroh-willow/src/session/payload.rs b/iroh-willow/src/session/payload.rs index 6c6565c072..d89cf4fb44 100644 --- a/iroh-willow/src/session/payload.rs +++ b/iroh-willow/src/session/payload.rs @@ -15,6 +15,12 @@ use super::Error; pub const DEFAULT_CHUNK_SIZE: usize = 1024 * 64; +/// Send a payload in chunks. +/// +/// Returns `true` if the payload was sent. +/// Returns `false` if blob is not found in `payload_store`. +/// Returns an error if the store or sending on the `senders` return an error. +// TODO: Include outboards. pub async fn send_payload_chunked( digest: PayloadDigest, payload_store: &P, diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index 173940fd50..fe76147607 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -25,7 +25,7 @@ use crate::{ session::{ aoi_finder::AoiIntersection, channels::{ChannelSenders, MessageReceiver}, - payload::{send_payload_chunked, CurrentPayload}, + payload::{send_payload_chunked, CurrentPayload, DEFAULT_CHUNK_SIZE}, static_tokens::StaticTokens, Error, Role, SessionId, }, @@ -73,6 +73,7 @@ impl Reconciler { session_id: SessionId, send: ChannelSenders, our_role: Role, + max_eager_payload_size: u64, ) -> impl futures_lite::Stream> { GenStream::new(|co| { let shared = Shared { @@ -82,6 +83,7 @@ impl Reconciler { send, static_tokens, session_id, + max_eager_payload_size, }; Self { shared, @@ -366,6 +368,7 @@ struct Shared { send: ChannelSenders, static_tokens: StaticTokens, session_id: SessionId, + 
max_eager_payload_size: u64, } #[derive(Debug)] @@ -563,7 +566,8 @@ impl Target { let static_token = token.capability.into(); let dynamic_token = token.signature; // TODO: partial payloads - let available = entry.payload_length(); + let payload_len = entry.payload_length(); + let available = payload_len; let static_token_handle = shared .static_tokens .bind_and_send_ours(static_token, &shared.send) @@ -577,21 +581,17 @@ impl Target { shared.send.send(msg).await?; // TODO: only send payload if configured to do so and/or under size limit. - let send_payloads = true; - let chunk_size = 1024 * 64; - if send_payloads - && send_payload_chunked( + if payload_len <= shared.max_eager_payload_size { + send_payload_chunked( digest, shared.store.payloads(), &shared.send, - chunk_size, + DEFAULT_CHUNK_SIZE, |bytes| ReconciliationSendPayload { bytes }.into(), ) - .await? - { - let msg = ReconciliationTerminatePayload; - shared.send.send(msg).await?; + .await?; } + shared.send.send(ReconciliationTerminatePayload).await?; } Ok(()) } diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 05035c432d..344e342af2 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -249,6 +249,7 @@ pub(crate) async fn run_session( session_id, channel_sender.clone(), our_role, + initial_transmission.their_max_payload_size, ); while let Some(output) = gen.try_next().await? 
{ match output { From 435fbfdec3287b6f88ca7faece7a708a9b57621a Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 13 Aug 2024 10:05:13 +0200 Subject: [PATCH 130/198] fix: simplify and fix graceful connection termination --- Cargo.lock | 1 + iroh-willow/Cargo.toml | 1 + iroh-willow/src/engine/peer_manager.rs | 69 ++++------ iroh-willow/src/net.rs | 176 ++++++++++--------------- iroh-willow/tests/basic.rs | 1 + 5 files changed, 100 insertions(+), 148 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6017f1bbc5..7036ca9dd2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3020,6 +3020,7 @@ dependencies = [ "iroh-blobs", "iroh-metrics", "iroh-net", + "iroh-quinn", "iroh-test", "postcard", "proptest", diff --git a/iroh-willow/Cargo.toml b/iroh-willow/Cargo.toml index b4e03230ae..b1c97246ef 100644 --- a/iroh-willow/Cargo.toml +++ b/iroh-willow/Cargo.toml @@ -29,6 +29,7 @@ iroh-metrics = { version = "0.22.0", path = "../iroh-metrics", optional = true } iroh-net = { version = "0.22.0", path = "../iroh-net" } iroh-blobs = { version = "0.22.0", path = "../iroh-blobs" } postcard = { version = "1", default-features = false, features = ["alloc", "use-std", "experimental-derive"] } +quinn = { package = "iroh-quinn", version = "0.10.5" } rand = "0.8.5" rand_core = "0.6.4" redb = { version = "2.0.0" } diff --git a/iroh-willow/src/engine/peer_manager.rs b/iroh-willow/src/engine/peer_manager.rs index e5b2c7ec27..daea1d3539 100644 --- a/iroh-willow/src/engine/peer_manager.rs +++ b/iroh-willow/src/engine/peer_manager.rs @@ -307,6 +307,7 @@ impl PeerManager { let cancel_dial2 = cancel_dial.clone(); // Future that dials and establishes the connection. Can be cancelled for simultaneous connection. let fut = async move { + debug!("connecting"); let conn = tokio::select! 
{ res = endpoint.connect_by_node_id(peer, ALPN) => res, _ = cancel_dial.cancelled() => { @@ -361,12 +362,12 @@ impl PeerManager { SessionEvent::Established => {} SessionEvent::Complete { result, - we_cancelled, senders, remaining_intents, mut update_receiver, + we_cancelled: _ } => { - trace!(error=?result.err(), ?we_cancelled, ?remaining_intents, "session complete"); + trace!(error=?result.err(), ?remaining_intents, "session complete"); // Close the channel senders. This will cause our send loops to close, // which in turn causes the receive loops of the other peer to close. @@ -393,8 +394,7 @@ impl PeerManager { old_intents: remaining_intents, new_intents, }; - // Store whether we initiated the termination. We will need this for the graceful termination logic later. - peer_info.we_cancelled = we_cancelled; + debug!("entering closing state"); } } } @@ -431,10 +431,10 @@ impl PeerManager { } } _ => { - warn!(?err, "connection failed"); let peer = self.peers.remove(&peer).expect("just checked"); match peer.state { PeerState::Pending { intents, .. } => { + warn!(?err, "connection failed while pending"); // If we were still in pending state, terminate all pending intents. let err = Arc::new(Error::Net(err)); join_all( @@ -448,6 +448,7 @@ impl PeerManager { old_intents, new_intents, } => { + debug!(?err, "connection failed to close gracefully"); // If we were are in closing state, we still forward the connection error to the intents. // This would be the place where we'd implement retries: instead of aborting the intents, resubmit them. // Right now, we only resubmit intents that were submitted while terminating a session, and only if the session closed gracefully. @@ -460,13 +461,20 @@ impl PeerManager { ) .await; } - _ => { - // TODO: Not sure if this is good practice? + PeerState::Active { .. } => { + // We do not care about intents here, they will be handled in the + // session (which will error as well because all channels are now + // closed). 
+ warn!(?err, "connection failed while active"); + // TODO:(Frando): Not sure if this is good practice? // A `debug_assert` is far too much, because this can be triggered by other peers. // However in tests I want to make sure that *all* connections terminate gracefully. #[cfg(test)] panic!("connection failed: {err:?}"); } + PeerState::None => { + warn!(?err, "connection failed while peer is in None state"); + } } } } @@ -535,11 +543,9 @@ impl PeerManager { } Ok(ConnStep::Done { conn }) => { trace!("connection loop finished"); - let we_cancelled = peer_info.we_cancelled; - let me = self.endpoint.node_id(); let fut = async move { - let error = terminate_gracefully(&conn, me, peer, we_cancelled).await?; - Ok(ConnStep::Closed { conn, error }) + terminate_gracefully(&conn).await?; + Ok(ConnStep::Closed { conn }) }; let abort_handle = spawn_conn_task(&mut self.conn_tasks, &peer_info, fut); if let PeerState::Closing { .. } = &peer_info.state { @@ -548,42 +554,28 @@ impl PeerManager { // TODO: What do we do with the closing abort handle in case we have a new connection already? } } - Ok(ConnStep::Closed { error, conn }) => { - match &error { - None => debug!("connection closed gracefully"), - Some(error) => warn!(?error, "failed to close connection gracefully"), - } + Ok(ConnStep::Closed { conn }) => { + debug!("connection closed gracefully"); + drop(conn); + let peer_info = self.peers.remove(&peer).expect("just checked"); if let PeerState::Closing { - ref mut new_intents, - ref mut old_intents, + new_intents, + old_intents, } = peer_info.state { - if let Some(error) = error { - // If the connection did not close gracefully, terminate the pending intents with the connection error. - let err = Arc::new(Error::Net(error.into())); - join_all( - old_intents - .drain(..) - .map(|intent| intent.send_abort(err.clone())), - ) - .await; - } else { - // Otherwise, just drop the old intents. 
- let _ = old_intents.drain(..); - }; - let intents = std::mem::take(new_intents); - self.peers.remove(&peer); - if !intents.is_empty() { + drop(old_intents); + if !new_intents.is_empty() { debug!( "resubmitting {} intents that were not yet processed", - intents.len() + new_intents.len() ); - for intent in intents { + for intent in new_intents { self.submit_intent(peer, intent).await; } } + } else { + warn!(state=%peer_info.state, "reached closed step for peer in wrong state"); } - drop(conn); } } Ok(()) @@ -631,7 +623,6 @@ struct PeerInfo { abort_handle: Option, state: PeerState, span: Span, - we_cancelled: bool, } impl PeerInfo { @@ -642,7 +633,6 @@ impl PeerInfo { abort_handle: None, state: PeerState::None, span: error_span!("conn", peer=%peer.fmt_short()), - we_cancelled: false, } } } @@ -676,7 +666,6 @@ enum ConnStep { }, Closed { conn: Connection, - error: Option, }, } diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index ee8fcfe88b..eafb8c5190 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -4,7 +4,8 @@ use anyhow::{anyhow, ensure, Context as _, Result}; use futures_concurrency::future::TryJoin; use futures_util::future::TryFutureExt; use iroh_base::key::NodeId; -use iroh_net::endpoint::{Connection, ConnectionError, RecvStream, SendStream, VarInt}; +use iroh_net::endpoint::{Connection, ConnectionError, ReadError, RecvStream, SendStream, VarInt}; +use quinn::ReadExactError; use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tracing::{debug, trace}; @@ -72,7 +73,7 @@ pub(crate) async fn establish( our_nonce: AccessChallenge, ) -> Result<(InitialTransmission, ChannelStreams)> { debug!(?our_role, "establishing connection"); - // Run the initial transmission (which works on uni streams) concurrently + // Run the initial transmission (which uses uni streams) concurrently // with opening/accepting the bi streams for the channels. 
let fut = ( initial_transmission(conn, our_nonce), @@ -121,13 +122,13 @@ async fn open_channel_streams(conn: &Connection, our_role: Role) -> Result { Channel::all() - .map(|ch| { + .map(|channel| { let conn = conn.clone(); async move { let (mut send, recv) = conn.open_bi().await?; - send.write_u8(ch.id()).await?; - trace!(?ch, "opened bi stream"); - Ok::<_, anyhow::Error>((ch, send, recv)) + send.write_u8(channel.id()).await?; + trace!(?channel, "opened bi stream"); + Ok::<_, anyhow::Error>((channel, send, recv)) } }) .try_join() @@ -143,7 +144,7 @@ async fn open_channel_streams(conn: &Connection, our_role: Role) -> Result R } trace!("recv: stream close"); channel_writer.close(); - trace!("recv: loop close"); + trace!("recv: done"); Ok(()) } @@ -275,103 +276,66 @@ async fn send_loop(mut send_stream: SendStream, channel_reader: Reader) -> Resul Ok(()) } -/// Terminate a connection gracefully. +/// Terminates a connection gracefully. /// -/// QUIC does not allow us to rely on stream terminations, because those only signal -/// reception in the peer's QUIC stack, not in the application. Closing a QUIC connection -/// triggers immediate termination, so to make sure that all data was actually processed -/// by our session, we exchange a single byte over a pair of uni streams. As this is the only -/// use of uni streams after the initial connection handshake, we do not have to identify the -/// streams specifically. +/// This function should be called after all bidirectional streams are terminated (`finish` called +/// for send streams and `read_to_end` awaited for recv stream) and no further streams will be +/// opened or accepted by the application. /// -/// This function may only be called once the session processing has fully terminated and all -/// WGPS streams are closed (for send streams) and read to end (for recv streams) on our side. 
+/// It will send a goodbye byte over a newly opened uni channel, and wait for the other peer to +/// confirm either by sending a goodbye byte as well or closing the connection with +/// [`ERROR_CODE_OK`], signalling that our goodbye byte was received. /// -/// `we_cancelled` is a boolean indicating whether we are terminating the connection after -/// we willfully terminated or completed our session. Pass `false` if the session terminated -/// because the other peer closed their WGPS streams. +/// We will only close the connection after having received the goodbye byte, or after the other +/// peer closed the connection. /// -/// If only one peer indicated that they initiated the termination by setting `we_cancelled` -/// to `true`, this peer will *not* close the connection, but instead wait for the other peer -/// to close the connection. -/// If both peers indicated that they initiated the termination, the peer with the higher node id -/// will close the connection first. -/// If none of the peers said they closed, which likely is a bug in the implementation, both peers -/// will close the connection. +/// This flow guarantees that neither peer will close the connection too early. /// /// A connection is considered to be closed gracefully if and only if this procedure is run to end /// successfully, and if the connection is closed with the expected error code. /// -/// Returns an error if the termination flow was aborted prematurely. -/// Returns a [`ConnectionError] if the termination flow was completed successfully, but the connection -/// was not closed with the expected error code. 
-pub(crate) async fn terminate_gracefully( - conn: &Connection, - me: NodeId, - peer: NodeId, - we_cancelled: bool, -) -> Result> { - trace!(?we_cancelled, "terminating connection"); - let send = async { - let mut send_stream = conn.open_uni().await?; - let data = if we_cancelled { 1u8 } else { 0u8 }; - send_stream.write_u8(data).await?; - send_stream.finish().await?; - Ok(()) - }; - - let recv = async { - let mut recv_stream = conn.accept_uni().await?; - let data = recv_stream.read_u8().await?; - recv_stream.read_to_end(0).await?; - let they_cancelled = match data { - 0 => false, - 1 => true, - _ => return Err(anyhow!("received unexpected closing byte from peer")), - }; - Ok(they_cancelled) - }; - - let send_and_recv = (send, recv).try_join(); - let (_, they_cancelled) = tokio::time::timeout(SHUTDOWN_TIMEOUT, send_and_recv).await??; - - #[derive(Debug)] - enum WhoCancelled { - WeDid, - TheyDid, - BothDid, - NoneDid, - } - - let who_cancelled = match (we_cancelled, they_cancelled) { - (true, false) => WhoCancelled::WeDid, - (false, true) => WhoCancelled::TheyDid, - (true, true) => WhoCancelled::BothDid, - (false, false) => WhoCancelled::NoneDid, - }; +/// Returns an error if the termination flow was aborted prematurely or if the connection was not +/// closed with the expected error code. +pub(crate) async fn terminate_gracefully(conn: &Connection) -> Result<()> { + trace!("terminating connection"); + // Send a single byte on a newly opened uni stream. + let mut send_stream = conn.open_uni().await?; + send_stream.write_u8(1).await?; + send_stream.finish().await?; + // Wait until we either receive the goodbye byte from the other peer, or for the other peer + // to close the connection with the expected error code. + wait_for_goodbye_or_graceful_close(conn).await?; + // Only now close the connection. 
+ conn.close(ERROR_CODE_OK, b"bye"); + trace!("connection terminated gracefully"); + Ok(()) +} - let we_close_first = match who_cancelled { - WhoCancelled::WeDid => false, - WhoCancelled::TheyDid => true, - WhoCancelled::BothDid => me > peer, - WhoCancelled::NoneDid => true, - }; - debug!(?who_cancelled, "connection complete"); - if we_close_first { - conn.close(ERROR_CODE_OK, b"bye"); - } - let reason = conn.closed().await; - let is_graceful = match &reason { - ConnectionError::LocallyClosed if we_close_first => true, - ConnectionError::ApplicationClosed(frame) if frame.error_code == ERROR_CODE_OK => { - !we_close_first || matches!(who_cancelled, WhoCancelled::NoneDid) +async fn wait_for_goodbye_or_graceful_close(conn: &Connection) -> Result<()> { + let mut recv_stream = match conn.accept_uni().await { + // The other peer closed the connection with the expected error code: They received our + // goodbye byte after having sent theirs. We're free to close the connection. + Err(ConnectionError::ApplicationClosed(frame)) if frame.error_code == ERROR_CODE_OK => { + return Ok(()) } - _ => false, + // The peer closed the connection with an unexpected error coe. + Err(err) => return Err(err.into()), + Ok(stream) => stream, }; - if !is_graceful { - Ok(Some(reason)) - } else { - Ok(None) + let mut buf = [0u8]; + match recv_stream.read_exact(&mut buf).await { + // We received the goodbye byte: the other peer signals having read everything, we're freee + // to close the connection. + Ok(()) if buf == [1u8] => Ok(()), + // The other peer closed the connection with the expected error code: They received our + // goodbye byte after having sent theirs. We're free to close the connection. + Err(ReadExactError::ReadError(ReadError::ConnectionLost( + ConnectionError::ApplicationClosed(frame), + ))) if frame.error_code == ERROR_CODE_OK => Ok(()), + // The peer has sent invalid data on the goodbye stream. 
+ Ok(()) => Err(anyhow!("received unexpected closing byte from peer")), + // The peer closed the connection with an unexpected error coe. + Err(err) => Err(err.into()), } } @@ -552,8 +516,8 @@ mod tests { info!(time=?start.elapsed(), "reconciliation finished"); - let (senders_alfie, alfie_cancelled) = res_alfie.unwrap(); - let (senders_betty, betty_cancelled) = res_betty.unwrap(); + let (senders_alfie, _alfie_cancelled) = res_alfie.unwrap(); + let (senders_betty, _betty_cancelled) = res_betty.unwrap(); senders_alfie.close_all(); senders_betty.close_all(); @@ -562,13 +526,11 @@ mod tests { r1.unwrap(); r2.unwrap(); - let (error_alfie, error_betty) = tokio::try_join!( - terminate_gracefully(&conn_alfie, node_id_alfie, node_id_betty, alfie_cancelled), - terminate_gracefully(&conn_betty, node_id_betty, node_id_alfie, betty_cancelled), + tokio::try_join!( + terminate_gracefully(&conn_alfie), + terminate_gracefully(&conn_betty), ) .expect("failed to close both connections gracefully"); - assert_eq!(error_alfie, None); - assert_eq!(error_betty, None); let alfie_entries = get_entries(&handle_alfie, namespace_id).await?; let betty_entries = get_entries(&handle_betty, namespace_id).await?; @@ -708,14 +670,14 @@ mod tests { tokio::time::sleep(Duration::from_secs(1)).await; session_alfie.close(); - let (senders_alfie, alfie_cancelled) = session_alfie + let (senders_alfie, _alfie_cancelled) = session_alfie .complete() .await .expect("failed to close alfie session"); info!("close alfie session"); senders_alfie.close_all(); - let (senders_betty, betty_cancelled) = session_betty + let (senders_betty, _betty_cancelled) = session_betty .complete() .await .expect("failed to close alfie session"); @@ -736,13 +698,11 @@ mod tests { assert!(res_alfie.is_ok()); assert!(res_betty.is_ok()); - let (error_alfie, error_betty) = tokio::try_join!( - terminate_gracefully(&conn_alfie, node_id_alfie, node_id_betty, alfie_cancelled), - terminate_gracefully(&conn_betty, node_id_betty, node_id_alfie, 
betty_cancelled), + tokio::try_join!( + terminate_gracefully(&conn_alfie), + terminate_gracefully(&conn_betty), ) .expect("failed to close both connections gracefully"); - assert_eq!(error_alfie, None); - assert_eq!(error_betty, None); info!("alfie session res {:?}", res_alfie); info!("betty session res {:?}", res_betty); diff --git a/iroh-willow/tests/basic.rs b/iroh-willow/tests/basic.rs index ef95b8bcea..8323568472 100644 --- a/iroh-willow/tests/basic.rs +++ b/iroh-willow/tests/basic.rs @@ -309,6 +309,7 @@ mod util { ) -> Result { let endpoint = Endpoint::builder() .secret_key(secret_key) + .relay_mode(iroh_net::relay::RelayMode::Disabled) .alpns(vec![ALPN.to_vec()]) .bind(0) .await?; From 8b5e26e6aaa068b8c4acd447179dc4f6ada223d7 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 13 Aug 2024 11:59:21 +0200 Subject: [PATCH 131/198] fix: close connection on error too --- iroh-willow/src/net.rs | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index eafb8c5190..5c73c54437 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -31,6 +31,9 @@ pub const CHANNEL_CAP: usize = 1024 * 64; /// The ALPN protocol name for iroh-willow. pub const ALPN: &[u8] = b"iroh-willow/0"; +/// QUIC application error code for closing with failure. +pub const ERROR_CODE_FAIL: VarInt = VarInt::from_u32(0); + /// QUIC application error code for graceful connection termination. pub const ERROR_CODE_OK: VarInt = VarInt::from_u32(1); @@ -304,11 +307,18 @@ pub(crate) async fn terminate_gracefully(conn: &Connection) -> Result<()> { send_stream.finish().await?; // Wait until we either receive the goodbye byte from the other peer, or for the other peer // to close the connection with the expected error code. - wait_for_goodbye_or_graceful_close(conn).await?; - // Only now close the connection. 
- conn.close(ERROR_CODE_OK, b"bye"); - trace!("connection terminated gracefully"); - Ok(()) + match wait_for_goodbye_or_graceful_close(conn).await { + Ok(()) => { + conn.close(ERROR_CODE_OK, b"bye"); + trace!("connection terminated gracefully"); + Ok(()) + }, + Err(err) => { + conn.close(ERROR_CODE_FAIL, b"peer-failed-while-closing"); + trace!(?err, "connection failed while terminating"); + Err(err) + } + } } async fn wait_for_goodbye_or_graceful_close(conn: &Connection) -> Result<()> { From 0c518c81b7173def308e7517491e4d36ab259686 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 13 Aug 2024 12:18:45 +0200 Subject: [PATCH 132/198] fix: don't use 0 error code --- iroh-willow/src/net.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 5c73c54437..1e85c7fc0a 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -32,16 +32,16 @@ pub const CHANNEL_CAP: usize = 1024 * 64; pub const ALPN: &[u8] = b"iroh-willow/0"; /// QUIC application error code for closing with failure. -pub const ERROR_CODE_FAIL: VarInt = VarInt::from_u32(0); +pub const ERROR_CODE_FAIL: VarInt = VarInt::from_u32(1); /// QUIC application error code for graceful connection termination. -pub const ERROR_CODE_OK: VarInt = VarInt::from_u32(1); +pub const ERROR_CODE_OK: VarInt = VarInt::from_u32(2); /// QUIC application error code for closing connections because another connection is preferred. -pub const ERROR_CODE_DUPLICATE_CONN: VarInt = VarInt::from_u32(2); +pub const ERROR_CODE_DUPLICATE_CONN: VarInt = VarInt::from_u32(3); /// QUIC application error code when closing connection because our node is shutting down. 
-pub const ERROR_CODE_SHUTDOWN: VarInt = VarInt::from_u32(3); +pub const ERROR_CODE_SHUTDOWN: VarInt = VarInt::from_u32(4); pub const ESTABLISH_TIMEOUT: Duration = Duration::from_secs(10); pub const SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(10); @@ -312,9 +312,9 @@ pub(crate) async fn terminate_gracefully(conn: &Connection) -> Result<()> { conn.close(ERROR_CODE_OK, b"bye"); trace!("connection terminated gracefully"); Ok(()) - }, + } Err(err) => { - conn.close(ERROR_CODE_FAIL, b"peer-failed-while-closing"); + conn.close(ERROR_CODE_FAIL, b"failed-while-closing"); trace!(?err, "connection failed while terminating"); Err(err) } From 542762d6cd955773823ae17739515c6dae3aa919 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 13 Aug 2024 12:39:06 +0200 Subject: [PATCH 133/198] chore: fmt --- iroh-willow/src/engine/peer_manager.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/iroh-willow/src/engine/peer_manager.rs b/iroh-willow/src/engine/peer_manager.rs index daea1d3539..5f3fcf2ff5 100644 --- a/iroh-willow/src/engine/peer_manager.rs +++ b/iroh-willow/src/engine/peer_manager.rs @@ -365,7 +365,7 @@ impl PeerManager { senders, remaining_intents, mut update_receiver, - we_cancelled: _ + we_cancelled: _, } => { trace!(error=?result.err(), ?remaining_intents, "session complete"); From 87a11e9852972292510b2adc1a33654f278cd47e Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 13 Aug 2024 12:42:54 +0200 Subject: [PATCH 134/198] fix: update willow-rs fork --- Cargo.lock | 3 +++ Cargo.toml | 6 +++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7d9779ed00..ff91ff8cf4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3238,6 +3238,7 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" [[package]] name = "meadowcap" version = "0.1.0" +source = "git+https://github.com/Frando/willow-rs.git?branch=iroh-dev#1785dc6c4e62dbc7c58d56a989345eacd6954d95" 
dependencies = [ "either", "signature", @@ -6637,6 +6638,7 @@ checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" [[package]] name = "willow-data-model" version = "0.1.0" +source = "git+https://github.com/Frando/willow-rs.git?branch=iroh-dev#1785dc6c4e62dbc7c58d56a989345eacd6954d95" dependencies = [ "bytes", "either", @@ -6648,6 +6650,7 @@ dependencies = [ [[package]] name = "willow-encoding" version = "0.1.0" +source = "git+https://github.com/Frando/willow-rs.git?branch=iroh-dev#1785dc6c4e62dbc7c58d56a989345eacd6954d95" dependencies = [ "either", "syncify", diff --git a/Cargo.toml b/Cargo.toml index 02836da9de..4454bb7bd4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -46,6 +46,6 @@ unused-async = "warn" # willow-data-model = { path = "../willow-rs/data-model" } # willow-encoding = { path = "../willow-rs/encoding" } # meadowcap = { path = "../willow-rs/meadowcap" } -willow-data-model = { git = "https://github.com/Frando/willow-rs.git", branch = "iroh" } -willow-encoding = { git = "https://github.com/Frando/willow-rs.git", branch = "iroh" } -meadowcap = { git = "https://github.com/Frando/willow-rs.git", branch = "iroh" } +willow-data-model = { git = "https://github.com/Frando/willow-rs.git", branch = "iroh-dev" } +willow-encoding = { git = "https://github.com/Frando/willow-rs.git", branch = "iroh-dev" } +meadowcap = { git = "https://github.com/Frando/willow-rs.git", branch = "iroh-dev" } From b9fdb49bca57ade5edb687b70fe80b8da9867f2d Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 13 Aug 2024 13:40:59 +0200 Subject: [PATCH 135/198] fix: typos, more docs, clippy --- iroh-willow/src/net.rs | 19 ++++++++++++++----- iroh-willow/src/proto/keys.rs | 2 +- iroh-willow/src/session/capabilities.rs | 2 +- iroh-willow/src/session/error.rs | 2 +- iroh-willow/src/session/reconciler.rs | 1 + iroh-willow/src/session/run.rs | 2 +- iroh-willow/src/util.rs | 2 +- 7 files changed, 20 insertions(+), 10 deletions(-) diff --git 
a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 05be05fbdc..0e7280630a 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -321,6 +321,15 @@ pub(crate) async fn terminate_gracefully(conn: &Connection) -> Result<()> { } } +/// Waits for a goodbye byte or connection close, and then closes the connection. +/// +/// Accepts a single uni stream and reads a single byte on it. +/// +/// Returns once we received the goodbye byte or if the peer closed the connection with the +/// graceful error code. +/// +/// Returns an error if the connection was closed without the graceful error code or if reading the +/// goodbye byte failed. async fn wait_for_goodbye_or_graceful_close(conn: &Connection) -> Result<()> { let mut recv_stream = match conn.accept_uni().await { // The other peer closed the connection with the expected error code: They received our @@ -334,16 +343,16 @@ async fn wait_for_goodbye_or_graceful_close(conn: &Connection) -> Result<()> { }; let mut buf = [0u8]; match recv_stream.read_exact(&mut buf).await { - // We received the goodbye byte: the other peer signals having read everything, we're freee - // to close the connection. + // We received the goodbye byte: the other peer indicates to us that they are finished with + // everything and we are free to close the connection. Ok(()) if buf == [1u8] => Ok(()), // The other peer closed the connection with the expected error code: They received our - // goodbye byte after having sent theirs. We're free to close the connection. + // goodbye byte, and reacted by closing the connection. We're free to close too. Err(ReadExactError::ReadError(ReadError::ConnectionLost( ConnectionError::ApplicationClosed(frame), ))) if frame.error_code == ERROR_CODE_OK => Ok(()), // The peer has sent invalid data on the goodbye stream. 
- Ok(()) => Err(anyhow!("received unexpected closing byte from peer")), + Ok(()) => Err(anyhow!("Received unexpected closing byte from peer.")), // The peer closed the connection with an unexpected error coe. Err(err) => Err(err.into()), } @@ -619,7 +628,7 @@ mod tests { let start = Instant::now(); let (done_tx, done_rx) = tokio::sync::oneshot::channel(); - // alfie insert 3 enries after waiting a second + // alfie insert 3 entries after waiting a second let _insert_task_alfie = tokio::task::spawn({ let handle_alfie = handle_alfie.clone(); let count = 3; diff --git a/iroh-willow/src/proto/keys.rs b/iroh-willow/src/proto/keys.rs index 30c348042d..13c06ff844 100644 --- a/iroh-willow/src/proto/keys.rs +++ b/iroh-willow/src/proto/keys.rs @@ -61,7 +61,7 @@ fn is_communal(pubkey_bytes: &[u8; 32]) -> bool { pub enum NamespaceKind { /// Communal namespace, needs [`super::meadowcap::CommunalCapability`] to authorizse. Communal, - /// Owned namespace, neads [`super::meadowcap::OwnedCapability`] to authorize. + /// Owned namespace, needs [`super::meadowcap::OwnedCapability`] to authorize. 
Owned, } diff --git a/iroh-willow/src/session/capabilities.rs b/iroh-willow/src/session/capabilities.rs index ef19cf6c80..d339613a23 100644 --- a/iroh-willow/src/session/capabilities.rs +++ b/iroh-willow/src/session/capabilities.rs @@ -145,7 +145,7 @@ impl Capabilities { Ok(()) } - pub fn sign_subspace_capabiltiy( + pub fn sign_subspace_capability( &self, secrets: &S, cap: SubspaceCapability, diff --git a/iroh-willow/src/session/error.rs b/iroh-willow/src/session/error.rs index 0c4450b04d..502a4fcb9f 100644 --- a/iroh-willow/src/session/error.rs +++ b/iroh-willow/src/session/error.rs @@ -51,7 +51,7 @@ pub enum Error { UnsupportedMessage, #[error("received a message that is intended for another channel")] WrongChannel, - #[error("the received nonce does not match the received committment")] + #[error("the received nonce does not match the received commitment")] BrokenCommittement, #[error("received an actor message for unknown session")] SessionNotFound, diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index fe76147607..5328d42c2a 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -65,6 +65,7 @@ impl Reconciler { /// Run the [`Reconciler`]. /// /// The returned stream is a generator, so it must be polled repeatedly to progress. 
+ #[allow(clippy::too_many_arguments)] pub fn run_gen( inbox: Cancelable>, store: Store, diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 344e342af2..a6a3f73c75 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -231,7 +231,7 @@ pub(crate) async fn run_session( .await?; } Output::SignAndSendSubspaceCap(handle, cap) => { - let message = caps.sign_subspace_capabiltiy(store.secrets(), cap, handle)?; + let message = caps.sign_subspace_capability(store.secrets(), cap, handle)?; channel_sender.send(Box::new(message)).await?; } } diff --git a/iroh-willow/src/util.rs b/iroh-willow/src/util.rs index f83462e079..914072c230 100644 --- a/iroh-willow/src/util.rs +++ b/iroh-willow/src/util.rs @@ -1,4 +1,4 @@ -//! Various utilties and data structures used in this crate. +//! Various utilities and data structures used in this crate. pub mod channel; pub mod codec; From 5e1a613a6c58f519e1b57ef36b0e7baae18e7aed Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 13 Aug 2024 13:59:37 +0200 Subject: [PATCH 136/198] chore: allow public domain license and n0 repos, update yanked crates --- Cargo.lock | 8 ++++---- deny.toml | 5 +++++ 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b0dc2eceae..6aa96b94a4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -539,9 +539,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.6.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" +checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" dependencies = [ "serde", ] @@ -3797,9 +3797,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.9" +version = "2.7.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "311fb059dee1a7b802f036316d790138c613a4e8b180c822e3925a662e9f0c95" +checksum = "cd53dff83f26735fdc1ca837098ccf133605d794cdae66acfc2bfac3ec809d95" dependencies = [ "memchr", "thiserror", diff --git a/deny.toml b/deny.toml index 5230025622..99b5218ac4 100644 --- a/deny.toml +++ b/deny.toml @@ -19,6 +19,7 @@ allow = [ "Unicode-DFS-2016", "Zlib", "MPL-2.0", # https://fossa.com/blog/open-source-software-licenses-101-mozilla-public-license-2-0/ + "CC-PDDC" # https://spdx.org/licenses/CC-PDDC.html ] [[licenses.clarify]] @@ -31,3 +32,7 @@ license-files = [ [advisories] ignore = [ ] + +# TODO(Frando): added for iroh-willow development, maybe remove again before release? +[sources.allow-org] +github = ["n0-computer", "earthstar-project"] From 0de4c012b73becb95ce1252e5cb3e2b961c3f770 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 13 Aug 2024 14:24:49 +0200 Subject: [PATCH 137/198] fix: properly create owned vs communal namespaces --- iroh-willow/src/proto/keys.rs | 23 ++++++++++++----------- iroh-willow/src/proto/meadowcap.rs | 8 +------- 2 files changed, 13 insertions(+), 18 deletions(-) diff --git a/iroh-willow/src/proto/keys.rs b/iroh-willow/src/proto/keys.rs index 13c06ff844..414e8e8511 100644 --- a/iroh-willow/src/proto/keys.rs +++ b/iroh-willow/src/proto/keys.rs @@ -13,6 +13,8 @@ use iroh_base::base32; use rand_core::CryptoRngCore; use serde::{Deserialize, Serialize}; +use super::meadowcap::IsCommunal; + pub const PUBLIC_KEY_LENGTH: usize = ed25519_dalek::PUBLIC_KEY_LENGTH; pub const SECRET_KEY_LENGTH: usize = ed25519_dalek::SECRET_KEY_LENGTH; pub const SIGNATURE_LENGTH: usize = ed25519_dalek::SIGNATURE_LENGTH; @@ -45,12 +47,16 @@ macro_rules! bytestring { }; } -/// Returns `true` if the last bit of a byte slice is 1, which defines a communal namespace in this -/// willow implementation. 
-fn is_communal(pubkey_bytes: &[u8; 32]) -> bool { - let last = pubkey_bytes.last().expect("pubkey is not empty"); - // Check if last bit is 1. - (*last & 0x1) == 0x1 +impl IsCommunal for NamespaceId { + fn is_communal(&self) -> bool { + self.as_bytes()[31] == 0 + } +} + +impl IsCommunal for NamespacePublicKey{ + fn is_communal(&self) -> bool { + self.id().is_communal() + } } /// The type of the namespace, either communal or owned. @@ -121,11 +127,6 @@ pub struct NamespacePublicKey(VerifyingKey); bytestring!(NamespacePublicKey, PUBLIC_KEY_LENGTH); impl NamespacePublicKey { - /// Whether this is the key for a communal namespace. - pub fn is_communal(&self) -> bool { - is_communal(self.as_bytes()) - } - pub fn kind(&self) -> NamespaceKind { if self.is_communal() { NamespaceKind::Communal diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index 3249e66854..773a060c02 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -15,7 +15,7 @@ pub type NamespaceSignature = keys::NamespaceSignature; use super::data_model::{Entry, MAX_COMPONENT_COUNT, MAX_COMPONENT_LENGTH, MAX_PATH_LENGTH}; -pub use meadowcap::AccessMode; +pub use meadowcap::{AccessMode, IsCommunal}; #[derive(Debug, derive_more::From)] pub enum SecretKey { @@ -54,12 +54,6 @@ pub type McAuthorisationToken = meadowcap::McAuthorisationToken< keys::UserSignature, >; -impl meadowcap::IsCommunal for NamespaceId { - fn is_communal(&self) -> bool { - self.as_bytes()[31] == 0 - } -} - pub fn is_authorised_write(entry: &Entry, token: &McAuthorisationToken) -> bool { token.is_authorised_write(entry) } From cd25c48a01656d6323dff14d2195c7ec44ec6f25 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 13 Aug 2024 14:41:21 +0200 Subject: [PATCH 138/198] fix: debugging things --- iroh-willow/src/engine/actor.rs | 2 +- iroh-willow/src/engine/peer_manager.rs | 17 ++++++++--------- iroh-willow/src/session/run.rs | 5 ++++- 3 files changed, 13 
insertions(+), 11 deletions(-) diff --git a/iroh-willow/src/engine/actor.rs b/iroh-willow/src/engine/actor.rs index de5eba52fc..1d78c3639a 100644 --- a/iroh-willow/src/engine/actor.rs +++ b/iroh-willow/src/engine/actor.rs @@ -364,7 +364,7 @@ impl Actor { self.tasks.spawn_local(async move { if let Err(err) = future.await { - tracing::debug!(?peer, ?session_id, ?err, "session failed"); + debug!(?peer, ?session_id, ?err, "session failed"); } }); diff --git a/iroh-willow/src/engine/peer_manager.rs b/iroh-willow/src/engine/peer_manager.rs index 39d29ecc13..e9ef8c5b23 100644 --- a/iroh-willow/src/engine/peer_manager.rs +++ b/iroh-willow/src/engine/peer_manager.rs @@ -22,8 +22,7 @@ use tracing::{debug, error_span, instrument, trace, warn, Instrument, Span}; use crate::{ interest::Interests, net::{ - establish, prepare_channels, terminate_gracefully, ChannelStreams, ConnHandle, ALPN, - ERROR_CODE_DUPLICATE_CONN, ERROR_CODE_SHUTDOWN, + establish, prepare_channels, terminate_gracefully, ChannelStreams, ConnHandle, ALPN, ERROR_CODE_DUPLICATE_CONN, ERROR_CODE_FAIL, ERROR_CODE_SHUTDOWN }, proto::wgps::challenge::AccessChallenge, session::{ @@ -395,7 +394,7 @@ impl PeerManager { old_intents: remaining_intents, new_intents, }; - debug!("entering closing state"); + trace!("entering closing state"); } } } @@ -489,6 +488,7 @@ impl PeerManager { ref mut intents, .. } = &mut peer_info.state else { + conn.close(ERROR_CODE_FAIL, b"invalid-state"); drop(conn); // TODO: unreachable? return Err(anyhow!( @@ -546,7 +546,9 @@ impl PeerManager { trace!("connection loop finished"); let fut = async move { terminate_gracefully(&conn).await?; - Ok(ConnStep::Closed { conn }) + // The connection is fully closed. + drop(conn); + Ok(ConnStep::Closed) }; let abort_handle = spawn_conn_task(&mut self.conn_tasks, peer_info, fut); if let PeerState::Closing { .. 
} = &peer_info.state { @@ -555,9 +557,8 @@ impl PeerManager { // TODO: What do we do with the closing abort handle in case we have a new connection already? } } - Ok(ConnStep::Closed { conn }) => { + Ok(ConnStep::Closed) => { debug!("connection closed gracefully"); - drop(conn); let peer_info = self.peers.remove(&peer).expect("just checked"); if let PeerState::Closing { new_intents, @@ -665,9 +666,7 @@ enum ConnStep { Done { conn: Connection, }, - Closed { - conn: Connection, - }, + Closed, } /// The internal handlers for the [`AcceptOpts]. diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index a6a3f73c75..b0097fd153 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -480,7 +480,10 @@ async fn with_span( async { trace!("start"); let res = fut.await; - trace!(?res, "done"); + match &res { + Ok(_) => trace!("done"), + Err(err) => debug!(?err, "session task failed"), + } res } .instrument(span) From 085913ee1691f5f345c62c2f43b3c7ee3a162c05 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 13 Aug 2024 14:41:35 +0200 Subject: [PATCH 139/198] chore: fmt --- iroh-willow/src/engine/peer_manager.rs | 3 ++- iroh-willow/src/proto/keys.rs | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/iroh-willow/src/engine/peer_manager.rs b/iroh-willow/src/engine/peer_manager.rs index e9ef8c5b23..61a7b96142 100644 --- a/iroh-willow/src/engine/peer_manager.rs +++ b/iroh-willow/src/engine/peer_manager.rs @@ -22,7 +22,8 @@ use tracing::{debug, error_span, instrument, trace, warn, Instrument, Span}; use crate::{ interest::Interests, net::{ - establish, prepare_channels, terminate_gracefully, ChannelStreams, ConnHandle, ALPN, ERROR_CODE_DUPLICATE_CONN, ERROR_CODE_FAIL, ERROR_CODE_SHUTDOWN + establish, prepare_channels, terminate_gracefully, ChannelStreams, ConnHandle, ALPN, + ERROR_CODE_DUPLICATE_CONN, ERROR_CODE_FAIL, ERROR_CODE_SHUTDOWN, }, proto::wgps::challenge::AccessChallenge, session::{ 
diff --git a/iroh-willow/src/proto/keys.rs b/iroh-willow/src/proto/keys.rs index 414e8e8511..606b25a736 100644 --- a/iroh-willow/src/proto/keys.rs +++ b/iroh-willow/src/proto/keys.rs @@ -53,7 +53,7 @@ impl IsCommunal for NamespaceId { } } -impl IsCommunal for NamespacePublicKey{ +impl IsCommunal for NamespacePublicKey { fn is_communal(&self) -> bool { self.id().is_communal() } From e9d2d84430c450086100232136b7ddd8954b4c8d Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 13 Aug 2024 14:42:38 +0200 Subject: [PATCH 140/198] chore: cargo deny updates --- Cargo.lock | 18 +++++++++--------- Cargo.toml | 6 +++--- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6aa96b94a4..ee9a441e21 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3239,7 +3239,7 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" [[package]] name = "meadowcap" version = "0.1.0" -source = "git+https://github.com/Frando/willow-rs.git?branch=iroh-dev#1785dc6c4e62dbc7c58d56a989345eacd6954d95" +source = "git+https://github.com/n0-computer/willow-rs.git?branch=iroh-dev#f04f641b319e5a29c589f033154a92670b63472c" dependencies = [ "either", "signature", @@ -3808,9 +3808,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.9" +version = "2.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73541b156d32197eecda1a4014d7f868fd2bcb3c550d5386087cfba442bf69c" +checksum = "2a548d2beca6773b1c244554d36fcf8548a8a58e74156968211567250e48e49a" dependencies = [ "pest", "pest_generator", @@ -3818,9 +3818,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.9" +version = "2.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c35eeed0a3fab112f75165fdc026b3913f4183133f19b49be773ac9ea966e8bd" +checksum = "3c93a82e8d145725dcbaf44e5ea887c8a869efdcc28706df2d08c69e17077183" dependencies = [ "pest", "pest_meta", @@ -3831,9 +3831,9 @@ dependencies 
= [ [[package]] name = "pest_meta" -version = "2.7.9" +version = "2.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2adbf29bb9776f28caece835398781ab24435585fe0d4dc1374a61db5accedca" +checksum = "a941429fea7e08bedec25e4f6785b6ffaacc6b755da98df5ef3e7dcf4a124c4f" dependencies = [ "once_cell", "pest", @@ -6639,7 +6639,7 @@ checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" [[package]] name = "willow-data-model" version = "0.1.0" -source = "git+https://github.com/Frando/willow-rs.git?branch=iroh-dev#1785dc6c4e62dbc7c58d56a989345eacd6954d95" +source = "git+https://github.com/n0-computer/willow-rs.git?branch=iroh-dev#f04f641b319e5a29c589f033154a92670b63472c" dependencies = [ "bytes", "either", @@ -6651,7 +6651,7 @@ dependencies = [ [[package]] name = "willow-encoding" version = "0.1.0" -source = "git+https://github.com/Frando/willow-rs.git?branch=iroh-dev#1785dc6c4e62dbc7c58d56a989345eacd6954d95" +source = "git+https://github.com/n0-computer/willow-rs.git?branch=iroh-dev#f04f641b319e5a29c589f033154a92670b63472c" dependencies = [ "either", "syncify", diff --git a/Cargo.toml b/Cargo.toml index 4454bb7bd4..849ae3f450 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -46,6 +46,6 @@ unused-async = "warn" # willow-data-model = { path = "../willow-rs/data-model" } # willow-encoding = { path = "../willow-rs/encoding" } # meadowcap = { path = "../willow-rs/meadowcap" } -willow-data-model = { git = "https://github.com/Frando/willow-rs.git", branch = "iroh-dev" } -willow-encoding = { git = "https://github.com/Frando/willow-rs.git", branch = "iroh-dev" } -meadowcap = { git = "https://github.com/Frando/willow-rs.git", branch = "iroh-dev" } +willow-data-model = { git = "https://github.com/n0-computer/willow-rs.git", branch = "iroh-dev" } +willow-encoding = { git = "https://github.com/n0-computer/willow-rs.git", branch = "iroh-dev" } +meadowcap = { git = "https://github.com/n0-computer/willow-rs.git", branch = "iroh-dev" } From 
01e253d0e9b40c315f1c5a61d59e2a0033c5de1b Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 13 Aug 2024 14:49:10 +0200 Subject: [PATCH 141/198] chore: deny.toml fix --- deny.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deny.toml b/deny.toml index 99b5218ac4..e37ade2ac9 100644 --- a/deny.toml +++ b/deny.toml @@ -35,4 +35,4 @@ ignore = [ # TODO(Frando): added for iroh-willow development, maybe remove again before release? [sources.allow-org] -github = ["n0-computer", "earthstar-project"] +github = ["n0-computer"] From b8061b87bdb80b29e9375812a449033c9c916a0e Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 13 Aug 2024 15:49:03 +0200 Subject: [PATCH 142/198] feat: big round of docs updates and pub exposure changes --- Cargo.lock | 6 +- iroh-willow/src/engine.rs | 39 +++++++++- iroh-willow/src/engine/peer_manager.rs | 4 +- iroh-willow/src/interest.rs | 34 +++++++-- iroh-willow/src/lib.rs | 12 +--- iroh-willow/src/net.rs | 26 ++++--- iroh-willow/src/proto.rs | 7 ++ iroh-willow/src/proto/data_model.rs | 8 +++ iroh-willow/src/proto/grouping.rs | 2 + iroh-willow/src/proto/meadowcap.rs | 4 ++ iroh-willow/src/proto/wgps.rs | 12 ++-- iroh-willow/src/session.rs | 98 ++++++++++++++++---------- iroh-willow/src/session/aoi_finder.rs | 6 +- iroh-willow/src/session/challenge.rs | 2 +- iroh-willow/src/session/intents.rs | 27 ++++++- iroh-willow/src/session/pai_finder.rs | 4 +- iroh-willow/src/session/resource.rs | 11 +++ iroh-willow/src/session/run.rs | 4 +- iroh-willow/src/store.rs | 17 +++-- iroh-willow/src/store/auth.rs | 4 ++ iroh-willow/src/store/memory.rs | 7 ++ iroh-willow/src/store/traits.rs | 10 +++ iroh-willow/tests/basic.rs | 4 +- 23 files changed, 261 insertions(+), 87 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ee9a441e21..03c91f4fec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3239,7 +3239,7 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" [[package]] name = 
"meadowcap" version = "0.1.0" -source = "git+https://github.com/n0-computer/willow-rs.git?branch=iroh-dev#f04f641b319e5a29c589f033154a92670b63472c" +source = "git+https://github.com/n0-computer/willow-rs.git?branch=iroh-dev#9553d8f9aa73092b0deaae2e832964876f2c7caa" dependencies = [ "either", "signature", @@ -6639,7 +6639,7 @@ checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" [[package]] name = "willow-data-model" version = "0.1.0" -source = "git+https://github.com/n0-computer/willow-rs.git?branch=iroh-dev#f04f641b319e5a29c589f033154a92670b63472c" +source = "git+https://github.com/n0-computer/willow-rs.git?branch=iroh-dev#9553d8f9aa73092b0deaae2e832964876f2c7caa" dependencies = [ "bytes", "either", @@ -6651,7 +6651,7 @@ dependencies = [ [[package]] name = "willow-encoding" version = "0.1.0" -source = "git+https://github.com/n0-computer/willow-rs.git?branch=iroh-dev#f04f641b319e5a29c589f033154a92670b63472c" +source = "git+https://github.com/n0-computer/willow-rs.git?branch=iroh-dev#9553d8f9aa73092b0deaae2e832964876f2c7caa" dependencies = [ "either", "syncify", diff --git a/iroh-willow/src/engine.rs b/iroh-willow/src/engine.rs index e37e38f410..51ec3cbd70 100644 --- a/iroh-willow/src/engine.rs +++ b/iroh-willow/src/engine.rs @@ -16,11 +16,18 @@ mod peer_manager; use self::peer_manager::PeerManager; -pub use self::actor::ActorHandle; +pub(crate) use self::actor::ActorHandle; pub use self::peer_manager::AcceptOpts; const PEER_MANAGER_INBOX_CAP: usize = 128; +/// The [`Engine`] is the main handle onto a Willow store with networking. +/// +/// It runs a dedicated thread for all storage operations, and a peer manager to coordinate network +/// connections to other peers. +/// +/// The engine does not establish any peer connections on its own. Synchronisation sessions can be +/// started with [`Engine::sync_with_peer`]. 
#[derive(Debug, Clone)] pub struct Engine { actor_handle: ActorHandle, @@ -29,6 +36,20 @@ pub struct Engine { } impl Engine { + /// Start the Willow engine. + /// + /// This needs an `endpoint` to connect to other peers, and a `create_store` closure which + /// returns a [`Storage`] instance. + /// + /// You also need to pass [`AcceptOpts`] to configure what to do with incoming connections. + /// Its default implementation will accept all connections and run sync with all our interests. + /// + /// To actually accept connections, an [`Endpoint::accept`] loop has to be run outside of the + /// engine, passing all connections that match [`crate::net::ALPN`] to the engine with + /// [`Engine::handle_connection`]. + /// + /// The engine will spawn a dedicated storage thread, and the `create_store` closure will be called on + /// this thread, so that the [`Storage`] does not have to be `Send`. pub fn spawn( endpoint: Endpoint, create_store: impl 'static + Send + FnOnce() -> S, @@ -50,6 +71,7 @@ impl Engine { } } + /// Handle an incoming connection. pub async fn handle_connection(&self, conn: Connection) -> Result<()> { self.peer_manager_inbox .send(peer_manager::Input::HandleConnection { conn }) @@ -57,6 +79,17 @@ impl Engine { Ok(()) } + /// Synchronises with a peer. + /// + /// Will try to establish a connection to `peer` if there is none already, and then open a + /// synchronisation session. + /// + /// `init` contains the initialisation options for this synchronisation intent. + /// + /// Returns an [`IntentHandle`] which receives events and can submit udpates into the session. + /// + /// This can freely be called multiple times for the same peer. The engine will merge the + /// intents and make sure that only a single session is opened per peer. pub async fn sync_with_peer(&self, peer: NodeId, init: SessionInit) -> Result { let (intent, handle) = Intent::new(init); self.peer_manager_inbox @@ -65,6 +98,10 @@ impl Engine { Ok(handle) } + /// Shutdown the engine. 
+ /// + /// This will try to close all connections gracefully for up to 10 seconds, + /// and abort them otherwise. pub async fn shutdown(self) -> Result<()> { debug!("shutdown engine"); let (reply, reply_rx) = oneshot::channel(); diff --git a/iroh-willow/src/engine/peer_manager.rs b/iroh-willow/src/engine/peer_manager.rs index 61a7b96142..46ba6ae660 100644 --- a/iroh-willow/src/engine/peer_manager.rs +++ b/iroh-willow/src/engine/peer_manager.rs @@ -25,7 +25,7 @@ use crate::{ establish, prepare_channels, terminate_gracefully, ChannelStreams, ConnHandle, ALPN, ERROR_CODE_DUPLICATE_CONN, ERROR_CODE_FAIL, ERROR_CODE_SHUTDOWN, }, - proto::wgps::challenge::AccessChallenge, + proto::wgps::AccessChallenge, session::{ intents::{EventKind, Intent}, Error, InitialTransmission, Role, SessionEvent, SessionHandle, SessionInit, SessionUpdate, @@ -366,7 +366,7 @@ impl PeerManager { senders, remaining_intents, mut update_receiver, - we_cancelled: _, + .. } => { trace!(error=?result.err(), ?remaining_intents, "session complete"); diff --git a/iroh-willow/src/interest.rs b/iroh-willow/src/interest.rs index 74105bbafc..c71a8e23ac 100644 --- a/iroh-willow/src/interest.rs +++ b/iroh-willow/src/interest.rs @@ -1,3 +1,5 @@ +//! Types for defining synchronisation interests. + use std::collections::{hash_map, HashMap, HashSet}; use serde::{Deserialize, Serialize}; @@ -13,27 +15,37 @@ use crate::proto::{ pub type InterestMap = HashMap>; +/// Enum for describing synchronisation interests. +/// +/// You should use [`Self::builder`] for a straightforward way to construct this. #[derive(Debug, Default, Clone)] pub enum Interests { + /// Use all the capabilities we have. #[default] All, + /// Use the selected capabilities and areas. Select(HashMap), + /// Use exactly the specified capabilities and areas. Exact(InterestMap), } impl Interests { - pub fn builder() -> SelectBuilder { - SelectBuilder::default() + /// Returns a [`SelectBuilder`] to build our [`Interests`]. 
+ pub fn builder() -> InterestBuilder { + InterestBuilder::default() } + /// Creates interests that include all our capabilities. pub fn all() -> Self { Self::All } } +/// Builder for [`Interests`]. #[derive(Default, Debug)] -pub struct SelectBuilder(HashMap); +pub struct InterestBuilder(HashMap); +/// Helper trait to accept both [`Area`] and [`AreaOfInterest`] in the [`InterestBuilder`]. pub trait IntoAreaOfInterest { fn into_area_of_interest(self) -> AreaOfInterest; } @@ -50,13 +62,19 @@ impl IntoAreaOfInterest for Area { } } -impl SelectBuilder { +impl InterestBuilder { + /// Add the full area of a capability we have into the interests. + /// + /// See [`CapSelector`] for how to specifiy the capability to use. pub fn add_full_cap(mut self, cap: impl Into) -> Self { let cap = cap.into(); self.0.insert(cap, AreaOfInterestSelector::Widest); self } + /// Add a specific area included in one of our capabilities into the interests. + /// + /// See [`CapSelector`] for how to specifiy the capability to use. pub fn add_area( mut self, cap: impl Into, @@ -77,21 +95,25 @@ impl SelectBuilder { self } + /// Converts this builder into [`Interests`]. pub fn build(self) -> Interests { Interests::Select(self.0) } } -impl From for Interests { - fn from(builder: SelectBuilder) -> Self { +impl From for Interests { + fn from(builder: InterestBuilder) -> Self { builder.build() } } +/// Selector for an [`AreaOfInterest`]. #[derive(Debug, Default, Clone)] pub enum AreaOfInterestSelector { + /// Use the widest area allowed by a capability, with no further limits. #[default] Widest, + /// Use the specified set of [`AreaOfInterest`]. 
Exact(HashSet), } diff --git a/iroh-willow/src/lib.rs b/iroh-willow/src/lib.rs index 5473ba4476..a8bab71daf 100644 --- a/iroh-willow/src/lib.rs +++ b/iroh-willow/src/lib.rs @@ -6,18 +6,10 @@ pub mod engine; pub mod form; pub mod interest; -pub mod net; +pub(crate) mod net; pub mod proto; pub mod session; pub mod store; pub mod util; -/// To break symmetry, we refer to the peer that initiated the synchronisation session as Alfie, -/// and the other peer as Betty. -#[derive(Debug, Clone, Copy, Eq, PartialEq)] -pub enum Role { - /// The peer that initiated the synchronisation session. - Alfie, - /// The peer that accepted the synchronisation session. - Betty, -} +pub use net::ALPN; diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 0e7280630a..89b6b21f7f 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -1,3 +1,5 @@ +//! Networking implementation for iroh-willow. + use std::{future::Future, time::Duration}; use anyhow::{anyhow, ensure, Context as _, Result}; @@ -26,7 +28,8 @@ use crate::{ }, }; -pub const CHANNEL_CAP: usize = 1024 * 64; +/// Default capacity for the in-memory pipes between networking and session. +const CHANNEL_CAP: usize = 1024 * 64; /// The ALPN protocol name for iroh-willow. pub const ALPN: &[u8] = b"iroh-willow/0"; @@ -43,8 +46,10 @@ pub const ERROR_CODE_DUPLICATE_CONN: VarInt = VarInt::from_u32(3); /// QUIC application error code when closing connection because our node is shutting down. pub const ERROR_CODE_SHUTDOWN: VarInt = VarInt::from_u32(4); -pub const ESTABLISH_TIMEOUT: Duration = Duration::from_secs(10); -pub const SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(10); +/// Timeout until we abort a connection attempt. +const ESTABLISH_TIMEOUT: Duration = Duration::from_secs(10); +/// Timeout until we abort a graceful termination attempt. +const SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(10); /// The handle to an active peer connection. 
/// @@ -307,17 +312,22 @@ pub(crate) async fn terminate_gracefully(conn: &Connection) -> Result<()> { send_stream.finish().await?; // Wait until we either receive the goodbye byte from the other peer, or for the other peer // to close the connection with the expected error code. - match wait_for_goodbye_or_graceful_close(conn).await { - Ok(()) => { + match tokio::time::timeout(SHUTDOWN_TIMEOUT, wait_for_goodbye_or_graceful_close(conn)).await { + Ok(Ok(())) => { conn.close(ERROR_CODE_OK, b"bye"); trace!("connection terminated gracefully"); Ok(()) } - Err(err) => { + Ok(Err(err)) => { conn.close(ERROR_CODE_FAIL, b"failed-while-closing"); trace!(?err, "connection failed while terminating"); Err(err) } + Err(err) => { + conn.close(ERROR_CODE_FAIL, b"timeout-while-closing"); + trace!("connection timed out while terminating"); + Err(err.into()) + } } } @@ -653,8 +663,8 @@ mod tests { } }); - let init_alfie = SessionInit::new(Interests::All, SessionMode::Live); - let init_betty = SessionInit::new(Interests::All, SessionMode::Live); + let init_alfie = SessionInit::new(Interests::All, SessionMode::Continous); + let init_betty = SessionInit::new(Interests::All, SessionMode::Continous); let (intent_alfie, mut intent_handle_alfie) = Intent::new(init_alfie); let (intent_betty, mut intent_handle_betty) = Intent::new(init_betty); diff --git a/iroh-willow/src/proto.rs b/iroh-willow/src/proto.rs index da67bdad63..046dfc5161 100644 --- a/iroh-willow/src/proto.rs +++ b/iroh-willow/src/proto.rs @@ -1,3 +1,10 @@ +//! Protocol data types used in willow. +//! +//! These are mostly type aliases onto [`willow-rs`] types, with some additional helpers. +//! +//! This module also contains the crypthographic primitives for fingerprints and private area +//! intersection. 
+ pub mod data_model; pub mod grouping; pub mod keys; diff --git a/iroh-willow/src/proto/data_model.rs b/iroh-willow/src/proto/data_model.rs index 5941cbe671..07fc2bbdfa 100644 --- a/iroh-willow/src/proto/data_model.rs +++ b/iroh-willow/src/proto/data_model.rs @@ -1,3 +1,5 @@ +//! Types for the basic data model of Willow. + use iroh_base::hash::Hash; use ufotofu::sync::{consumer::IntoVec, producer::FromSlice}; use willow_encoding::sync::{Decodable, Encodable}; @@ -70,7 +72,13 @@ impl Default for PayloadDigest { impl willow_data_model::PayloadDigest for PayloadDigest {} +/// An immutable Willow [path]. +/// +/// Thread-safe, cheap to clone, cheap to take prefixes of, expensive to append to. +/// /// See [`willow_data_model::Path`]. +/// +/// [path]: https://willowprotocol.org/specs/data-model/index.html#Path pub type Path = willow_data_model::Path; /// Extension methods for [`Path`]. diff --git a/iroh-willow/src/proto/grouping.rs b/iroh-willow/src/proto/grouping.rs index 4f393ec126..b6ac09e8cf 100644 --- a/iroh-willow/src/proto/grouping.rs +++ b/iroh-willow/src/proto/grouping.rs @@ -1,3 +1,5 @@ +//! Utilities for Willow's entry [groupings](https://willowprotocol.org/specs/grouping-entries/index.html#grouping_entries). + pub use willow_data_model::grouping::{Range, RangeEnd}; use willow_data_model::SubspaceId as _; diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index 773a060c02..ea1afa2c31 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -1,3 +1,7 @@ +//! The capability system of Willow. +//! +//! Contains an instantiation of [`meadowcap`] for use in iroh-willow. 
+ use super::{ grouping::Area, keys::{self, NamespaceSecretKey, UserSecretKey}, diff --git a/iroh-willow/src/proto/wgps.rs b/iroh-willow/src/proto/wgps.rs index f60cbf56ac..a6d8a468a9 100644 --- a/iroh-willow/src/proto/wgps.rs +++ b/iroh-willow/src/proto/wgps.rs @@ -1,8 +1,10 @@ -pub mod challenge; -pub mod channels; -pub mod fingerprint; -pub mod handles; -pub mod messages; +//! Types and helpers for the Willow General Purpose Sync protocol. + +mod challenge; +mod channels; +mod fingerprint; +mod handles; +mod messages; pub use challenge::*; pub use channels::*; diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 0529fe5d6a..80cd683ef0 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -1,3 +1,12 @@ +//! The `session` module contains an implementation of the Willow General Purpose Sync Protocol +//! (WGPS). +//! +//! It exposes a few public types used to initiate sessions, and the [`intents`] module which +//! contains handle, event and command types for controlling sessions. +//! +//! Internally, this module contains the full implementation of the protocol, which is started with +//! the `run_session` function (which is not public). + use std::sync::Arc; use channels::ChannelSenders; @@ -12,9 +21,9 @@ use crate::{ mod aoi_finder; mod capabilities; mod challenge; -pub mod channels; +pub(crate) mod channels; mod data; -pub mod error; +mod error; pub mod intents; mod pai_finder; mod payload; @@ -28,7 +37,8 @@ pub(crate) use self::channels::Channels; pub(crate) use self::error::Error; pub(crate) use self::run::run_session; -pub type SessionId = u64; +/// Id per session to identify store subscriptions. +pub(crate) type SessionId = u64; /// To break symmetry, we refer to the peer that initiated the synchronisation session as Alfie, /// and the other peer as Betty. @@ -51,25 +61,32 @@ impl Role { } } +/// A session can either run a single reconciliation, or keep open until closed by either peer. 
+/// +/// * [`Self::Continous`] will enable the live data channels to synchronize updates in real-time. +/// * [`Self::ReconcileOnce`] will run a single reconciliation of the interests declared at session +/// start, and then close the session. #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum SessionMode { /// Run a single, full reconciliation, and then quit. ReconcileOnce, /// Run reconciliations and data mode, until intentionally closed. - Live, + Continous, } impl SessionMode { + /// Returns `true` if the session runs in live mode. pub fn is_live(&self) -> bool { - matches!(self, Self::Live) + matches!(self, Self::Continous) } } -/// Options to initialize a session with. +/// Options to initialize a session. #[derive(Debug)] pub struct SessionInit { - /// List of interests we wish to synchronize, together with our capabilities to read them. + /// Selects the areas we wish to synchronize. pub interests: Interests, + /// Selects the session mode (once or continous). pub mode: SessionMode, } @@ -79,41 +96,37 @@ impl SessionInit { Self { interests, mode } } + /// Creates a new [`SessionInit`] with [`SessionMode::Continous`]. pub fn continuous(interests: impl Into) -> Self { - Self::new(interests, SessionMode::Live) + Self::new(interests, SessionMode::Continous) } + /// Creates a new [`SessionInit`] with [`SessionMode::ReconcileOnce`]. pub fn reconcile_once(interests: impl Into) -> Self { Self::new(interests, SessionMode::ReconcileOnce) } } -/// The bind scope for resources. -/// -/// Resources are bound by either peer -#[derive(Copy, Clone, Debug)] -pub enum Scope { - /// Resources bound by ourselves. - Ours, - /// Resources bound by the other peer. 
- Theirs, -} - +/// Sender for session events #[derive(Debug, Clone)] -pub struct EventSender(pub mpsc::Sender); +pub(crate) struct EventSender(pub mpsc::Sender); impl EventSender { - pub async fn send(&self, event: SessionEvent) -> Result<(), ChannelReceiverDropped> { + pub(crate) async fn send(&self, event: SessionEvent) -> Result<(), ChannelReceiverDropped> { self.0.send(event).await.map_err(|_| ChannelReceiverDropped) } } +/// Events emitted from a session. +/// +/// These are handled in the [`PeerManager`](crate::engine::peer_manager::PeerManager). #[derive(derive_more::Debug)] -pub enum SessionEvent { +pub(crate) enum SessionEvent { Established, Complete { result: Result<(), Arc>, - // who_cancelled: WhoCancelled, + // TODO(Frando): Not sure if we should make use of this somewhere, maybe just remove. + #[allow(unused)] we_cancelled: bool, #[debug("ChannelSenders")] senders: ChannelSenders, @@ -123,24 +136,34 @@ pub enum SessionEvent { }, } +/// Update commands for an active session. #[derive(Debug)] -pub enum SessionUpdate { +pub(crate) enum SessionUpdate { SubmitIntent(Intent), } +/// Handle to an active session. +/// +/// This is not made public, the only public interface are [`intents`] handles. #[derive(Debug)] -pub struct SessionHandle { - pub cancel_token: CancellationToken, - pub update_tx: mpsc::Sender, - pub event_rx: mpsc::Receiver, +pub(crate) struct SessionHandle { + pub(crate) cancel_token: CancellationToken, + pub(crate) update_tx: mpsc::Sender, + pub(crate) event_rx: mpsc::Receiver, } impl SessionHandle { + // TODO(Frando): Previously the [`SessionHandle`] was exposed through the `net` module. + // Now all public interaction goes through the [`Engine`], which does not use the handle as + // such, but splits into the fields. Leaving this here for the moment in case we decide to + // expose the session handle (without relying on intents) publicly. + /// Wait for the session to finish. 
/// /// Returns the channel senders and a boolean indicating if we cancelled the session. /// Returns an error if the session failed to complete. - pub async fn complete(&mut self) -> Result<(ChannelSenders, bool), Arc> { + #[cfg(test)] + pub(crate) async fn complete(&mut self) -> Result<(ChannelSenders, bool), Arc> { while let Some(event) = self.event_rx.recv().await { if let SessionEvent::Complete { result, @@ -155,20 +178,21 @@ impl SessionHandle { Err(Arc::new(Error::ActorFailed)) } - /// Submit a new synchronisation intent. - pub async fn submit_intent(&self, intent: Intent) -> anyhow::Result<()> { - self.update_tx - .send(SessionUpdate::SubmitIntent(intent)) - .await?; - Ok(()) - } + // /// Submit a new synchronisation intent. + // pub(crate) async fn submit_intent(&self, intent: Intent) -> anyhow::Result<()> { + // self.update_tx + // .send(SessionUpdate::SubmitIntent(intent)) + // .await?; + // Ok(()) + // } /// Finish the session gracefully. /// /// After calling this, no further protocol messages will be sent from this node. /// Previously queued messages will still be sent out. The session will only be closed /// once the other peer closes their senders as well. 
- pub fn close(&self) { + #[cfg(test)] + pub(crate) fn close(&self) { tracing::debug!("close session (session handle close called)"); self.cancel_token.cancel(); } diff --git a/iroh-willow/src/session/aoi_finder.rs b/iroh-willow/src/session/aoi_finder.rs index 63ad667bd0..dc59c0df8e 100644 --- a/iroh-willow/src/session/aoi_finder.rs +++ b/iroh-willow/src/session/aoi_finder.rs @@ -14,8 +14,10 @@ use crate::{ }, }, session::{ - capabilities::Capabilities, pai_finder::PaiIntersection, resource::ResourceMap, Error, - Scope, + capabilities::Capabilities, + pai_finder::PaiIntersection, + resource::{ResourceMap, Scope}, + Error, }, util::gen_stream::GenStream, }; diff --git a/iroh-willow/src/session/challenge.rs b/iroh-willow/src/session/challenge.rs index 79ecd1ca66..cb99ed8ec6 100644 --- a/iroh-willow/src/session/challenge.rs +++ b/iroh-willow/src/session/challenge.rs @@ -1,7 +1,7 @@ use super::{Error, Role}; use crate::proto::{ keys::{UserPublicKey, UserSignature}, - wgps::challenge::{AccessChallenge, AccessChallengeBytes, ChallengeHash}, + wgps::{AccessChallenge, AccessChallengeBytes, ChallengeHash}, }; /// Data from the initial transmission diff --git a/iroh-willow/src/session/intents.rs b/iroh-willow/src/session/intents.rs index 834fc5c9e6..e01bc140fb 100644 --- a/iroh-willow/src/session/intents.rs +++ b/iroh-willow/src/session/intents.rs @@ -1,3 +1,11 @@ +//! Intents are handles onto a Willow synchronisation session. +//! +//! They are created with [`crate::Engine::sync_with_peer`]. +//! +//! An intent receives events from the session, and can submit new interests to be synchronized. +//! +//! Once all intents for a peer are complete, the session is closed. 
+ use std::{ collections::{HashMap, HashSet, VecDeque}, future::Future, @@ -17,12 +25,12 @@ use tokio_util::sync::PollSender; use tracing::{debug, trace, warn}; use crate::{ - interest::InterestMap, + interest::{InterestMap, Interests}, proto::{ grouping::{Area, AreaOfInterest}, keys::NamespaceId, }, - session::{error::ChannelReceiverDropped, Error, Interests, SessionInit, SessionMode}, + session::{error::ChannelReceiverDropped, Error, SessionInit, SessionMode}, store::{auth::Auth, traits::Storage}, util::gen_stream::GenStream, }; @@ -37,27 +45,35 @@ pub type IntentId = u64; type Sender = mpsc::Sender; type Receiver = mpsc::Receiver; +/// Events emitted from a session for an synchronisation intent. #[derive(Debug, Clone, Eq, PartialEq)] pub enum EventKind { + /// We found an intersection between our and the peer's capabilities. CapabilityIntersection { namespace: NamespaceId, area: Area, }, + /// We found an intersection between our and the peer's interests and will start to synchronize + /// the area. InterestIntersection { namespace: NamespaceId, area: AreaOfInterest, }, + /// We reconciled an area. Reconciled { namespace: NamespaceId, area: AreaOfInterest, }, + /// We reconciled all interests submitted in this intent. ReconciledAll, + /// The session was closed with an error. Abort { error: Arc, }, } impl EventKind { + /// Returns the namespace if the event is related to a namespace. pub fn namespace(&self) -> Option { match self { EventKind::CapabilityIntersection { namespace, .. } => Some(*namespace), @@ -68,9 +84,15 @@ impl EventKind { } } +/// Updates that may be submitted from an intent into the synchronisation session. #[derive(Debug)] pub enum IntentUpdate { + /// Submit new interests into the session. AddInterests(Interests), + /// Close the intent. + /// + /// It is not required to send this, but may reduce the time an intent is lingering while no + /// subscriber is live anymore. 
Close, } @@ -144,6 +166,7 @@ impl Intent { } } +/// Outcome of driving an intent to completion. #[derive(Debug, Eq, PartialEq)] pub enum Completion { /// All interests were reconciled. diff --git a/iroh-willow/src/session/pai_finder.rs b/iroh-willow/src/session/pai_finder.rs index f337939380..6e0503683f 100644 --- a/iroh-willow/src/session/pai_finder.rs +++ b/iroh-willow/src/session/pai_finder.rs @@ -29,8 +29,8 @@ use crate::{ }, }, session::{ - resource::{MissingResource, ResourceMap}, - Error, Scope, + resource::{MissingResource, ResourceMap, Scope}, + Error, }, util::gen_stream::GenStream, }; diff --git a/iroh-willow/src/session/resource.rs b/iroh-willow/src/session/resource.rs index f5a1c84bee..eb145741da 100644 --- a/iroh-willow/src/session/resource.rs +++ b/iroh-willow/src/session/resource.rs @@ -7,6 +7,17 @@ use crate::proto::wgps::{IsHandle, ResourceHandle}; use super::Error; +/// The bind scope for resources. +/// +/// Resources are bound by either peer +#[derive(Copy, Clone, Debug)] +pub enum Scope { + /// Resources bound by ourselves. + Ours, + /// Resources bound by the other peer. + Theirs, +} + #[derive(Debug)] pub struct ResourceMap { next_handle: u64, diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index b0097fd153..53083405e5 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -74,7 +74,7 @@ pub(crate) async fn run_session( .fold(SessionMode::ReconcileOnce, |cur, intent| { match intent.init.mode { SessionMode::ReconcileOnce => cur, - SessionMode::Live => SessionMode::Live, + SessionMode::Continous => SessionMode::Continous, } }); @@ -107,7 +107,7 @@ pub(crate) async fn run_session( // Setup data channels only if in live mode. // TODO: Adapt to changing mode. 
- let (data_inbox, data_inbox_rx) = if mode == SessionMode::Live { + let (data_inbox, data_inbox_rx) = if mode == SessionMode::Continous { let (data_inbox, data_inbox_rx) = cancelable_channel::(2, cancel_token.clone()); (Some(data_inbox), Some(data_inbox_rx)) diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index 0664ed5bf6..17d34d64e4 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -1,3 +1,11 @@ +//! Store for entries, secrets, and capabilities used in the Willow engine. +//! +//! The [`Store`] is the high-level wrapper for the different stores we need. +//! +//! The storage backend is defined in the [`Storage`] trait and its associated types. +//! +//! The only implementation is currently an in-memory store at [`memory`]. + use anyhow::{anyhow, Context, Result}; use rand_core::CryptoRngCore; @@ -16,15 +24,16 @@ use crate::{ use self::auth::{Auth, AuthError}; use self::traits::Storage; -pub use self::entry::{EntryOrigin, WatchableEntryStore}; +pub(crate) use self::entry::{EntryOrigin, WatchableEntryStore}; -pub mod auth; -pub mod entry; +pub(crate) mod auth; +pub(crate) mod entry; pub mod memory; pub mod traits; +/// Storage for the Willow engine. #[derive(Debug, Clone)] -pub struct Store { +pub(crate) struct Store { entries: WatchableEntryStore, secrets: S::Secrets, payloads: S::Payloads, diff --git a/iroh-willow/src/store/auth.rs b/iroh-willow/src/store/auth.rs index 5ef9c465fa..9198692ad1 100644 --- a/iroh-willow/src/store/auth.rs +++ b/iroh-willow/src/store/auth.rs @@ -1,3 +1,7 @@ +//! Authentication backend for Willow. +//! +//! Manages capabilities. + use std::collections::{HashMap, HashSet}; use anyhow::Result; diff --git a/iroh-willow/src/store/memory.rs b/iroh-willow/src/store/memory.rs index fef65673fc..54aebf2d44 100644 --- a/iroh-willow/src/store/memory.rs +++ b/iroh-willow/src/store/memory.rs @@ -1,3 +1,10 @@ +//! In-memory storage implementation for testing purposes. +//! +//! 
This is a minimal, but spec-compliant (unless there's bugs) implementation of a willow store. +//! +//! It does not have good performance, it does a lot of iterating. But it is concise and can +//! hopefully easily kept correct. + use std::cell::RefCell; use std::collections::HashMap; use std::rc::Rc; diff --git a/iroh-willow/src/store/traits.rs b/iroh-willow/src/store/traits.rs index caa4beb735..8cfd8571ea 100644 --- a/iroh-willow/src/store/traits.rs +++ b/iroh-willow/src/store/traits.rs @@ -1,3 +1,5 @@ +//! Traits for storage backends for the Willow store. + use std::fmt::Debug; use anyhow::Result; @@ -13,6 +15,9 @@ use crate::{ }, }; +/// Storage backend. +/// +/// This type combines the different stores needed. pub trait Storage: Debug + Clone + 'static { type Entries: EntryStorage; type Secrets: SecretStorage; @@ -24,6 +29,7 @@ pub trait Storage: Debug + Clone + 'static { fn caps(&self) -> &Self::Caps; } +/// Storage for user and namespace secrets. pub trait SecretStorage: Debug + Clone + 'static { fn insert(&self, secret: meadowcap::SecretKey) -> Result<(), SecretStoreError>; fn get_user(&self, id: &UserId) -> Option; @@ -69,6 +75,7 @@ pub trait SecretStorage: Debug + Clone + 'static { } } +/// Storage for entries. pub trait EntryStorage: EntryReader + Clone + Debug + 'static { type Reader: EntryReader; type Snapshot: EntryReader + Clone; @@ -78,6 +85,7 @@ pub trait EntryStorage: EntryReader + Clone + Debug + 'static { fn ingest_entry(&self, entry: &AuthorisedEntry) -> Result; } +/// Read-only interface to [`EntryStorage`]. pub trait EntryReader: Debug + 'static { fn fingerprint(&self, namespace: NamespaceId, range: &Range3d) -> Result; @@ -106,6 +114,7 @@ pub trait EntryReader: Debug + 'static { } } +/// Error returned from [`SecretStorage`]. #[derive(Debug, thiserror::Error)] pub enum SecretStoreError { #[error("store failed: {0}")] @@ -145,6 +154,7 @@ impl Default for SplitOpts { } } +/// Capability storage. 
pub trait CapsStorage: Debug + Clone { fn insert(&self, cap: CapabilityPack) -> Result<()>; diff --git a/iroh-willow/tests/basic.rs b/iroh-willow/tests/basic.rs index abb2233193..984027498e 100644 --- a/iroh-willow/tests/basic.rs +++ b/iroh-willow/tests/basic.rs @@ -137,7 +137,7 @@ async fn peer_manager_update_intent() -> Result<()> { let path = Path::from_bytes(&[b"foo"]).unwrap(); let interests = Interests::builder().add_area(namespace, [Area::new_path(path.clone())]); - let init = SessionInit::new(interests, SessionMode::Live); + let init = SessionInit::new(interests, SessionMode::Continous); let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); assert_eq!( @@ -289,12 +289,12 @@ mod util { engine::{AcceptOpts, Engine}, form::EntryForm, interest::{CapSelector, DelegateTo, RestrictArea}, - net::ALPN, proto::{ data_model::{Path, PathExt}, keys::{NamespaceId, NamespaceKind, UserId}, meadowcap::AccessMode, }, + ALPN, }; pub fn create_rng(seed: &str) -> ChaCha12Rng { From 965179a5458d81548816b106651f5508b112d286 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 13 Aug 2024 15:53:01 +0200 Subject: [PATCH 143/198] fixup --- iroh-willow/src/session/channels.rs | 28 ++++++++++++++-------------- iroh-willow/src/store/entry.rs | 8 ++++---- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/iroh-willow/src/session/channels.rs b/iroh-willow/src/session/channels.rs index 874e53ade0..6c52b33f66 100644 --- a/iroh-willow/src/session/channels.rs +++ b/iroh-willow/src/session/channels.rs @@ -25,13 +25,13 @@ pub struct MessageReceiver { } impl> MessageReceiver { - pub async fn recv(&mut self) -> Option> { - poll_fn(|cx| self.poll_recv(cx)).await - } + // pub async fn recv(&mut self) -> Option> { + // poll_fn(|cx| self.poll_recv(cx)).await + // } - pub fn close(&self) { - self.inner.close() - } + // pub fn close(&self) { + // self.inner.close() + // } pub fn poll_recv(&mut self, cx: &mut task::Context<'_>) -> Poll>> { let message = 
ready!(Pin::new(&mut self.inner).poll_next(cx)); @@ -75,14 +75,14 @@ pub struct LogicalChannelReceivers { } impl LogicalChannelReceivers { - pub fn close(&self) { - self.intersection_recv.close(); - self.reconciliation_recv.close(); - self.static_tokens_recv.close(); - self.capability_recv.close(); - self.aoi_recv.close(); - self.data_recv.close(); - } + // pub fn close(&self) { + // self.intersection_recv.close(); + // self.reconciliation_recv.close(); + // self.static_tokens_recv.close(); + // self.capability_recv.close(); + // self.aoi_recv.close(); + // self.data_recv.close(); + // } } #[derive(Debug, Clone)] diff --git a/iroh-willow/src/store/entry.rs b/iroh-willow/src/store/entry.rs index dd92b12f67..eb3c5d499e 100644 --- a/iroh-willow/src/store/entry.rs +++ b/iroh-willow/src/store/entry.rs @@ -59,10 +59,10 @@ impl WatchableEntryStore { } } - /// Returns a store reader. - pub fn reader(&self) -> ES::Reader { - self.storage.reader() - } + // /// Returns a store reader. + // pub fn reader(&self) -> ES::Reader { + // self.storage.reader() + // } /// Returns a store snapshot. pub fn snapshot(&self) -> anyhow::Result { From 3b07a1b92f7248a4632b9f0acc7698b06b8c7990 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 13 Aug 2024 15:53:14 +0200 Subject: [PATCH 144/198] chore: fmt --- iroh-willow/src/session/intents.rs | 9 ++------- iroh-willow/src/session/pai_finder.rs | 2 +- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/iroh-willow/src/session/intents.rs b/iroh-willow/src/session/intents.rs index e01bc140fb..192ee672e7 100644 --- a/iroh-willow/src/session/intents.rs +++ b/iroh-willow/src/session/intents.rs @@ -49,10 +49,7 @@ type Receiver = mpsc::Receiver; #[derive(Debug, Clone, Eq, PartialEq)] pub enum EventKind { /// We found an intersection between our and the peer's capabilities. 
- CapabilityIntersection { - namespace: NamespaceId, - area: Area, - }, + CapabilityIntersection { namespace: NamespaceId, area: Area }, /// We found an intersection between our and the peer's interests and will start to synchronize /// the area. InterestIntersection { @@ -67,9 +64,7 @@ pub enum EventKind { /// We reconciled all interests submitted in this intent. ReconciledAll, /// The session was closed with an error. - Abort { - error: Arc, - }, + Abort { error: Arc }, } impl EventKind { diff --git a/iroh-willow/src/session/pai_finder.rs b/iroh-willow/src/session/pai_finder.rs index 6e0503683f..94a6c68d33 100644 --- a/iroh-willow/src/session/pai_finder.rs +++ b/iroh-willow/src/session/pai_finder.rs @@ -30,7 +30,7 @@ use crate::{ }, session::{ resource::{MissingResource, ResourceMap, Scope}, - Error, + Error, }, util::gen_stream::GenStream, }; From e3ee3af0f75c50fcb473c71cb1a0ddf497150078 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 13 Aug 2024 15:54:16 +0200 Subject: [PATCH 145/198] chore: codespell --- iroh-willow/src/engine.rs | 2 +- iroh-willow/src/interest.rs | 4 ++-- iroh-willow/src/net.rs | 4 ++-- iroh-willow/src/session.rs | 10 +++++----- iroh-willow/src/session/run.rs | 4 ++-- iroh-willow/tests/basic.rs | 2 +- 6 files changed, 13 insertions(+), 13 deletions(-) diff --git a/iroh-willow/src/engine.rs b/iroh-willow/src/engine.rs index 51ec3cbd70..72b5b01f5a 100644 --- a/iroh-willow/src/engine.rs +++ b/iroh-willow/src/engine.rs @@ -86,7 +86,7 @@ impl Engine { /// /// `init` contains the initialisation options for this synchronisation intent. /// - /// Returns an [`IntentHandle`] which receives events and can submit udpates into the session. + /// Returns an [`IntentHandle`] which receives events and can submit updates into the session. /// /// This can freely be called multiple times for the same peer. The engine will merge the /// intents and make sure that only a single session is opened per peer. 
diff --git a/iroh-willow/src/interest.rs b/iroh-willow/src/interest.rs index c71a8e23ac..22af2ad0c0 100644 --- a/iroh-willow/src/interest.rs +++ b/iroh-willow/src/interest.rs @@ -65,7 +65,7 @@ impl IntoAreaOfInterest for Area { impl InterestBuilder { /// Add the full area of a capability we have into the interests. /// - /// See [`CapSelector`] for how to specifiy the capability to use. + /// See [`CapSelector`] for how to specify the capability to use. pub fn add_full_cap(mut self, cap: impl Into) -> Self { let cap = cap.into(); self.0.insert(cap, AreaOfInterestSelector::Widest); @@ -74,7 +74,7 @@ impl InterestBuilder { /// Add a specific area included in one of our capabilities into the interests. /// - /// See [`CapSelector`] for how to specifiy the capability to use. + /// See [`CapSelector`] for how to specify the capability to use. pub fn add_area( mut self, cap: impl Into, diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 89b6b21f7f..9ba518a0d3 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -663,8 +663,8 @@ mod tests { } }); - let init_alfie = SessionInit::new(Interests::All, SessionMode::Continous); - let init_betty = SessionInit::new(Interests::All, SessionMode::Continous); + let init_alfie = SessionInit::new(Interests::All, SessionMode::Continuous); + let init_betty = SessionInit::new(Interests::All, SessionMode::Continuous); let (intent_alfie, mut intent_handle_alfie) = Intent::new(init_alfie); let (intent_betty, mut intent_handle_betty) = Intent::new(init_betty); diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 80cd683ef0..09255e5197 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -63,7 +63,7 @@ impl Role { /// A session can either run a single reconciliation, or keep open until closed by either peer. /// -/// * [`Self::Continous`] will enable the live data channels to synchronize updates in real-time. 
+/// * [`Self::Continuous`] will enable the live data channels to synchronize updates in real-time. /// * [`Self::ReconcileOnce`] will run a single reconciliation of the interests declared at session /// start, and then close the session. #[derive(Debug, Clone, Copy, Eq, PartialEq)] @@ -71,13 +71,13 @@ pub enum SessionMode { /// Run a single, full reconciliation, and then quit. ReconcileOnce, /// Run reconciliations and data mode, until intentionally closed. - Continous, + Continuous, } impl SessionMode { /// Returns `true` if the session runs in live mode. pub fn is_live(&self) -> bool { - matches!(self, Self::Continous) + matches!(self, Self::Continuous) } } @@ -96,9 +96,9 @@ impl SessionInit { Self { interests, mode } } - /// Creates a new [`SessionInit`] with [`SessionMode::Continous`]. + /// Creates a new [`SessionInit`] with [`SessionMode::Continuous`]. pub fn continuous(interests: impl Into) -> Self { - Self::new(interests, SessionMode::Continous) + Self::new(interests, SessionMode::Continuous) } /// Creates a new [`SessionInit`] with [`SessionMode::ReconcileOnce`]. diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 53083405e5..02dcc346de 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -74,7 +74,7 @@ pub(crate) async fn run_session( .fold(SessionMode::ReconcileOnce, |cur, intent| { match intent.init.mode { SessionMode::ReconcileOnce => cur, - SessionMode::Continous => SessionMode::Continous, + SessionMode::Continuous => SessionMode::Continuous, } }); @@ -107,7 +107,7 @@ pub(crate) async fn run_session( // Setup data channels only if in live mode. // TODO: Adapt to changing mode. 
- let (data_inbox, data_inbox_rx) = if mode == SessionMode::Continous { + let (data_inbox, data_inbox_rx) = if mode == SessionMode::Continuous { let (data_inbox, data_inbox_rx) = cancelable_channel::(2, cancel_token.clone()); (Some(data_inbox), Some(data_inbox_rx)) diff --git a/iroh-willow/tests/basic.rs b/iroh-willow/tests/basic.rs index 984027498e..2e2a6afe9c 100644 --- a/iroh-willow/tests/basic.rs +++ b/iroh-willow/tests/basic.rs @@ -137,7 +137,7 @@ async fn peer_manager_update_intent() -> Result<()> { let path = Path::from_bytes(&[b"foo"]).unwrap(); let interests = Interests::builder().add_area(namespace, [Area::new_path(path.clone())]); - let init = SessionInit::new(interests, SessionMode::Continous); + let init = SessionInit::new(interests, SessionMode::Continuous); let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); assert_eq!( From c5376988c5e78649a0bcac6ad0d11dec98e65084 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 13 Aug 2024 15:55:31 +0200 Subject: [PATCH 146/198] fixup --- iroh-willow/src/session/channels.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/iroh-willow/src/session/channels.rs b/iroh-willow/src/session/channels.rs index 6c52b33f66..1b283d2f21 100644 --- a/iroh-willow/src/session/channels.rs +++ b/iroh-willow/src/session/channels.rs @@ -1,5 +1,4 @@ use std::{ - future::poll_fn, marker::PhantomData, pin::Pin, task::{self, ready, Poll}, From 46ff9bfcdc11b237df4bb384b95f398df1c41f0d Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 13 Aug 2024 15:59:40 +0200 Subject: [PATCH 147/198] fixup --- iroh-willow/src/engine.rs | 4 +++- iroh-willow/src/engine/actor.rs | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/iroh-willow/src/engine.rs b/iroh-willow/src/engine.rs index 72b5b01f5a..d33a480e81 100644 --- a/iroh-willow/src/engine.rs +++ b/iroh-willow/src/engine.rs @@ -1,3 +1,5 @@ +//! Engine for driving a willow store and synchronisation sessions. 
+ use anyhow::Result; use iroh_net::{endpoint::Connection, util::SharedAbortingJoinHandle, Endpoint, NodeId}; use tokio::sync::{mpsc, oneshot}; @@ -16,7 +18,7 @@ mod peer_manager; use self::peer_manager::PeerManager; -pub(crate) use self::actor::ActorHandle; +pub use self::actor::ActorHandle; pub use self::peer_manager::AcceptOpts; const PEER_MANAGER_INBOX_CAP: usize = 128; diff --git a/iroh-willow/src/engine/actor.rs b/iroh-willow/src/engine/actor.rs index 1d78c3639a..f762646f44 100644 --- a/iroh-willow/src/engine/actor.rs +++ b/iroh-willow/src/engine/actor.rs @@ -32,6 +32,7 @@ pub const INBOX_CAP: usize = 1024; pub const SESSION_EVENT_CHANNEL_CAP: usize = 64; pub const SESSION_UPDATE_CHANNEL_CAP: usize = 64; +/// Handle to a Willow storage thread. #[derive(Debug, Clone)] pub struct ActorHandle { inbox_tx: flume::Sender, From 723f21ca4af5f7ab163d9d5cb5b8acadb2e890a6 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 13 Aug 2024 16:01:05 +0200 Subject: [PATCH 148/198] chore: typo --- iroh-willow/src/session.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 09255e5197..c25e952ad1 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -86,7 +86,7 @@ impl SessionMode { pub struct SessionInit { /// Selects the areas we wish to synchronize. pub interests: Interests, - /// Selects the session mode (once or continous). + /// Selects the session mode (once or continuous). 
pub mode: SessionMode, } From 5a449ffaeddb91e3af4ffb3075c2b45c04906fa6 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Wed, 14 Aug 2024 00:51:35 +0200 Subject: [PATCH 149/198] test: do not use relay in tests --- iroh-willow/src/net.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 9ba518a0d3..d244f0183c 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -753,6 +753,7 @@ mod tests { ) -> Result<(Endpoint, NodeId, NodeAddr)> { let ep = Endpoint::builder() .secret_key(SecretKey::generate_with_rng(rng)) + .relay_mode(iroh_net::relay::RelayMode::Disabled) .alpns(vec![ALPN.to_vec()]) .bind(0) .await?; From 69920513d3dccefe7723f6fd4a91daad21ae81b0 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Mon, 19 Aug 2024 16:20:10 +0200 Subject: [PATCH 150/198] refactor: ensure correct ordering when closing sessions --- iroh-willow/src/engine/peer_manager.rs | 77 +++++------ iroh-willow/src/session.rs | 2 - iroh-willow/src/session/data.rs | 7 +- iroh-willow/src/session/error.rs | 15 ++- iroh-willow/src/session/intents.rs | 94 +++++++------ iroh-willow/src/session/reconciler.rs | 12 +- iroh-willow/src/session/run.rs | 177 ++++++++++++++----------- iroh-willow/src/util/stream.rs | 47 +++++++ 8 files changed, 263 insertions(+), 168 deletions(-) diff --git a/iroh-willow/src/engine/peer_manager.rs b/iroh-willow/src/engine/peer_manager.rs index 46ba6ae660..2682ab58f5 100644 --- a/iroh-willow/src/engine/peer_manager.rs +++ b/iroh-willow/src/engine/peer_manager.rs @@ -264,8 +264,7 @@ impl PeerManager { ref mut intents, .. } | PeerState::Closing { - new_intents: ref mut intents, - .. + ref mut intents, .. } => std::mem::take(intents), _ => vec![], }; @@ -342,14 +341,21 @@ impl PeerManager { } => { intents.push(intent); } - PeerState::Active { ref update_tx, .. } => { + PeerState::Active { + ref update_tx, + ref mut intents_after_close, + .. 
+ } => { if let Err(err) = update_tx.send(SessionUpdate::SubmitIntent(intent)).await { + debug!("failed to submit intent into active session, queue in peer state"); let SessionUpdate::SubmitIntent(intent) = err.0; - intent.send_abort(Arc::new(Error::ActorFailed)).await; + intents_after_close.push(intent); + } else { + trace!("intent sent to session"); } } PeerState::Closing { - ref mut new_intents, + intents: ref mut new_intents, .. } => { new_intents.push(intent); @@ -364,36 +370,32 @@ impl PeerManager { SessionEvent::Complete { result, senders, - remaining_intents, - mut update_receiver, - .. + mut remaining_intents, + we_cancelled: _, } => { - trace!(error=?result.err(), ?remaining_intents, "session complete"); + debug!(error=?result.err(), remaining_intents=remaining_intents.len(), "session complete"); // Close the channel senders. This will cause our send loops to close, // which in turn causes the receive loops of the other peer to close. senders.close_all(); let Some(peer_info) = self.peers.get_mut(&peer) else { - warn!("got session complete for unknown peer"); + warn!("got session complete event for unknown peer"); return; }; - // TODO(frando): How exactly to deal with the `remaining_intents` is tbd. - // Current impl: We store them in the Closing state, wait if the connection closed with error or not, - // and if it closed with error abort them with this error, otherwise they are dropped (which also closes their event streams). - // We could potentially restart them, as we do with the new intents that came in after session termination, - // but we'd have to think carefully about endless loops there. - - // However, the intents that are still in the update channel are completely unprocessed, so they - // should get their chance via a reconnect. - let mut new_intents = vec![]; - while let Ok(SessionUpdate::SubmitIntent(intent)) = update_receiver.try_recv() { - new_intents.push(intent); - } + let PeerState::Active { + ref mut intents_after_close, + .. 
+ } = peer_info.state + else { + warn!("got session complete event for peer not in active state"); + return; + }; + remaining_intents.append(intents_after_close); + peer_info.state = PeerState::Closing { - old_intents: remaining_intents, - new_intents, + intents: remaining_intents, }; trace!("entering closing state"); } @@ -445,19 +447,15 @@ impl PeerManager { ) .await; } - PeerState::Closing { - old_intents, - new_intents, - } => { + PeerState::Closing { intents } => { debug!(?err, "connection failed to close gracefully"); // If we were are in closing state, we still forward the connection error to the intents. // This would be the place where we'd implement retries: instead of aborting the intents, resubmit them. // Right now, we only resubmit intents that were submitted while terminating a session, and only if the session closed gracefully. let err = Arc::new(Error::Net(err)); join_all( - old_intents + intents .into_iter() - .chain(new_intents.into_iter()) .map(|intent| intent.send_abort(err.clone())), ) .await; @@ -540,6 +538,7 @@ impl PeerManager { peer_info.state = PeerState::Active { update_tx, cancel_token, + intents_after_close: vec![], }; peer_info.abort_handle = Some(abort_handle); } @@ -561,18 +560,13 @@ impl PeerManager { Ok(ConnStep::Closed) => { debug!("connection closed gracefully"); let peer_info = self.peers.remove(&peer).expect("just checked"); - if let PeerState::Closing { - new_intents, - old_intents, - } = peer_info.state - { - drop(old_intents); - if !new_intents.is_empty() { + if let PeerState::Closing { intents } = peer_info.state { + if !intents.is_empty() { debug!( "resubmitting {} intents that were not yet processed", - new_intents.len() + intents.len() ); - for intent in new_intents { + for intent in intents { self.submit_intent(peer, intent).await; } } @@ -650,10 +644,11 @@ enum PeerState { Active { cancel_token: CancellationToken, update_tx: mpsc::Sender, + /// List of intents that we failed to submit into the session because it is 
closing. + intents_after_close: Vec, }, Closing { - old_intents: Vec, - new_intents: Vec, + intents: Vec, }, } diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index c25e952ad1..0a7bf135be 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -131,8 +131,6 @@ pub(crate) enum SessionEvent { #[debug("ChannelSenders")] senders: ChannelSenders, remaining_intents: Vec, - #[debug("Receiver")] - update_receiver: mpsc::Receiver, }, } diff --git a/iroh-willow/src/session/data.rs b/iroh-willow/src/session/data.rs index 892333cda1..acb5cd9d07 100644 --- a/iroh-willow/src/session/data.rs +++ b/iroh-willow/src/session/data.rs @@ -1,6 +1,5 @@ use futures_lite::StreamExt; use tokio::sync::broadcast; -use tokio_stream::wrappers::ReceiverStream; use crate::{ proto::{ @@ -16,7 +15,7 @@ use crate::{ traits::Storage, Store, }, - util::stream::Cancelable, + util::stream::CancelableReceiver, }; use super::{ @@ -31,7 +30,7 @@ pub enum Input { #[derive(derive_more::Debug)] pub struct DataSender { - inbox: Cancelable>, + inbox: CancelableReceiver, store: Store, send: ChannelSenders, static_tokens: StaticTokens, @@ -40,7 +39,7 @@ pub struct DataSender { impl DataSender { pub fn new( - inbox: Cancelable>, + inbox: CancelableReceiver, store: Store, send: ChannelSenders, static_tokens: StaticTokens, diff --git a/iroh-willow/src/session/error.rs b/iroh-willow/src/session/error.rs index 502a4fcb9f..161225655c 100644 --- a/iroh-willow/src/session/error.rs +++ b/iroh-willow/src/session/error.rs @@ -1,4 +1,5 @@ use ed25519_dalek::SignatureError; +use tokio::sync::mpsc; use crate::{ proto::{data_model::UnauthorisedWriteError, meadowcap::UserId, wgps::ResourceHandle}, @@ -71,10 +72,12 @@ pub enum Error { Pai(#[from] PaiError), #[error("net failed: {0}")] Net(anyhow::Error), - #[error("channel receiver dropped")] - ChannelDropped, + #[error("channel closed unexpectedly")] + ChannelClosed, #[error("our node is shutting down")] ShuttingDown, + #[error("The 
operation was cancelled locally")] + Cancelled, } #[derive(Debug, thiserror::Error)] @@ -82,7 +85,7 @@ pub enum Error { pub struct ChannelReceiverDropped; impl From for Error { fn from(_: ChannelReceiverDropped) -> Self { - Self::ChannelDropped + Self::ChannelClosed } } @@ -135,3 +138,9 @@ impl From for Error { Self::MissingResource(value.0) } } + +impl From> for Error { + fn from(_error: mpsc::error::SendError) -> Self { + Self::ChannelClosed + } +} diff --git a/iroh-willow/src/session/intents.rs b/iroh-willow/src/session/intents.rs index 192ee672e7..3a6fee86e6 100644 --- a/iroh-willow/src/session/intents.rs +++ b/iroh-willow/src/session/intents.rs @@ -270,6 +270,7 @@ pub(super) enum Output { #[derive(derive_more::Debug)] pub(super) struct IntentDispatcher { + inbox: mpsc::Receiver, pending_intents: VecDeque, intents: HashMap, auth: Auth, @@ -280,8 +281,13 @@ pub(super) struct IntentDispatcher { } impl IntentDispatcher { - pub(super) fn new(auth: Auth, initial_intents: impl IntoIterator) -> Self { + pub(super) fn new( + auth: Auth, + initial_intents: impl IntoIterator, + inbox: mpsc::Receiver, + ) -> Self { Self { + inbox, pending_intents: initial_intents.into_iter().collect(), intents: Default::default(), auth, @@ -312,32 +318,39 @@ impl IntentDispatcher { } /// Takes self and returns all pending intents. - // TODO: What if one of the two channels closed? - // Should not do Option but an option for each direction instead likely on Intent. 
- pub(super) fn drain_all(mut self) -> Vec { - let mut intents: Vec<_> = self.pending_intents.into_iter().collect(); - for (id, info) in self.intents.drain() { - let event_tx = info.event_tx; - let update_rx = self.intent_update_rx.remove(&id); - let update_rx = update_rx - .and_then(|stream| stream.into_inner()) - .map(|stream| stream.into_inner()); - let channels = match (event_tx, update_rx) { - (Some(event_tx), Some(update_rx)) => Some(IntentChannels { - event_tx, - update_rx, - }), - _ => None, - }; - if let Some(channels) = channels { - let intent = Intent { - init: info.original_init, - channels: Some(channels), - }; - intents.push(intent); + pub(super) async fn drain_all(mut self) -> Vec { + // Drain inbox. + let mut pending_intents = vec![]; + self.inbox.close(); + while let Ok(item) = self.inbox.try_recv() { + match item { + Input::EmitEvent(event) => { + self.emit_event_inner(event).await; + } + Input::SubmitIntent(intent) => pending_intents.push(intent), } } - intents + + // Drain pending intents. + pending_intents.extend(self.pending_intents.into_iter()); + + // Abort active intents. + let error = Arc::new(Error::Cancelled); + let active_intents = self.intents.drain().filter_map(|(_id, info)| { + if info.is_complete() { + None + } else { + info.event_tx + } + }); + let _ = futures_buffered::join_all(active_intents.map(|event_tx| { + let error = error.clone(); + async move { event_tx.send(EventKind::Abort { error }).await } + })) + .await; + + // Return pending (unprocessed) intents. + pending_intents } /// Run the [`IntentDispatcher`]. @@ -345,21 +358,18 @@ impl IntentDispatcher { /// The returned stream is a generator, so it must be polled repeatedly to progress. 
pub(super) fn run_gen( &mut self, - inbox: impl Stream + 'static, ) -> GenStream> + '_> { - GenStream::new(|co| self.run(co, inbox)) + GenStream::new(|co| self.run(co)) } - async fn run(&mut self, co: Co, inbox: impl Stream) -> Result<(), Error> { - tokio::pin!(inbox); - + async fn run(&mut self, co: Co) -> Result<(), Error> { while let Some(intent) = self.pending_intents.pop_front() { self.submit_intent(&co, intent).await?; } trace!("submitted initial intents, start loop"); loop { tokio::select! { - input = inbox.next() => { + input = self.inbox.recv() => { trace!(?input, "tick: inbox"); let Some(input) = input else { break; @@ -395,6 +405,7 @@ impl IntentDispatcher { } async fn submit_intent(&mut self, co: &Co, intent: Intent) -> Result<(), Error> { + debug!("submit intent"); let interests = self.auth.resolve_interests(intent.init.interests.clone())?; let intent_id = { let intent_id = self.next_intent_id; @@ -413,7 +424,6 @@ impl IntentDispatcher { interests: flatten_interests(&interests), mode: intent.init.mode, event_tx, - original_init: intent.init, }; // Send out reconciled events for already-complete areas. 
for (namespace, areas) in &self.complete_areas { @@ -436,7 +446,7 @@ impl IntentDispatcher { Ok(()) } - async fn emit_event(&mut self, co: &Co, event: EventKind) { + async fn emit_event_inner(&mut self, event: EventKind) { if let EventKind::Reconciled { namespace, area } = &event { self.complete_areas .entry(*namespace) @@ -452,18 +462,25 @@ impl IntentDispatcher { match res { Err(ChannelReceiverDropped) => { if !self.intent_update_rx.contains_key(&id) { - self.cancel_intent(co, id).await; + self.cancel_intent_inner(id); } } Ok(is_complete) => { if is_complete { - self.cancel_intent(co, id).await; + self.cancel_intent_inner(id); } } } } } + async fn emit_event(&mut self, co: &Co, event: EventKind) { + self.emit_event_inner(event).await; + if self.intents.is_empty() { + co.yield_(Output::AllIntentsDropped).await; + } + } + async fn update_intent( &mut self, co: &Co, @@ -487,10 +504,14 @@ impl IntentDispatcher { Ok(()) } - async fn cancel_intent(&mut self, co: &Co, intent_id: u64) { + fn cancel_intent_inner(&mut self, intent_id: u64) { trace!(?intent_id, "cancel intent"); self.intent_update_rx.remove(&intent_id); self.intents.remove(&intent_id); + } + + async fn cancel_intent(&mut self, co: &Co, intent_id: u64) { + self.cancel_intent_inner(intent_id); if self.intents.is_empty() { co.yield_(Output::AllIntentsDropped).await; } @@ -499,7 +520,6 @@ impl IntentDispatcher { #[derive(Debug)] pub(super) struct IntentInfo { - original_init: SessionInit, interests: NamespaceInterests, mode: SessionMode, event_tx: Option>, diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index 5328d42c2a..62f8ae1d1b 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -7,7 +7,6 @@ use bytes::Bytes; use futures_lite::StreamExt; use genawaiter::rc::Co; use iroh_blobs::store::Store as PayloadStore; -use tokio_stream::wrappers::ReceiverStream; use tracing::{debug, trace}; use crate::{ @@ -34,7 +33,10 @@ use 
crate::{ traits::{EntryReader, EntryStorage, SplitAction, SplitOpts, Storage}, Store, }, - util::{gen_stream::GenStream, stream::Cancelable}, + util::{ + gen_stream::GenStream, + stream::{Cancelable, CancelableReceiver}, + }, }; #[derive(Debug)] @@ -67,7 +69,7 @@ impl Reconciler { /// The returned stream is a generator, so it must be polled repeatedly to progress. #[allow(clippy::too_many_arguments)] pub fn run_gen( - inbox: Cancelable>, + inbox: CancelableReceiver, store: Store, recv: Cancelable>, static_tokens: StaticTokens, @@ -223,11 +225,11 @@ impl Reconciler { #[derive(Debug)] struct TargetMap { map: HashMap>, - inbox: Cancelable>, + inbox: CancelableReceiver, } impl TargetMap { - pub fn new(inbox: Cancelable>) -> Self { + pub fn new(inbox: CancelableReceiver) -> Self { Self { map: Default::default(), inbox, diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 02dcc346de..20a031c149 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -1,6 +1,9 @@ use std::{future::Future, sync::Arc}; -use futures_concurrency::{future::TryJoin, stream::StreamExt as _}; +use futures_concurrency::{ + future::{Join as _, TryJoin as _}, + stream::StreamExt as _, +}; use futures_lite::StreamExt as _; use strum::IntoEnumIterator; use tokio::sync::mpsc; @@ -23,13 +26,15 @@ use crate::{ Channels, Error, EventSender, Role, SessionEvent, SessionId, SessionUpdate, }, store::{traits::Storage, Store}, - util::{channel::Receiver, stream::Cancelable}, + util::{ + channel::Receiver, + stream::{Cancelable, CancelableReceiver}, + }, }; use super::{ channels::ChannelReceivers, data::{DataReceiver, DataSender}, - error::ChannelReceiverDropped, reconciler::Reconciler, SessionMode, }; @@ -40,7 +45,7 @@ pub(crate) async fn run_session( store: Store, conn: ConnHandle, initial_intents: Vec, - cancel_token: CancellationToken, + close_session_token: CancellationToken, session_id: SessionId, event_sender: EventSender, update_receiver: 
ReceiverStream, @@ -80,15 +85,14 @@ pub(crate) async fn run_session( debug!(role = ?our_role, ?mode, "start session"); - // Make all our receivers close once the cancel_token is triggered. - let control_recv = Cancelable::new(control_recv, cancel_token.clone()); - let reconciliation_recv = Cancelable::new(reconciliation_recv, cancel_token.clone()); - let intersection_recv = Cancelable::new(intersection_recv, cancel_token.clone()); - let mut static_tokens_recv = Cancelable::new(static_tokens_recv, cancel_token.clone()); - let mut capability_recv = Cancelable::new(capability_recv, cancel_token.clone()); - let mut aoi_recv = Cancelable::new(aoi_recv, cancel_token.clone()); - let mut data_recv = Cancelable::new(data_recv, cancel_token.clone()); - let mut update_receiver = Cancelable::new(update_receiver, cancel_token.clone()); + // Make all our receivers close once the close session token is triggered. + let control_recv = Cancelable::new(control_recv, close_session_token.clone()); + let reconciliation_recv = Cancelable::new(reconciliation_recv, close_session_token.clone()); + let intersection_recv = Cancelable::new(intersection_recv, close_session_token.clone()); + let mut static_tokens_recv = Cancelable::new(static_tokens_recv, close_session_token.clone()); + let mut capability_recv = Cancelable::new(capability_recv, close_session_token.clone()); + let mut aoi_recv = Cancelable::new(aoi_recv, close_session_token.clone()); + let mut data_recv = Cancelable::new(data_recv, close_session_token.clone()); let caps = Capabilities::new( initial_transmission.our_nonce, @@ -97,40 +101,51 @@ pub(crate) async fn run_session( let tokens = StaticTokens::default(); // Setup channels for communication between the loops. - let (pai_inbox, pai_inbox_rx) = cancelable_channel::(2, cancel_token.clone()); + // All channels but the intents channel are "cancelable", which means that once the cancel + // token is invoked, no new messages may be sent into the channel. 
+ let close_inboxes_token = CancellationToken::new(); + let mut update_receiver = CancelableReceiver::new(update_receiver, close_inboxes_token.clone()); + let (pai_inbox, pai_inbox_rx) = + cancelable_channel::(2, close_inboxes_token.clone()); let (intersection_inbox, intersection_inbox_rx) = - cancelable_channel::(2, cancel_token.clone()); + cancelable_channel::(2, close_inboxes_token.clone()); let (reconciler_inbox, reconciler_inbox_rx) = - cancelable_channel::(2, cancel_token.clone()); - let (intents_inbox, intents_inbox_rx) = - cancelable_channel::(2, cancel_token.clone()); + cancelable_channel::(2, close_inboxes_token.clone()); + + // The closing ceremony for the intents inbox is more involved: It is a regular channel, + // because the inbox should stay open after the cancel token is triggered, so that further + // events may still be emitted. To close the channel, we manually ensure that all senders are + // dropped once all other work is done. + let (intents_inbox, intents_inbox_rx) = channel::(2); // Setup data channels only if in live mode. // TODO: Adapt to changing mode. let (data_inbox, data_inbox_rx) = if mode == SessionMode::Continuous { let (data_inbox, data_inbox_rx) = - cancelable_channel::(2, cancel_token.clone()); + cancelable_channel::(2, close_inboxes_token.clone()); (Some(data_inbox), Some(data_inbox_rx)) } else { (None, None) }; - let mut intents = intents::IntentDispatcher::new(store.auth().clone(), initial_intents); + let mut intents = + intents::IntentDispatcher::new(store.auth().clone(), initial_intents, intents_inbox_rx); let intents_fut = with_span(error_span!("intents"), async { use intents::Output; - let mut intents_gen = intents.run_gen(intents_inbox_rx); + let mut intents_gen = intents.run_gen(); while let Some(output) = intents_gen.try_next().await? 
{ trace!(?output, "yield"); match output { Output::SubmitInterests(interests) => { intersection_inbox .send(aoi_finder::Input::AddInterests(interests)) - .await?; + .await + .ok(); } // TODO: Add Output::SetMode(SessionMode) to propagate mode changes. Output::AllIntentsDropped => { debug!("close session (all intents dropped)"); - cancel_token.cancel(); + close_session_token.cancel(); } } } @@ -164,19 +179,22 @@ pub(crate) async fn run_session( } }); + let intents_inbox_2 = intents_inbox.clone(); let update_loop = with_span(error_span!("update"), async { while let Some(update) = update_receiver.next().await { match update { SessionUpdate::SubmitIntent(data) => { - intents_inbox + intents_inbox_2 .send(intents::Input::SubmitIntent(data)) .await?; } } } + drop(intents_inbox_2); Ok(()) }); + let intents_inbox_2 = intents_inbox.clone(); let intersection_loop = with_span(error_span!("intersection"), async { use aoi_finder::Output; let mut gen = IntersectionFinder::run_gen(caps.clone(), intersection_inbox_rx); @@ -186,20 +204,25 @@ pub(crate) async fn run_session( Output::SubmitAuthorisation(authorisation) => { pai_inbox .send(pai::Input::SubmitAuthorisation(authorisation)) - .await?; + .await + .ok(); } Output::AoiIntersection(intersection) => { let area = intersection.intersection.clone(); let namespace = intersection.namespace; reconciler_inbox .send(reconciler::Input::AoiIntersection(intersection.clone())) - .await?; + .await + .ok(); let event = EventKind::InterestIntersection { namespace, area }; - intents_inbox.send(intents::Input::EmitEvent(event)).await?; + intents_inbox_2 + .send(intents::Input::EmitEvent(event)) + .await?; if let Some(data_inbox) = &data_inbox { data_inbox .send(data::Input::AoiIntersection(intersection.clone())) - .await?; + .await + .ok(); } } Output::SignAndSendCapability { handle, capability } => { @@ -208,9 +231,11 @@ pub(crate) async fn run_session( } } } + drop(intents_inbox_2); Ok(()) }); + let intents_inbox_2 = intents_inbox.clone(); 
let pai_loop = with_span(error_span!("pai"), async { use pai::Output; let inbox = pai_inbox_rx.merge(intersection_recv.map(pai::Input::ReceivedMessage)); @@ -223,12 +248,12 @@ pub(crate) async fn run_session( namespace: intersection.authorisation.namespace(), area: intersection.authorisation.read_cap().granted_area().clone(), }; - ( + let _ = ( intersection_inbox.send(aoi_finder::Input::PaiIntersection(intersection)), - intents_inbox.send(intents::Input::EmitEvent(event)), + intents_inbox_2.send(intents::Input::EmitEvent(event)), ) - .try_join() - .await?; + .join() + .await; } Output::SignAndSendSubspaceCap(handle, cap) => { let message = caps.sign_subspace_capability(store.secrets(), cap, handle)?; @@ -236,9 +261,11 @@ pub(crate) async fn run_session( } } } + drop(intents_inbox_2); Ok(()) }); + let intents_inbox_2 = intents_inbox.clone(); let reconciler_loop = with_span(error_span!("reconciler"), async { use reconciler::Output; let mut gen = Reconciler::run_gen( @@ -254,7 +281,7 @@ pub(crate) async fn run_session( while let Some(output) = gen.try_next().await? 
{ match output { Output::ReconciledArea { namespace, area } => { - intents_inbox + intents_inbox_2 .send(intents::Input::EmitEvent(EventKind::Reconciled { namespace, area, @@ -265,12 +292,13 @@ pub(crate) async fn run_session( // Stop session if not in live mode; if !mode.is_live() { debug!("close session (reconciliation finished and not in live mode)"); - cancel_token.cancel(); + close_session_token.cancel(); break; } } } } + drop(intents_inbox_2); Ok(()) }); @@ -287,13 +315,12 @@ pub(crate) async fn run_session( caps.validate_and_bind_theirs(message.capability.0, message.signature)?; pai_inbox .send(pai::Input::ReceivedReadCapForIntersection(handle)) - .await?; + .await + .ok(); } Ok(()) }); - let mut we_cancelled = false; - let control_loop = with_span(error_span!("control"), async { let res = control_loop( control_recv, @@ -304,12 +331,8 @@ pub(crate) async fn run_session( &event_sender, ) .await; - if !cancel_token.is_cancelled() { - debug!("close session (closed by peer)"); - cancel_token.cancel(); - } else { - we_cancelled = true; - } + // Once the control loop closed, close the inboxes. + close_inboxes_token.cancel(); res }); @@ -330,11 +353,14 @@ pub(crate) async fn run_session( namespace, aoi: area_of_interest, }) - .await?; + .await + .ok(); } Ok(()) }); + drop(intents_inbox); + let result = ( intents_fut, control_loop, @@ -350,25 +376,35 @@ pub(crate) async fn run_session( .try_join() .await; + let result = result.map_err(Arc::new).map(|_| ()); + // Unsubscribe from the store. store.entries().unsubscribe(&session_id); - let result = result.map_err(Arc::new).map(|_| ()); + // Track if we closed the session by triggering the cancel token, or if the remote peer closed + // the session by closing the control channel. 
+ let we_cancelled = close_session_token.is_cancelled(); - debug!(error=?result.as_ref().err(), ?we_cancelled, "session complete"); - - let remaining_intents = match result.as_ref() { + let mut remaining_intents = vec![]; + match &result { Ok(()) => { - // If the session closed without an error, return the remaining intents - // so that they can potentially be restarted. - intents.drain_all() + // If the session did not error, we drain our queued intents to retry them. + remaining_intents.append(&mut intents.drain_all().await); } - Err(err) => { - // If the session closed with error, abort the intents with that error. - intents.abort_all(err.clone()).await; - vec![] + Err(error) => { + // If the session errored, we abort our queued intents. + intents.abort_all(error.clone()).await; } - }; + } + // Append intents that are still queued in the update receiver channel. + let mut update_receiver = update_receiver.into_inner().into_inner(); + update_receiver.close(); + while let Some(update) = update_receiver.recv().await { + match update { + SessionUpdate::SubmitIntent(intent) => remaining_intents.push(intent), + } + } + debug!(error=?result.as_ref().err(), remaining_intents=remaining_intents.len(), ?we_cancelled, "session complete"); if let Err(_receiver_dropped) = event_sender .send(SessionEvent::Complete { @@ -376,7 +412,6 @@ pub(crate) async fn run_session( we_cancelled, senders: channel_sender, remaining_intents, - update_receiver: update_receiver.into_inner().into_inner(), }) .await { @@ -391,7 +426,7 @@ async fn control_loop( our_role: Role, caps: &Capabilities, sender: &ChannelSenders, - pai_inbox: &Sender, + pai_inbox: &mpsc::Sender, event_sender: &EventSender, ) -> Result<(), Error> { // Reveal our nonce. 
@@ -447,32 +482,22 @@ async fn control_loop( Ok(()) } +fn channel(cap: usize) -> (mpsc::Sender, mpsc::Receiver) { + let (tx, rx) = mpsc::channel(cap); + (tx, rx) +} + fn cancelable_channel( cap: usize, cancel_token: CancellationToken, -) -> (Sender, Cancelable>) { +) -> (mpsc::Sender, CancelableReceiver) { let (tx, rx) = mpsc::channel(cap); ( - Sender(tx), - Cancelable::new(ReceiverStream::new(rx), cancel_token), + tx, + CancelableReceiver::new(ReceiverStream::new(rx), cancel_token), ) } -#[derive(Debug)] -pub struct Sender(mpsc::Sender); - -impl Clone for Sender { - fn clone(&self) -> Self { - Self(self.0.clone()) - } -} - -impl Sender { - async fn send(&self, item: T) -> Result<(), ChannelReceiverDropped> { - self.0.send(item).await.map_err(|_| ChannelReceiverDropped) - } -} - async fn with_span( span: Span, fut: impl Future>, diff --git a/iroh-willow/src/util/stream.rs b/iroh-willow/src/util/stream.rs index ce93003467..aade2ba020 100644 --- a/iroh-willow/src/util/stream.rs +++ b/iroh-willow/src/util/stream.rs @@ -5,6 +5,7 @@ use std::{ }; use futures_lite::Stream; +use tokio_stream::wrappers::ReceiverStream; use tokio_util::sync::{CancellationToken, WaitForCancellationFutureOwned}; /// Wrapper around [`Stream`] that takes a cancel token to cancel the stream. @@ -51,3 +52,49 @@ impl Stream for Cancelable { } } } + +/// Wrapper around [`ReceiverStream`] that can be closed with a [`CancellationToken`]. 
+#[derive(Debug)] +pub struct CancelableReceiver { + receiver: ReceiverStream, + cancelled: Pin>, + is_cancelled: bool, +} + +impl CancelableReceiver { + pub fn new(receiver: ReceiverStream, cancel_token: CancellationToken) -> Self { + let is_cancelled = cancel_token.is_cancelled(); + Self { + receiver, + cancelled: Box::pin(cancel_token.cancelled_owned()), + is_cancelled, + } + } + + pub fn into_inner(self) -> ReceiverStream { + self.receiver + } +} + +impl Stream for CancelableReceiver { + type Item = T; + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match Pin::new(&mut self.receiver).poll_next(cx) { + Poll::Ready(r) => Poll::Ready(r), + Poll::Pending => { + if !self.is_cancelled { + match Pin::new(&mut self.cancelled).poll(cx) { + Poll::Ready(()) => { + self.receiver.close(); + self.is_cancelled = true; + Poll::Ready(None) + } + Poll::Pending => Poll::Pending, + } + } else { + Poll::Pending + } + } + } + } +} From b7a3272621257663ebae67c079d88cfb8b8a96d1 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 20 Aug 2024 11:36:36 +0200 Subject: [PATCH 151/198] feat: close channels on drop --- iroh-willow/src/util/channel.rs | 75 ++++++++++++++++++++++++++------- 1 file changed, 60 insertions(+), 15 deletions(-) diff --git a/iroh-willow/src/util/channel.rs b/iroh-willow/src/util/channel.rs index 0f4d67ef22..b459a33c1a 100644 --- a/iroh-willow/src/util/channel.rs +++ b/iroh-willow/src/util/channel.rs @@ -1,10 +1,13 @@ use std::{ - cmp, + cmp::{self}, future::poll_fn, io, marker::PhantomData, pin::Pin, - sync::{Arc, Mutex}, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, Mutex, MutexGuard, + }, task::{self, Poll, Waker}, }; @@ -115,6 +118,13 @@ impl Guarantees { /// Roughly modeled after https://docs.rs/tokio/latest/src/tokio/io/util/mem.rs.html#58 #[derive(Debug)] struct Shared { + inner: Mutex, + sender_count: AtomicUsize, + receiver_count: AtomicUsize, +} + +#[derive(Debug)] +struct Inner { buf: BytesMut, 
max_buffer_size: usize, write_wakers: Vec, @@ -124,8 +134,8 @@ struct Shared { } impl Shared { - fn new(max_buffer_size: usize, guarantees: Guarantees) -> Arc> { - let shared = Self { + fn new(max_buffer_size: usize, guarantees: Guarantees) -> Arc { + let inner = Inner { buf: BytesMut::new(), max_buffer_size, write_wakers: Default::default(), @@ -133,9 +143,20 @@ impl Shared { is_closed: false, guarantees, }; - Arc::new(Mutex::new(shared)) + let shared = Self { + inner: Mutex::new(inner), + sender_count: AtomicUsize::new(1), + receiver_count: AtomicUsize::new(1), + }; + Arc::new(shared) + } + + fn lock(&self) -> std::sync::LockResult> { + self.inner.lock() } +} +impl Inner { // fn set_max_buffer_size(&mut self, max_buffer_size: usize) -> bool { // if max_buffer_size >= self.buf.len() { // self.max_buffer_size = max_buffer_size; @@ -300,7 +321,7 @@ impl Shared { /// Asynchronous reader to read bytes from a channel. #[derive(Debug)] pub struct Reader { - shared: Arc>, + shared: Arc, } impl Reader { @@ -324,7 +345,7 @@ impl Reader { /// The writer implements [`AsyncWrite`]. 
#[derive(Debug)] pub struct Writer { - shared: Arc>, + shared: Arc, } impl Writer { @@ -368,7 +389,7 @@ impl AsyncWrite for Writer { #[derive(Debug)] pub struct Sender { - shared: Arc>, + shared: Arc, _ty: PhantomData, } @@ -400,7 +421,7 @@ impl Sender { #[derive(Debug)] pub struct Receiver { - shared: Arc>, + shared: Arc, _ty: PhantomData, } @@ -434,8 +455,9 @@ impl Stream for Receiver { } } -impl Clone for Receiver { +impl Clone for Sender { fn clone(&self) -> Self { + self.shared.sender_count.fetch_add(1, Ordering::Relaxed); Self { shared: Arc::clone(&self.shared), _ty: PhantomData, @@ -443,11 +465,34 @@ impl Clone for Receiver { } } -impl Clone for Sender { - fn clone(&self) -> Self { - Self { - shared: Arc::clone(&self.shared), - _ty: PhantomData, +impl Drop for Sender { + fn drop(&mut self) { + if self.shared.sender_count.fetch_sub(1, Ordering::Relaxed) == 1 { + self.shared.lock().unwrap().close(); + } + } +} + +impl Drop for Writer { + fn drop(&mut self) { + if self.shared.sender_count.fetch_sub(1, Ordering::Relaxed) == 1 { + self.shared.lock().unwrap().close(); + } + } +} + +impl Drop for Reader { + fn drop(&mut self) { + if self.shared.receiver_count.fetch_sub(1, Ordering::Relaxed) == 1 { + self.shared.lock().unwrap().close(); + } + } +} + +impl Drop for Receiver { + fn drop(&mut self) { + if self.shared.receiver_count.fetch_sub(1, Ordering::Relaxed) == 1 { + self.shared.lock().unwrap().close(); } } } From 01ec02f2f17d54dfca2bafff489b567239703ee3 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 20 Aug 2024 11:36:53 +0200 Subject: [PATCH 152/198] refactor: improve session shutdown logic --- iroh-willow/src/engine/actor.rs | 4 - iroh-willow/src/engine/peer_manager.rs | 38 +++++----- iroh-willow/src/net.rs | 101 +++++++++++++------------ iroh-willow/src/session.rs | 22 +----- iroh-willow/src/session/error.rs | 4 + iroh-willow/src/session/intents.rs | 94 ++++++++++++----------- iroh-willow/src/session/run.rs | 65 +++++++++++----- 
iroh-willow/src/store/auth.rs | 6 +- iroh-willow/tests/basic.rs | 2 +- 9 files changed, 178 insertions(+), 158 deletions(-) diff --git a/iroh-willow/src/engine/actor.rs b/iroh-willow/src/engine/actor.rs index f762646f44..ab379a5fa5 100644 --- a/iroh-willow/src/engine/actor.rs +++ b/iroh-willow/src/engine/actor.rs @@ -7,7 +7,6 @@ use tokio::{ sync::{mpsc, oneshot}, task::JoinSet, }; -use tokio_util::sync::CancellationToken; use tracing::{debug, error, error_span, trace, warn, Instrument}; use crate::{ @@ -345,7 +344,6 @@ impl Actor { } => { let session_id = self.next_session_id(); let store = self.store.clone(); - let cancel_token = CancellationToken::new(); let (update_tx, update_rx) = mpsc::channel(SESSION_UPDATE_CHANNEL_CAP); let (event_tx, event_rx) = mpsc::channel(SESSION_EVENT_CHANNEL_CAP); @@ -356,7 +354,6 @@ impl Actor { store, conn, intents, - cancel_token.clone(), session_id, EventSender(event_tx), update_rx, @@ -370,7 +367,6 @@ impl Actor { }); let handle = SessionHandle { - cancel_token, update_tx, event_rx, }; diff --git a/iroh-willow/src/engine/peer_manager.rs b/iroh-willow/src/engine/peer_manager.rs index 2682ab58f5..68df7c87e2 100644 --- a/iroh-willow/src/engine/peer_manager.rs +++ b/iroh-willow/src/engine/peer_manager.rs @@ -157,7 +157,7 @@ impl PeerManager { Input::SubmitIntent { peer, intent } => self.submit_intent(peer, intent).await, Input::HandleConnection { conn } => self.handle_connection(conn).await, Input::Shutdown { reply } => { - self.init_shutdown(); + self.init_shutdown().await; if self.conn_tasks.is_empty() { reply.send(()).ok(); break; @@ -348,8 +348,9 @@ impl PeerManager { } => { if let Err(err) = update_tx.send(SessionUpdate::SubmitIntent(intent)).await { debug!("failed to submit intent into active session, queue in peer state"); - let SessionUpdate::SubmitIntent(intent) = err.0; - intents_after_close.push(intent); + if let SessionUpdate::SubmitIntent(intent) = err.0 { + intents_after_close.push(intent); + } } else { trace!("intent 
sent to session"); } @@ -413,7 +414,6 @@ impl PeerManager { .peers .get_mut(&peer) .context("got conn task output for unknown peer")?; - trace!(out=?out.as_ref().map(|o| format!("{o}")), "conn task output"); match out { Err(err) => { trace!(?our_role, current_state=%peer_info.state, "conn task failed: {err:#?}"); @@ -460,16 +460,12 @@ impl PeerManager { ) .await; } - PeerState::Active { .. } => { - // We do not care about intents here, they will be handled in the - // session (which will error as well because all channels are now - // closed). + PeerState::Active { update_tx, .. } => { warn!(?err, "connection failed while active"); - // TODO:(Frando): Not sure if this is good practice? - // A `debug_assert` is far too much, because this can be triggered by other peers. - // However in tests I want to make sure that *all* connections terminate gracefully. - #[cfg(test)] - panic!("connection failed: {err:?}"); + update_tx + .send(SessionUpdate::Abort(Error::ConnectionClosed(err))) + .await + .ok(); } PeerState::None => { warn!(?err, "connection failed while peer is in None state"); @@ -487,6 +483,10 @@ impl PeerManager { ref mut intents, .. } = &mut peer_info.state else { + debug!( + ?our_role, + "got connection ready for peer in non-pending state" + ); conn.close(ERROR_CODE_FAIL, b"invalid-state"); drop(conn); // TODO: unreachable? @@ -528,7 +528,6 @@ impl PeerManager { let abort_handle = spawn_conn_task(&mut self.conn_tasks, peer_info, fut); let SessionHandle { - cancel_token, update_tx, event_rx, } = session_handle; @@ -537,7 +536,6 @@ impl PeerManager { peer_info.state = PeerState::Active { update_tx, - cancel_token, intents_after_close: vec![], }; peer_info.abort_handle = Some(abort_handle); @@ -578,7 +576,7 @@ impl PeerManager { Ok(()) } - fn init_shutdown(&mut self) { + async fn init_shutdown(&mut self) { self.shutting_down = true; for peer in self.peers.values() { match &peer.state { @@ -591,9 +589,12 @@ impl PeerManager { } } PeerState::Closing { .. 
} => {} - PeerState::Active { cancel_token, .. } => { + PeerState::Active { update_tx, .. } => { // We are in active state. We cancel our session, which leads to graceful connection termination. - cancel_token.cancel(); + update_tx + .send(SessionUpdate::Abort(Error::ShuttingDown)) + .await + .ok(); } } } @@ -642,7 +643,6 @@ enum PeerState { cancel_dial: Option, }, Active { - cancel_token: CancellationToken, update_tx: mpsc::Sender, /// List of intents that we failed to submit into the session because it is closing. intents_after_close: Vec, diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index d244f0183c..aee2892b36 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -188,34 +188,41 @@ pub(crate) fn prepare_channels( let cap = find(Channel::Logical(LogicalChannel::Capability))?; let dat = find(Channel::Logical(LogicalChannel::Data))?; - let fut = (ctrl.2, pai.2, rec.2, stt.2, aoi.2, cap.2, dat.2) - .try_join() - .map_ok(|_| ()); - - let logical_send = LogicalChannelSenders { - intersection_send: pai.0, - reconciliation_send: rec.0, - static_tokens_send: stt.0, - aoi_send: aoi.0, - capability_send: cap.0, - data_send: dat.0, - }; - let logical_recv = LogicalChannelReceivers { - intersection_recv: pai.1.into(), - reconciliation_recv: rec.1.into(), - static_tokens_recv: stt.1.into(), - aoi_recv: aoi.1.into(), - capability_recv: cap.1.into(), - data_recv: dat.1.into(), + let fut = async move { + let result = (ctrl.2, pai.2, rec.2, stt.2, aoi.2, cap.2, dat.2) + .try_join() + .map_ok(|_| ()) + .await; + if let Err(err) = &result { + debug!("channels closed with error: {err:#}"); + } else { + debug!("channels closed"); + } + result }; + let channels = Channels { send: ChannelSenders { control_send: ctrl.0, - logical_send, + logical_send: LogicalChannelSenders { + intersection_send: pai.0, + reconciliation_send: rec.0, + static_tokens_send: stt.0, + aoi_send: aoi.0, + capability_send: cap.0, + data_send: dat.0, + }, }, recv: ChannelReceivers { 
control_recv: ctrl.1, - logical_recv, + logical_recv: LogicalChannelReceivers { + intersection_recv: pai.1.into(), + reconciliation_recv: rec.1.into(), + static_tokens_recv: stt.1.into(), + aoi_recv: aoi.1.into(), + capability_recv: cap.1.into(), + data_recv: dat.1.into(), + }, }, }; Ok((channels, fut)) @@ -238,10 +245,10 @@ fn prepare_channel( let (sender, outbound_reader) = outbound_channel(cap, guarantees); let (inbound_writer, receiver) = inbound_channel(cap); - let recv_fut = recv_loop(recv_stream, inbound_writer) + let recv_fut = recv_loop(ch, recv_stream, inbound_writer) .map_err(move |e| e.context(format!("receive loop for {ch:?} failed"))); - let send_fut = send_loop(send_stream, outbound_reader) + let send_fut = send_loop(ch, send_stream, outbound_reader) .map_err(move |e| e.context(format!("send loop for {ch:?} failed"))); let fut = (recv_fut, send_fut).try_join().map_ok(|_| ()); @@ -249,8 +256,12 @@ fn prepare_channel( (sender, receiver, fut) } -async fn recv_loop(mut recv_stream: RecvStream, mut channel_writer: Writer) -> Result<()> { - trace!("recv: start"); +async fn recv_loop( + channel: Channel, + mut recv_stream: RecvStream, + mut channel_writer: Writer, +) -> Result<()> { + trace!(?channel, "recv: start"); let max_buffer_size = channel_writer.max_buffer_size(); while let Some(buf) = recv_stream .read_chunk(max_buffer_size, true) @@ -261,14 +272,17 @@ async fn recv_loop(mut recv_stream: RecvStream, mut channel_writer: Writer) -> R channel_writer.write_all(&buf.bytes[..]).await?; // trace!(len = buf.bytes.len(), "sent"); } - trace!("recv: stream close"); + trace!(?channel, "recv: stream close"); channel_writer.close(); - trace!("recv: done"); Ok(()) } -async fn send_loop(mut send_stream: SendStream, channel_reader: Reader) -> Result<()> { - trace!("send: start"); +async fn send_loop( + channel: Channel, + mut send_stream: SendStream, + channel_reader: Reader, +) -> Result<()> { + trace!(?channel, "send: start"); while let Some(data) = 
channel_reader.read_bytes().await { // let len = data.len(); // trace!(len, "send"); @@ -278,9 +292,9 @@ async fn send_loop(mut send_stream: SendStream, channel_reader: Reader) -> Resul .context("failed to write to quic stream")?; // trace!(len, "sent"); } - trace!("send: close writer"); + trace!(?channel, "send: close writer"); send_stream.finish().await?; - trace!("send: done"); + trace!(?channel, "send: done"); Ok(()) } @@ -426,14 +440,14 @@ mod tests { .await?; let (channels, fut) = prepare_channels(channel_streams)?; let net_task = tokio::task::spawn(fut.instrument(span)); - let willow_conn = ConnHandle { + let conn_handle = ConnHandle { initial_transmission, our_role, peer, channels, }; - let handle = actor.init_session(willow_conn, intents).await?; - Ok((handle, net_task)) + let session_handle = actor.init_session(conn_handle, intents).await?; + Ok((session_handle, net_task)) } #[tokio::test(flavor = "multi_thread")] @@ -666,7 +680,7 @@ mod tests { let init_alfie = SessionInit::new(Interests::All, SessionMode::Continuous); let init_betty = SessionInit::new(Interests::All, SessionMode::Continuous); - let (intent_alfie, mut intent_handle_alfie) = Intent::new(init_alfie); + let (intent_alfie, intent_handle_alfie) = Intent::new(init_alfie); let (intent_betty, mut intent_handle_betty) = Intent::new(init_betty); let nonce_alfie = AccessChallenge::generate_with_rng(&mut rng); @@ -698,7 +712,9 @@ mod tests { // TODO: replace with event tokio::time::sleep(Duration::from_secs(1)).await; - session_alfie.close(); + // Drop the alfie intent, which closes the session. 
+ drop(intent_handle_alfie); + let (senders_alfie, _alfie_cancelled) = session_alfie .complete() .await @@ -717,14 +733,9 @@ mod tests { r1.unwrap(); r2.unwrap(); - let (res_alfie, res_betty) = tokio::join!( - intent_handle_alfie.complete(), - intent_handle_betty.complete() - ); - info!(time=?start.elapsed(), "reconciliation finished"); - info!("alfie intent res {:?}", res_alfie); + let res_betty = intent_handle_betty.complete().await; + info!(time=?start.elapsed(), "finished"); info!("betty intent res {:?}", res_betty); - assert!(res_alfie.is_ok()); assert!(res_betty.is_ok()); tokio::try_join!( @@ -733,10 +744,6 @@ mod tests { ) .expect("failed to close both connections gracefully"); - info!("alfie session res {:?}", res_alfie); - info!("betty session res {:?}", res_betty); - assert!(res_alfie.is_ok()); - assert!(res_betty.is_ok()); let alfie_entries = get_entries(&handle_alfie, namespace_id).await?; let betty_entries = get_entries(&handle_betty, namespace_id).await?; info!("alfie has now {} entries", alfie_entries.len()); diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 0a7bf135be..c5165d5239 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -11,7 +11,6 @@ use std::sync::Arc; use channels::ChannelSenders; use tokio::sync::mpsc; -use tokio_util::sync::CancellationToken; use crate::{ interest::Interests, @@ -138,6 +137,7 @@ pub(crate) enum SessionEvent { #[derive(Debug)] pub(crate) enum SessionUpdate { SubmitIntent(Intent), + Abort(Error), } /// Handle to an active session. @@ -145,7 +145,6 @@ pub(crate) enum SessionUpdate { /// This is not made public, the only public interface are [`intents`] handles. #[derive(Debug)] pub(crate) struct SessionHandle { - pub(crate) cancel_token: CancellationToken, pub(crate) update_tx: mpsc::Sender, pub(crate) event_rx: mpsc::Receiver, } @@ -175,23 +174,4 @@ impl SessionHandle { } Err(Arc::new(Error::ActorFailed)) } - - // /// Submit a new synchronisation intent. 
- // pub(crate) async fn submit_intent(&self, intent: Intent) -> anyhow::Result<()> { - // self.update_tx - // .send(SessionUpdate::SubmitIntent(intent)) - // .await?; - // Ok(()) - // } - - /// Finish the session gracefully. - /// - /// After calling this, no further protocol messages will be sent from this node. - /// Previously queued messages will still be sent out. The session will only be closed - /// once the other peer closes their senders as well. - #[cfg(test)] - pub(crate) fn close(&self) { - tracing::debug!("close session (session handle close called)"); - self.cancel_token.cancel(); - } } diff --git a/iroh-willow/src/session/error.rs b/iroh-willow/src/session/error.rs index 161225655c..c3b8e89af3 100644 --- a/iroh-willow/src/session/error.rs +++ b/iroh-willow/src/session/error.rs @@ -78,6 +78,10 @@ pub enum Error { ShuttingDown, #[error("The operation was cancelled locally")] Cancelled, + #[error("Connection was closed by peer")] + ConnectionClosed(#[source] anyhow::Error), + #[error("Session was closed by peer")] + SessionClosedByPeer, } #[derive(Debug, thiserror::Error)] diff --git a/iroh-willow/src/session/intents.rs b/iroh-willow/src/session/intents.rs index 3a6fee86e6..1536947e2c 100644 --- a/iroh-willow/src/session/intents.rs +++ b/iroh-willow/src/session/intents.rs @@ -268,6 +268,39 @@ pub(super) enum Output { AllIntentsDropped, } +#[derive(Debug)] +pub(crate) struct RemainingIntents { + pub(crate) active_incomplete: Vec>, + pub(crate) queued: Vec, +} + +impl RemainingIntents { + /// Abort both incomplete active and queued unprocessed intents. 
+ pub async fn abort_all(self, error: Arc) { + let futs = Iterator::chain( + self.queued + .into_iter() + .flat_map(|intent| intent.channels.map(|ch| ch.event_tx)), + self.active_incomplete, + ) + .map(|event_tx| { + let error = error.clone(); + async move { event_tx.send(EventKind::Abort { error }).await } + }); + let _ = futures_buffered::join_all(futs).await; + } + + /// Abort incomplete active intents, and return queued unprocessed intents. + pub async fn abort_active(self, error: Arc) -> Vec { + let futs = self.active_incomplete.into_iter().map(|event_tx| { + let error = error.clone(); + async move { event_tx.send(EventKind::Abort { error }).await } + }); + let _ = futures_buffered::join_all(futs).await; + self.queued + } +} + #[derive(derive_more::Debug)] pub(super) struct IntentDispatcher { inbox: mpsc::Receiver, @@ -297,60 +330,31 @@ impl IntentDispatcher { } } - /// Aborts all registered intents. - pub(super) async fn abort_all(&self, error: Arc) { - let _ = futures_buffered::join_all( - Iterator::chain( - self.pending_intents - .iter() - .flat_map(|intent| intent.channels.as_ref()) - .map(|ch| &ch.event_tx), - self.intents - .values() - .flat_map(|intent| intent.event_tx.as_ref()), - ) - .map(|event_tx| { - let error = error.clone(); - async move { event_tx.send(EventKind::Abort { error }).await } - }), - ) - .await; - } + pub(super) async fn drain_all(mut self) -> RemainingIntents { + let mut queued = vec![]; - /// Takes self and returns all pending intents. - pub(super) async fn drain_all(mut self) -> Vec { // Drain inbox. 
- let mut pending_intents = vec![]; self.inbox.close(); - while let Ok(item) = self.inbox.try_recv() { + while let Some(item) = self.inbox.recv().await { match item { - Input::EmitEvent(event) => { - self.emit_event_inner(event).await; - } - Input::SubmitIntent(intent) => pending_intents.push(intent), + Input::EmitEvent(event) => self.emit_event_inner(event).await, + Input::SubmitIntent(intent) => queued.push(intent), } } // Drain pending intents. - pending_intents.extend(self.pending_intents.into_iter()); - - // Abort active intents. - let error = Arc::new(Error::Cancelled); - let active_intents = self.intents.drain().filter_map(|(_id, info)| { - if info.is_complete() { - None - } else { - info.event_tx - } - }); - let _ = futures_buffered::join_all(active_intents.map(|event_tx| { - let error = error.clone(); - async move { event_tx.send(EventKind::Abort { error }).await } - })) - .await; + queued.extend(self.pending_intents.into_iter()); - // Return pending (unprocessed) intents. - pending_intents + // Drain incomplete active intents + let active_incomplete = self + .intents + .drain() + .filter_map(|(_id, info)| info.is_complete().then_some(info.event_tx).flatten()); + + RemainingIntents { + queued, + active_incomplete: active_incomplete.collect(), + } } /// Run the [`IntentDispatcher`]. 
diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index 20a031c149..e5324e3f57 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -9,7 +9,7 @@ use strum::IntoEnumIterator; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; use tokio_util::sync::CancellationToken; -use tracing::{debug, error_span, trace, warn, Instrument, Span}; +use tracing::{debug, error_span, trace, Instrument, Span}; use crate::{ net::ConnHandle, @@ -45,7 +45,6 @@ pub(crate) async fn run_session( store: Store, conn: ConnHandle, initial_intents: Vec, - close_session_token: CancellationToken, session_id: SessionId, event_sender: EventSender, update_receiver: ReceiverStream, @@ -86,6 +85,7 @@ pub(crate) async fn run_session( debug!(role = ?our_role, ?mode, "start session"); // Make all our receivers close once the close session token is triggered. + let close_session_token = CancellationToken::new(); let control_recv = Cancelable::new(control_recv, close_session_token.clone()); let reconciliation_recv = Cancelable::new(reconciliation_recv, close_session_token.clone()); let intersection_recv = Cancelable::new(intersection_recv, close_session_token.clone()); @@ -179,6 +179,8 @@ pub(crate) async fn run_session( } }); + let mut abort_err = None; + let intents_inbox_2 = intents_inbox.clone(); let update_loop = with_span(error_span!("update"), async { while let Some(update) = update_receiver.next().await { @@ -188,6 +190,11 @@ pub(crate) async fn run_session( .send(intents::Input::SubmitIntent(data)) .await?; } + SessionUpdate::Abort(err) => { + abort_err = Some(err); + close_session_token.cancel(); + break; + } } } drop(intents_inbox_2); @@ -359,6 +366,9 @@ pub(crate) async fn run_session( Ok(()) }); + // Drop the intents_inbox. 
We cloned this into the different loops, and the clones are dropped + // at the end of the loops - which means the intents loop will terminate once all other loops + // that potentially send into the intents inbox. drop(intents_inbox); let result = ( @@ -376,8 +386,6 @@ pub(crate) async fn run_session( .try_join() .await; - let result = result.map_err(Arc::new).map(|_| ()); - // Unsubscribe from the store. store.entries().unsubscribe(&session_id); @@ -385,25 +393,45 @@ pub(crate) async fn run_session( // the session by closing the control channel. let we_cancelled = close_session_token.is_cancelled(); - let mut remaining_intents = vec![]; - match &result { - Ok(()) => { - // If the session did not error, we drain our queued intents to retry them. - remaining_intents.append(&mut intents.drain_all().await); - } - Err(error) => { - // If the session errored, we abort our queued intents. - intents.abort_all(error.clone()).await; - } - } - // Append intents that are still queued in the update receiver channel. + // Close the update receiver channel. let mut update_receiver = update_receiver.into_inner().into_inner(); update_receiver.close(); + + // Drain incomplete intents that are still in the intent dispatcher. + let mut remaining_intents = intents.drain_all().await; + + // Drain the update receiver channel. 
while let Some(update) = update_receiver.recv().await { match update { - SessionUpdate::SubmitIntent(intent) => remaining_intents.push(intent), + SessionUpdate::SubmitIntent(intent) => remaining_intents.queued.push(intent), + SessionUpdate::Abort(err) => { + abort_err = Some(err); + } } } + + let result = match (result, abort_err) { + (_, Some(err)) => Err(Arc::new(err)), + (Err(err), None) => Err(Arc::new(err)), + _ => Ok(()), + }; + + let remaining_intents = match result.as_ref() { + Err(err) => { + remaining_intents.abort_all(err.clone()).await; + vec![] + } + Ok(()) if we_cancelled => { + drop(remaining_intents.active_incomplete); + remaining_intents.queued + } + Ok(()) => { + remaining_intents + .abort_active(Arc::new(Error::SessionClosedByPeer)) + .await + } + }; + debug!(error=?result.as_ref().err(), remaining_intents=remaining_intents.len(), ?we_cancelled, "session complete"); if let Err(_receiver_dropped) = event_sender @@ -415,7 +443,7 @@ pub(crate) async fn run_session( }) .await { - warn!("failed to send session complete event: receiver dropped"); + debug!("failed to send session complete event: receiver dropped"); } result @@ -478,6 +506,7 @@ async fn control_loop( _ => return Err(Error::UnsupportedMessage), } } + trace!("control loop closing"); Ok(()) } diff --git a/iroh-willow/src/store/auth.rs b/iroh-willow/src/store/auth.rs index 9198692ad1..54d1511c99 100644 --- a/iroh-willow/src/store/auth.rs +++ b/iroh-willow/src/store/auth.rs @@ -7,7 +7,7 @@ use std::collections::{HashMap, HashSet}; use anyhow::Result; use ed25519_dalek::SignatureError; use meadowcap::{IsCommunal, NamespaceIsNotCommunalError, OwnedCapabilityCreationError}; -use tracing::debug; +use tracing::{debug, trace}; use crate::{ interest::{ @@ -58,7 +58,7 @@ impl Auth { caps: impl IntoIterator, ) -> Result<(), AuthError> { for cap in caps.into_iter() { - tracing::debug!("import cap {cap:?}"); + debug!(?cap, "import cap"); cap.validate()?; // Only allow importing caps we can use. 
// TODO: Is this what we want? @@ -67,7 +67,7 @@ impl Auth { return Err(AuthError::MissingUserSecret(user_id)); } self.caps.insert(cap)?; - tracing::debug!("imported"); + trace!("imported"); } Ok(()) } diff --git a/iroh-willow/tests/basic.rs b/iroh-willow/tests/basic.rs index 2e2a6afe9c..88af960c98 100644 --- a/iroh-willow/tests/basic.rs +++ b/iroh-willow/tests/basic.rs @@ -186,7 +186,7 @@ async fn peer_manager_update_intent() -> Result<()> { intent.close().await; - assert!(intent.next().await.is_none(),); + assert!(intent.next().await.is_none()); [alfie, betty].map(Peer::shutdown).try_join().await?; Ok(()) From aad64b973186f783afda8441cd65875e7a13b778 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 20 Aug 2024 12:15:41 +0200 Subject: [PATCH 153/198] fix: handle empty payloads --- iroh-willow/src/session/payload.rs | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/iroh-willow/src/session/payload.rs b/iroh-willow/src/session/payload.rs index d89cf4fb44..d5c099ac83 100644 --- a/iroh-willow/src/session/payload.rs +++ b/iroh-willow/src/session/payload.rs @@ -130,12 +130,17 @@ impl CurrentPayload { pub async fn finalize(&mut self) -> Result<(), Error> { let state = self.0.take().ok_or(Error::InvalidMessageInCurrentState)?; - let writer = state - .writer - .ok_or_else(|| Error::InvalidMessageInCurrentState)?; - drop(writer.sender); - let (tag, len) = writer.fut.await.map_err(Error::PayloadStore)?; - if *tag.hash() != state.payload_digest.0 { + let (hash, len) = match state.writer { + Some(writer) => { + drop(writer.sender); + let (tag, len) = writer.fut.await.map_err(Error::PayloadStore)?; + (*tag.hash(), len) + } + // The writer is only empty if we did not receive any chunks. In this case, the + // "received data" is that of the empty hash with size 0. 
+ None => (iroh_base::hash::Hash::EMPTY, 0), + }; + if hash != state.payload_digest.0 { return Err(Error::PayloadDigestMismatch); } if len != state.expected_length { @@ -145,9 +150,6 @@ impl CurrentPayload { // we could store a tag for each blob // however we really want reference counting here, not individual tags // can also fallback to the naive impl from iroh-docs to just protect all docs hashes on gc - // let hash_and_format = *tag.inner(); - // let name = b"foo"; - // store.set_tag(name, Some(hash_and_format)); Ok(()) } From bfd1da9829c369858d2f7341b6f6dea3a1988877 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 20 Aug 2024 12:16:03 +0200 Subject: [PATCH 154/198] fix: handle session errors correctly --- iroh-willow/src/net.rs | 6 ++-- iroh-willow/src/session/intents.rs | 5 +++- iroh-willow/tests/basic.rs | 47 ++++++++++++++++++++++++++++++ 3 files changed, 54 insertions(+), 4 deletions(-) diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index aee2892b36..71b3b10119 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -389,8 +389,7 @@ async fn wait_for_goodbye_or_graceful_close(conn: &Connection) -> Result<()> { #[cfg(test)] mod tests { use std::{ - collections::BTreeSet, - time::{Duration, Instant}, + collections::BTreeSet, sync::Arc, time::{Duration, Instant} }; use anyhow::Result; @@ -736,7 +735,8 @@ mod tests { let res_betty = intent_handle_betty.complete().await; info!(time=?start.elapsed(), "finished"); info!("betty intent res {:?}", res_betty); - assert!(res_betty.is_ok()); + assert!(res_betty.is_err()); + assert_eq!(res_betty, Err(Arc::new(crate::session::Error::SessionClosedByPeer))); tokio::try_join!( terminate_gracefully(&conn_alfie), diff --git a/iroh-willow/src/session/intents.rs b/iroh-willow/src/session/intents.rs index 1536947e2c..24c77bcae5 100644 --- a/iroh-willow/src/session/intents.rs +++ b/iroh-willow/src/session/intents.rs @@ -349,7 +349,7 @@ impl IntentDispatcher { let active_incomplete = 
self .intents .drain() - .filter_map(|(_id, info)| info.is_complete().then_some(info.event_tx).flatten()); + .filter_map(|(_id, info)| (!info.is_complete()).then_some(info.event_tx).flatten()); RemainingIntents { queued, @@ -617,7 +617,10 @@ impl IntentInfo { if matches { self.send(event.clone()).await?; if is_reconciled && self.interests.is_empty() { + debug!("SEND RECONCILED ALL"); self.send(EventKind::ReconciledAll).await? + } else { + debug!("DO NOT SEND RECONCILED ALL"); } } Ok(self.is_complete()) diff --git a/iroh-willow/tests/basic.rs b/iroh-willow/tests/basic.rs index 88af960c98..fd01eaf55c 100644 --- a/iroh-willow/tests/basic.rs +++ b/iroh-willow/tests/basic.rs @@ -433,3 +433,50 @@ mod util { Ok(()) } } + +#[tokio::test(flavor = "multi_thread")] +async fn peer_manager_empty_payload() -> Result<()> { + iroh_test::logging::setup_multithreaded(); + let mut rng = create_rng("peer_manager_empty_payload"); + + let [alfie, betty] = spawn_two(&mut rng).await?; + let (namespace, _alfie_user, betty_user) = setup_and_delegate(&alfie, &betty).await?; + let betty_node_id = betty.node_id(); + + insert(&betty, namespace, betty_user, &[b"foo"], "").await?; + + let init = SessionInit::new(Interests::all(), SessionMode::ReconcileOnce); + let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); + + assert_eq!( + intent.next().await.unwrap(), + EventKind::CapabilityIntersection { + namespace, + area: Area::new_full(), + } + ); + + assert_eq!( + intent.next().await.unwrap(), + EventKind::InterestIntersection { + namespace, + area: Area::new_full().into_area_of_interest() + } + ); + + assert_eq!( + intent.next().await.unwrap(), + EventKind::Reconciled { + namespace, + area: Area::new_full().into_area_of_interest() + } + ); + + assert_eq!(intent.next().await.unwrap(), EventKind::ReconciledAll); + + assert!(intent.next().await.is_none()); + + [alfie, betty].map(Peer::shutdown).try_join().await?; + + Ok(()) +} From 75e226160d45c7b075ef82ffee638e0b8831065c Mon Sep 
17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 20 Aug 2024 22:52:51 +0200 Subject: [PATCH 155/198] chore: fmt --- iroh-willow/src/net.rs | 9 +++++++-- iroh-willow/src/proto/wgps/messages.rs | 3 ++- iroh-willow/src/session/intents.rs | 3 --- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 71b3b10119..185f4b796a 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -389,7 +389,9 @@ async fn wait_for_goodbye_or_graceful_close(conn: &Connection) -> Result<()> { #[cfg(test)] mod tests { use std::{ - collections::BTreeSet, sync::Arc, time::{Duration, Instant} + collections::BTreeSet, + sync::Arc, + time::{Duration, Instant}, }; use anyhow::Result; @@ -736,7 +738,10 @@ mod tests { info!(time=?start.elapsed(), "finished"); info!("betty intent res {:?}", res_betty); assert!(res_betty.is_err()); - assert_eq!(res_betty, Err(Arc::new(crate::session::Error::SessionClosedByPeer))); + assert_eq!( + res_betty, + Err(Arc::new(crate::session::Error::SessionClosedByPeer)) + ); tokio::try_join!( terminate_gracefully(&conn_alfie), diff --git a/iroh-willow/src/proto/wgps/messages.rs b/iroh-willow/src/proto/wgps/messages.rs index d54d26b2a2..c1cdaa7593 100644 --- a/iroh-willow/src/proto/wgps/messages.rs +++ b/iroh-willow/src/proto/wgps/messages.rs @@ -388,9 +388,10 @@ pub struct ReconciliationSendEntry { } /// Transmit some transformed Payload bytes. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(derive_more::Debug, Clone, Serialize, Deserialize)] pub struct ReconciliationSendPayload { // A substring of the bytes obtained by applying transform_payload to the Payload to be transmitted. 
+ #[debug("Bytes({})", self.bytes.len())] pub bytes: bytes::Bytes, } diff --git a/iroh-willow/src/session/intents.rs b/iroh-willow/src/session/intents.rs index 24c77bcae5..96327dca07 100644 --- a/iroh-willow/src/session/intents.rs +++ b/iroh-willow/src/session/intents.rs @@ -617,10 +617,7 @@ impl IntentInfo { if matches { self.send(event.clone()).await?; if is_reconciled && self.interests.is_empty() { - debug!("SEND RECONCILED ALL"); self.send(EventKind::ReconciledAll).await? - } else { - debug!("DO NOT SEND RECONCILED ALL"); } } Ok(self.is_complete()) From 0b7bed8462caa97fe9e6205aeaa892ba9fc0a6e0 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 20 Aug 2024 22:54:57 +0200 Subject: [PATCH 156/198] refactor: better namings in reconciler --- iroh-willow/src/session/reconciler.rs | 74 ++++++++++++++------------- 1 file changed, 38 insertions(+), 36 deletions(-) diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index 62f8ae1d1b..0f2ccadb0c 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -58,7 +58,7 @@ pub struct Reconciler { shared: Shared, recv: Cancelable>, targets: TargetMap, - current_entry: CurrentEntry, + entry_state: EntryState, } type TargetId = (AreaOfInterestHandle, AreaOfInterestHandle); @@ -92,7 +92,7 @@ impl Reconciler { shared, recv, targets: TargetMap::new(inbox), - current_entry: Default::default(), + entry_state: Default::default(), } .run() }) @@ -130,13 +130,13 @@ impl Reconciler { target .received_send_fingerprint(&self.shared, message) .await?; - if target.is_complete() && self.current_entry.is_none() { + if target.is_complete() && self.entry_state.is_empty() { self.complete_target(target_id).await?; } } ReconciliationMessage::AnnounceEntries(message) => { let target_id = message.handles(); - self.current_entry + self.entry_state .received_announce_entries(target_id, message.count)?; let target = self .targets @@ -145,7 +145,7 @@ impl 
Reconciler { target .received_announce_entries(&self.shared, message) .await?; - if target.is_complete() && self.current_entry.is_none() { + if target.is_complete() && self.entry_state.is_empty() { self.complete_target(target_id).await?; } } @@ -159,8 +159,9 @@ impl Reconciler { message.dynamic_token, ) .await?; - self.current_entry.received_entry( + self.entry_state.received_send_entry( *authorised_entry.entry().payload_digest(), + authorised_entry.entry().payload_length(), message.entry.available, )?; self.shared.store.entries().ingest( @@ -172,13 +173,13 @@ impl Reconciler { )?; } ReconciliationMessage::SendPayload(message) => { - self.current_entry + self.entry_state .received_send_payload(self.shared.store.payloads(), message.bytes) .await?; } ReconciliationMessage::TerminatePayload(_message) => { if let Some(completed_target) = - self.current_entry.received_terminate_payload().await? + self.entry_state.received_terminate_payload().await? { let target = self .targets @@ -284,45 +285,43 @@ impl TargetMap { } #[derive(Debug, Default)] -struct CurrentEntry(Option); +struct EntryState(Option); -impl CurrentEntry { - pub fn is_none(&self) -> bool { +impl EntryState { + pub fn is_empty(&self) -> bool { self.0.is_none() } - pub fn received_announce_entries( - &mut self, - target: TargetId, - count: u64, - ) -> Result, Error> { + pub fn received_announce_entries(&mut self, target: TargetId, count: u64) -> Result<(), Error> { if self.0.is_some() { return Err(Error::InvalidMessageInCurrentState); } if let Some(count) = NonZeroU64::new(count) { - self.0 = Some(EntryState { + self.0 = Some(EntryStateInner { target, - remaining: Some(count), - payload: CurrentPayload::default(), + remaining_entries: Some(count), + current_payload: CurrentPayload::default(), }); - Ok(None) - } else { - Ok(Some(target)) } + Ok(()) } - pub fn received_entry( + pub fn received_send_entry( &mut self, payload_digest: PayloadDigest, - expected_length: u64, + _total_payload_length: u64, + 
available_payload_length: u64, ) -> Result<(), Error> { let state = self.get_mut()?; - state.payload.ensure_none()?; - state.remaining = match state.remaining.take() { + state.current_payload.ensure_none()?; + state.remaining_entries = match state.remaining_entries.take() { None => return Err(Error::InvalidMessageInCurrentState), Some(c) => NonZeroU64::new(c.get().saturating_sub(1)), }; - state.payload.set(payload_digest, expected_length)?; + state.current_payload.set( + payload_digest, + available_payload_length, + )?; Ok(()) } @@ -331,15 +330,18 @@ impl CurrentEntry { store: &P, bytes: Bytes, ) -> Result<(), Error> { - self.get_mut()?.payload.recv_chunk(store, bytes).await?; + self.get_mut()? + .current_payload + .recv_chunk(store, bytes) + .await?; Ok(()) } pub async fn received_terminate_payload(&mut self) -> Result, Error> { - let s = self.get_mut()?; - s.payload.finalize().await?; - if s.remaining.is_none() { - let target_id = s.target; + let state = self.get_mut()?; + state.current_payload.finalize().await?; + if state.remaining_entries.is_none() { + let target_id = state.target; self.0 = None; Ok(Some(target_id)) } else { @@ -347,7 +349,7 @@ impl CurrentEntry { } } - pub fn get_mut(&mut self) -> Result<&mut EntryState, Error> { + pub fn get_mut(&mut self) -> Result<&mut EntryStateInner, Error> { match self.0.as_mut() { Some(s) => Ok(s), None => Err(Error::InvalidMessageInCurrentState), @@ -356,10 +358,10 @@ impl CurrentEntry { } #[derive(Debug)] -struct EntryState { +struct EntryStateInner { target: TargetId, - remaining: Option, - payload: CurrentPayload, + remaining_entries: Option, + current_payload: CurrentPayload, } #[derive(derive_more::Debug)] From 168abbd495cfef0ebdacf2ded6e7c81fadf7a8ee Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Tue, 20 Aug 2024 23:11:34 +0200 Subject: [PATCH 157/198] feat: verified payload streaming --- Cargo.lock | 1 + iroh-blobs/src/store/traits.rs | 98 +++++++++++++++- iroh-willow/Cargo.toml | 1 + 
iroh-willow/src/proto/wgps.rs | 4 +- iroh-willow/src/session/data.rs | 26 ++--- iroh-willow/src/session/payload.rs | 129 +++++++++++---------- iroh-willow/src/session/reconciler.rs | 18 ++- iroh-willow/src/util.rs | 1 + iroh-willow/src/util/pipe.rs | 154 ++++++++++++++++++++++++++ iroh-willow/tests/basic.rs | 42 ++++++- 10 files changed, 385 insertions(+), 89 deletions(-) create mode 100644 iroh-willow/src/util/pipe.rs diff --git a/Cargo.lock b/Cargo.lock index be93de2f0b..65c961376f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3026,6 +3026,7 @@ dependencies = [ "hex", "iroh-base", "iroh-blobs", + "iroh-io", "iroh-metrics", "iroh-net", "iroh-quinn", diff --git a/iroh-blobs/src/store/traits.rs b/iroh-blobs/src/store/traits.rs index 901546a93e..0007f78c2e 100644 --- a/iroh-blobs/src/store/traits.rs +++ b/iroh-blobs/src/store/traits.rs @@ -2,14 +2,19 @@ use std::{collections::BTreeSet, future::Future, io, path::PathBuf}; use bao_tree::{ - io::fsm::{BaoContentItem, Outboard}, + io::{ + fsm::{ + encode_ranges_validated, BaoContentItem, Outboard, ResponseDecoder, ResponseDecoderNext, + }, + DecodeError, + }, BaoTree, ChunkRanges, }; use bytes::Bytes; use futures_lite::{Stream, StreamExt}; use genawaiter::rc::{Co, Gen}; use iroh_base::rpc::RpcError; -use iroh_io::AsyncSliceReader; +use iroh_io::{AsyncSliceReader, AsyncStreamReader, AsyncStreamWriter}; use serde::{Deserialize, Serialize}; use tokio::io::AsyncRead; @@ -92,6 +97,26 @@ pub trait MapEntry: std::fmt::Debug + Clone + Send + Sync + 'static { fn outboard(&self) -> impl Future> + Send; /// A future that resolves to a reader that can be used to read the data fn data_reader(&self) -> impl Future> + Send; + + /// Encodes data and outboard into a stream which can be imported with [`Store::import_verifiable_stream`]. + /// + /// Returns immediately without error if `start` is equal or larger than the entry's size. 
+ fn write_verifiable_stream<'a>( + &'a self, + start: u64, + writer: impl AsyncStreamWriter + 'a, + ) -> impl Future> + 'a { + async move { + let size = self.size().value(); + if start >= size { + return Ok(()); + } + let ranges = range_from_offset_and_length(start, size - start); + let (outboard, data) = tokio::try_join!(self.outboard(), self.data_reader())?; + encode_ranges_validated(data, outboard, &ranges, writer).await?; + Ok(()) + } + } } /// A generic map from hashes to bao blobs (blobs with bao outboards). @@ -343,6 +368,70 @@ pub trait Store: ReadableStore + MapMut + std::fmt::Debug { self.import_stream(stream, format, progress) } + /// Import a blob from a verified stream, as emitted by [`MapEntry::write_verifiable_stream`]; + fn import_verifiable_stream<'a>( + &'a self, + hash: Hash, + total_size: u64, + stream_offset: u64, + reader: impl AsyncStreamReader + 'a, + ) -> impl Future> + 'a { + async move { + if stream_offset >= total_size { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "offset must not be greater than total_size", + )); + } + let entry = self.get_or_create(hash, total_size).await?; + let mut bw = entry.batch_writer().await?; + + let ranges = range_from_offset_and_length(stream_offset, total_size - stream_offset); + let mut decoder = ResponseDecoder::new( + hash.into(), + ranges, + BaoTree::new(total_size, IROH_BLOCK_SIZE), + reader, + ); + let size = decoder.tree().size(); + let mut buf = Vec::new(); + let is_complete = loop { + decoder = match decoder.next().await { + ResponseDecoderNext::More((decoder, item)) => { + let item = match item { + Err(DecodeError::LeafNotFound(_) | DecodeError::ParentNotFound(_)) => { + break false + } + Err(err) => return Err(err.into()), + Ok(item) => item, + }; + match &item { + BaoContentItem::Parent(_) => { + buf.push(item); + } + BaoContentItem::Leaf(_) => { + buf.push(item); + let batch = std::mem::take(&mut buf); + bw.write_batch(size, batch).await?; + } + } + decoder + } + 
ResponseDecoderNext::Done(_reader) => { + debug_assert!(buf.is_empty(), "last node of bao tree must be leaf node"); + break true; + } + }; + }; + bw.sync().await?; + drop(bw); + if is_complete { + self.insert_complete(entry).await?; + } + Ok(()) + } + } + /// Set a tag fn set_tag( &self, @@ -418,6 +507,11 @@ pub trait Store: ReadableStore + MapMut + std::fmt::Debug { } } +fn range_from_offset_and_length(offset: u64, length: u64) -> bao_tree::ChunkRanges { + let ranges = bao_tree::ByteRanges::from(offset..(offset + length)); + bao_tree::io::round_up_to_chunks(&ranges) +} + async fn validate_impl( store: &impl Store, repair: bool, diff --git a/iroh-willow/Cargo.toml b/iroh-willow/Cargo.toml index 93279c14f8..fd6de7e127 100644 --- a/iroh-willow/Cargo.toml +++ b/iroh-willow/Cargo.toml @@ -29,6 +29,7 @@ genawaiter = "0.99.1" hex = "0.4.3" iroh-base = { version = "0.22.0", path = "../iroh-base" } iroh-blobs = { version = "0.22.0", path = "../iroh-blobs" } +iroh-io = { version = "0.6.0", features = ["stats"] } iroh-metrics = { version = "0.22.0", path = "../iroh-metrics", optional = true } iroh-net = { version = "0.22.0", path = "../iroh-net" } meadowcap = "0.1.0" diff --git a/iroh-willow/src/proto/wgps.rs b/iroh-willow/src/proto/wgps.rs index a6d8a468a9..2e9f4c2887 100644 --- a/iroh-willow/src/proto/wgps.rs +++ b/iroh-willow/src/proto/wgps.rs @@ -12,11 +12,11 @@ pub use fingerprint::*; pub use handles::*; pub use messages::*; -pub const MAX_PAYLOAD_SIZE_POWER: u8 = 12; +pub const MAX_PAYLOAD_SIZE_POWER: u8 = 18; /// The maximum payload size limits when the other peer may include Payloads directly when transmitting Entries: /// when an Entry’s payload_length is strictly greater than the maximum payload size, /// its Payload may only be transmitted when explicitly requested. /// -/// The value is 4096. +/// The value is 256KiB. 
pub const MAX_PAYLOAD_SIZE: usize = 2usize.pow(MAX_PAYLOAD_SIZE_POWER as u32); diff --git a/iroh-willow/src/session/data.rs b/iroh-willow/src/session/data.rs index acb5cd9d07..dc52568895 100644 --- a/iroh-willow/src/session/data.rs +++ b/iroh-willow/src/session/data.rs @@ -6,10 +6,7 @@ use crate::{ data_model::AuthorisedEntry, wgps::{DataMessage, DataSendEntry, DataSendPayload, StaticToken}, }, - session::{ - channels::ChannelSenders, payload::DEFAULT_CHUNK_SIZE, static_tokens::StaticTokens, Error, - SessionId, - }, + session::{channels::ChannelSenders, static_tokens::StaticTokens, Error, SessionId}, store::{ entry::{EntryChannel, EntryOrigin}, traits::Storage, @@ -94,24 +91,21 @@ impl DataSender { .bind_and_send_ours(static_token, &self.send) .await?; let digest = *entry.payload_digest(); + let offset = 0; let msg = DataSendEntry { entry: entry.into(), static_token_handle, dynamic_token, - offset: 0, + offset, }; self.send.send(msg).await?; // TODO: only send payload if configured to do so and/or under size limit. 
let send_payloads = true; if send_payloads { - send_payload_chunked( - digest, - self.store.payloads(), - &self.send, - DEFAULT_CHUNK_SIZE, - |bytes| DataSendPayload { bytes }.into(), - ) + send_payload_chunked(digest, self.store.payloads(), &self.send, offset, |bytes| { + DataSendPayload { bytes }.into() + }) .await?; } Ok(()) @@ -164,8 +158,12 @@ impl DataReceiver { )?; let (entry, _token) = authorised_entry.into_parts(); // TODO: handle offset - self.current_payload - .set(*entry.payload_digest(), entry.payload_length())?; + self.current_payload.set( + *entry.payload_digest(), + entry.payload_length(), + None, + Some(message.offset), + )?; Ok(()) } diff --git a/iroh-willow/src/session/payload.rs b/iroh-willow/src/session/payload.rs index d5c099ac83..be3596529e 100644 --- a/iroh-willow/src/session/payload.rs +++ b/iroh-willow/src/session/payload.rs @@ -1,19 +1,26 @@ +use std::io; + use bytes::Bytes; -use futures_lite::{future::BoxedLocal, FutureExt}; +use futures_concurrency::future::TryJoin; +use futures_lite::StreamExt; +use futures_util::TryFutureExt; use iroh_blobs::{ - store::{bao_tree::io::fsm::AsyncSliceReader, MapEntry, Store as PayloadStore}, - util::progress::IgnoreProgressSender, - TempTag, + store::{MapEntry, Store as PayloadStore}, + Hash, HashAndFormat, TempTag, }; +use iroh_io::TokioStreamReader; +use tokio::sync::mpsc; +use tokio_stream::wrappers::ReceiverStream; use crate::{ proto::{data_model::PayloadDigest, wgps::Message}, session::channels::ChannelSenders, + util::pipe::chunked_pipe, }; use super::Error; -pub const DEFAULT_CHUNK_SIZE: usize = 1024 * 64; +const CHUNK_SIZE: usize = 1024 * 32; /// Send a payload in chunks. 
/// @@ -25,30 +32,31 @@ pub async fn send_payload_chunked( digest: PayloadDigest, payload_store: &P, senders: &ChannelSenders, - chunk_size: usize, + offset: u64, map: impl Fn(Bytes) -> Message, ) -> Result { - let payload_entry = payload_store - .get(&digest.0) + let hash: Hash = digest.into(); + let entry = payload_store + .get(&hash) .await .map_err(Error::PayloadStore)?; - if let Some(entry) = payload_entry { - let mut reader = entry.data_reader().await.map_err(Error::PayloadStore)?; - let len: u64 = entry.size().value(); - let mut pos = 0; - while pos < len { - let bytes = reader - .read_at(pos, chunk_size) - .await - .map_err(Error::PayloadStore)?; - pos += bytes.len() as u64; + let Some(entry) = entry else { + return Ok(false); + }; + + let (writer, mut reader) = chunked_pipe(CHUNK_SIZE); + let write_stream_fut = entry + .write_verifiable_stream(offset, writer) + .map_err(Error::PayloadStore); + let send_fut = async { + while let Some(bytes) = reader.try_next().await.map_err(Error::PayloadStore)? { let msg = map(bytes); senders.send(msg).await?; } - Ok(true) - } else { - Ok(false) - } + Ok(()) + }; + (write_stream_fut, send_fut).try_join().await?; + Ok(true) } #[derive(Debug, Default)] @@ -59,33 +67,39 @@ struct CurrentPayloadInner { payload_digest: PayloadDigest, expected_length: u64, received_length: u64, + total_length: u64, + offset: u64, writer: Option, } #[derive(derive_more::Debug)] struct PayloadWriter { - #[debug(skip)] - fut: BoxedLocal>, - sender: flume::Sender>, + tag: TempTag, + task: tokio::task::JoinHandle>, + sender: mpsc::Sender>, } impl CurrentPayload { - // pub fn new() -> Self { - // Self::default() - // } - + /// Set the payload to be received. 
pub fn set( &mut self, payload_digest: PayloadDigest, - expected_length: u64, + total_length: u64, + available_length: Option, + offset: Option, ) -> Result<(), Error> { if self.0.is_some() { return Err(Error::InvalidMessageInCurrentState); } + let offset = offset.unwrap_or(0); + let available_length = available_length.unwrap_or(total_length); + let expected_length = available_length - offset; self.0 = Some(CurrentPayloadInner { payload_digest, writer: None, expected_length, + total_length, + offset, received_length: 0, }); Ok(()) @@ -99,24 +113,29 @@ impl CurrentPayload { let state = self.0.as_mut().ok_or(Error::InvalidMessageInCurrentState)?; let len = chunk.len(); let store = store.clone(); - let writer = state.writer.get_or_insert_with(move || { - let (tx, rx) = flume::bounded(1); + let writer = state.writer.get_or_insert_with(|| { + let (tx, rx) = tokio::sync::mpsc::channel(2); let store = store.clone(); + let hash: Hash = state.payload_digest.into(); + let total_length = state.total_length; + let offset = state.offset; + let tag = store.temp_tag(HashAndFormat::raw(hash)); + let mut reader = + TokioStreamReader(tokio_util::io::StreamReader::new(ReceiverStream::new(rx))); let fut = async move { store - .import_stream( - rx.into_stream(), - iroh_blobs::BlobFormat::Raw, - IgnoreProgressSender::default(), - ) - .await + .import_verifiable_stream(hash, total_length, offset, &mut reader) + .await?; + Ok(()) }; + let task = tokio::task::spawn_local(fut); PayloadWriter { - fut: fut.boxed_local(), + tag, + task, sender: tx, } }); - writer.sender.send_async(Ok(chunk)).await?; + writer.sender.send(Ok(chunk)).await?; state.received_length += len as u64; Ok(()) } @@ -130,32 +149,24 @@ impl CurrentPayload { pub async fn finalize(&mut self) -> Result<(), Error> { let state = self.0.take().ok_or(Error::InvalidMessageInCurrentState)?; - let (hash, len) = match state.writer { - Some(writer) => { - drop(writer.sender); - let (tag, len) = 
writer.fut.await.map_err(Error::PayloadStore)?; - (*tag.hash(), len) - } - // The writer is only empty if we did not receive any chunks. In this case, the - // "received data" is that of the empty hash with size 0. - None => (iroh_base::hash::Hash::EMPTY, 0), - }; - if hash != state.payload_digest.0 { - return Err(Error::PayloadDigestMismatch); - } - if len != state.expected_length { - return Err(Error::PayloadDigestMismatch); + // The writer is only set if we received at least one payload chunk. + if let Some(writer) = state.writer { + drop(writer.sender); + writer + .task + .await + .expect("payload writer panicked") + .map_err(Error::PayloadStore)?; + // TODO: Make sure blobs referenced from entries are protected from GC by now. + drop(writer.tag); } - // TODO: protect from gc - // we could store a tag for each blob - // however we really want reference counting here, not individual tags - // can also fallback to the naive impl from iroh-docs to just protect all docs hashes on gc Ok(()) } pub fn is_active(&self) -> bool { self.0.as_ref().map(|s| s.writer.is_some()).unwrap_or(false) } + pub fn ensure_none(&self) -> Result<(), Error> { if self.is_active() { Err(Error::InvalidMessageInCurrentState) diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index 0f2ccadb0c..29a7df0258 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -24,7 +24,7 @@ use crate::{ session::{ aoi_finder::AoiIntersection, channels::{ChannelSenders, MessageReceiver}, - payload::{send_payload_chunked, CurrentPayload, DEFAULT_CHUNK_SIZE}, + payload::{send_payload_chunked, CurrentPayload}, static_tokens::StaticTokens, Error, Role, SessionId, }, @@ -309,7 +309,7 @@ impl EntryState { pub fn received_send_entry( &mut self, payload_digest: PayloadDigest, - _total_payload_length: u64, + total_payload_length: u64, available_payload_length: u64, ) -> Result<(), Error> { let state = self.get_mut()?; @@ -320,7 +320,9 @@ 
impl EntryState { }; state.current_payload.set( payload_digest, - available_payload_length, + total_payload_length, + Some(available_payload_length), + None, )?; Ok(()) } @@ -587,13 +589,9 @@ impl Target { // TODO: only send payload if configured to do so and/or under size limit. if payload_len <= shared.max_eager_payload_size { - send_payload_chunked( - digest, - shared.store.payloads(), - &shared.send, - DEFAULT_CHUNK_SIZE, - |bytes| ReconciliationSendPayload { bytes }.into(), - ) + send_payload_chunked(digest, shared.store.payloads(), &shared.send, 0, |bytes| { + ReconciliationSendPayload { bytes }.into() + }) .await?; } shared.send.send(ReconciliationTerminatePayload).await?; diff --git a/iroh-willow/src/util.rs b/iroh-willow/src/util.rs index 914072c230..a8968ddb59 100644 --- a/iroh-willow/src/util.rs +++ b/iroh-willow/src/util.rs @@ -4,6 +4,7 @@ pub mod channel; pub mod codec; pub mod codec2; pub mod gen_stream; +pub mod pipe; pub mod queue; pub mod stream; pub mod time; diff --git a/iroh-willow/src/util/pipe.rs b/iroh-willow/src/util/pipe.rs new file mode 100644 index 0000000000..4c68bc0ea2 --- /dev/null +++ b/iroh-willow/src/util/pipe.rs @@ -0,0 +1,154 @@ +use std::{ + cell::RefCell, + future::poll_fn, + io, + rc::Rc, + task::{Context, Poll, Waker}, +}; + +use bytes::{Bytes, BytesMut}; +use futures_lite::Stream; +use iroh_io::AsyncStreamWriter; + +/// In-memory local-io async pipe between a [`AsyncStreamWriter`] and a [`Stream`] of [`Bytes`]. +/// +/// The pipe maintains a shared in-memory buffer of `chunk_size` +/// +/// [`PipeWriter`] is a [`AsyncStreamWriter`] that writes into the shared buffer. +/// +/// [`PipeReader`] is [`Stream`] that emits [`Bytes`] of `chunk_size` length. The last chunk may be +/// smaller than `chunk_size`. +/// +/// The pipe is closed once either the reader or the writer are dropped. If the reader is dropped, +/// subsequent writes will fail with [`io::ErrorKind::BrokenPipe`]. +// TODO: Move to iroh-io? 
+pub fn chunked_pipe(chunk_size: usize) -> (PipeWriter, PipeReader) { + let shared = Shared { + buf: BytesMut::new(), + chunk_size, + read_waker: None, + write_waker: None, + closed: false, + }; + let shared = Rc::new(RefCell::new(shared)); + let writer = PipeWriter { + shared: shared.clone(), + }; + let reader = PipeReader { shared }; + (writer, reader) +} + +#[derive(Debug)] +struct Shared { + buf: BytesMut, + chunk_size: usize, + read_waker: Option, + write_waker: Option, + closed: bool, +} + +impl Shared { + fn poll_write(&mut self, data: &[u8], cx: &mut Context<'_>) -> Poll> { + if self.closed { + return Poll::Ready(Err(io::Error::new( + io::ErrorKind::BrokenPipe, + "write after close", + ))); + } + let remaining = self.chunk_size - self.buf.len(); + let amount = data.len().min(remaining); + if amount > 0 { + self.buf.extend_from_slice(&data[..amount]); + if let Some(waker) = self.read_waker.take() { + waker.wake(); + } + Poll::Ready(Ok(amount)) + } else { + self.write_waker = Some(cx.waker().to_owned()); + Poll::Pending + } + } + + fn poll_next(&mut self, cx: &mut Context<'_>) -> Poll>> { + if self.buf.len() == self.chunk_size { + if let Some(write_waker) = self.write_waker.take() { + write_waker.wake(); + } + Poll::Ready(Some(Ok(self.buf.split().freeze()))) + } else if self.closed && !self.buf.is_empty() { + Poll::Ready(Some(Ok(self.buf.split().freeze()))) + } else if self.closed { + Poll::Ready(None) + } else { + self.read_waker = Some(cx.waker().to_owned()); + Poll::Pending + } + } + + fn close(&mut self) { + self.closed = true; + if let Some(waker) = self.read_waker.take() { + waker.wake(); + } + if let Some(waker) = self.write_waker.take() { + waker.wake(); + } + } +} + +/// The writer returned from [`chunked_pipe`]. +#[derive(Debug)] +pub struct PipeWriter { + shared: Rc>, +} + +/// The reader returned from [`chunked_pipe`]. 
+#[derive(Debug)] +pub struct PipeReader { + shared: Rc>, +} + +impl Drop for PipeWriter { + fn drop(&mut self) { + let mut shared = self.shared.borrow_mut(); + shared.close(); + } +} + +impl Drop for PipeReader { + fn drop(&mut self) { + let mut shared = self.shared.borrow_mut(); + shared.close(); + } +} + +impl AsyncStreamWriter for PipeWriter { + async fn write(&mut self, data: &[u8]) -> io::Result<()> { + let mut written = 0; + while written < data.len() { + written += poll_fn(|cx| { + let mut shared = self.shared.borrow_mut(); + shared.poll_write(&data[written..], cx) + }) + .await?; + } + Ok(()) + } + + async fn write_bytes(&mut self, data: bytes::Bytes) -> io::Result<()> { + self.write(&data[..]).await + } + + async fn sync(&mut self) -> io::Result<()> { + Ok(()) + } +} + +impl Stream for PipeReader { + type Item = io::Result; + + fn poll_next(self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut shared = self.shared.borrow_mut(); + shared.poll_next(cx) + } +} diff --git a/iroh-willow/tests/basic.rs b/iroh-willow/tests/basic.rs index fd01eaf55c..ab382b9719 100644 --- a/iroh-willow/tests/basic.rs +++ b/iroh-willow/tests/basic.rs @@ -1,14 +1,17 @@ use std::time::Duration; use anyhow::Result; +use bytes::Bytes; use futures_concurrency::future::TryJoin; use futures_lite::StreamExt; +use iroh_blobs::store::{Map, MapEntry}; +use iroh_io::AsyncSliceReaderExt; use iroh_willow::{ interest::{Interests, IntoAreaOfInterest}, proto::{ data_model::{Path, PathExt}, - grouping::{Area, AreaExt}, + grouping::{Area, AreaExt, Range3d}, }, session::{ intents::{Completion, EventKind}, @@ -304,6 +307,7 @@ mod util { #[derive(Debug, Clone)] pub struct Peer { + pub blobs: iroh_blobs::store::mem::Store, endpoint: Endpoint, engine: Engine, accept_task: Arc>>>>, @@ -320,7 +324,8 @@ mod util { .alpns(vec![ALPN.to_vec()]) .bind(0) .await?; - let payloads = iroh_blobs::store::mem::Store::default(); + let blobs = iroh_blobs::store::mem::Store::default(); + let payloads 
= blobs.clone(); let create_store = move || iroh_willow::store::memory::Store::new(payloads); let engine = Engine::spawn(endpoint.clone(), create_store, accept_opts); let accept_task = tokio::task::spawn({ @@ -343,6 +348,7 @@ mod util { } }); Ok(Self { + blobs, endpoint, engine, accept_task: Arc::new(Mutex::new(Some(accept_task))), @@ -480,3 +486,35 @@ async fn peer_manager_empty_payload() -> Result<()> { Ok(()) } + +#[tokio::test(flavor = "multi_thread")] +async fn peer_manager_big_payload() -> Result<()> { + iroh_test::logging::setup_multithreaded(); + let mut rng = create_rng("peer_manager_empty_payload"); + + let [alfie, betty] = spawn_two(&mut rng).await?; + let (namespace, _alfie_user, betty_user) = setup_and_delegate(&alfie, &betty).await?; + let betty_node_id = betty.node_id(); + + let payload = Bytes::from(vec![2u8; 1024 * 128]); + insert(&betty, namespace, betty_user, &[b"foo"], payload.clone()).await?; + + let init = SessionInit::new(Interests::all(), SessionMode::ReconcileOnce); + let mut intent = alfie.sync_with_peer(betty_node_id, init).await.unwrap(); + + intent.complete().await?; + + let entries = alfie.get_entries(namespace, Range3d::new_full()).await?; + let entries: Vec<_> = entries.try_collect().await?; + assert_eq!(entries.len(), 1); + let entry = &entries[0]; + let hash: iroh_blobs::Hash = (*entry.payload_digest()).into(); + let blob = alfie.blobs.get(&hash).await?.expect("missing blob"); + let actual = blob.data_reader().await?.read_to_end().await?; + assert_eq!(actual.len(), payload.len()); + assert!(actual == payload); + + [alfie, betty].map(Peer::shutdown).try_join().await?; + + Ok(()) +} From d5ab28e435c3fb0d051a95f4ea0f9a6e102e3a36 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Wed, 21 Aug 2024 08:55:49 +0200 Subject: [PATCH 158/198] docs: improve docs of verifiable stream methods --- iroh-blobs/src/store/traits.rs | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git 
a/iroh-blobs/src/store/traits.rs b/iroh-blobs/src/store/traits.rs index 0007f78c2e..17d6643745 100644 --- a/iroh-blobs/src/store/traits.rs +++ b/iroh-blobs/src/store/traits.rs @@ -98,20 +98,25 @@ pub trait MapEntry: std::fmt::Debug + Clone + Send + Sync + 'static { /// A future that resolves to a reader that can be used to read the data fn data_reader(&self) -> impl Future> + Send; - /// Encodes data and outboard into a stream which can be imported with [`Store::import_verifiable_stream`]. + /// Encodes data and outboard into a [`AsyncStreamWriter`]. + /// + /// Data and outboard parts will be interleaved. + /// + /// `offset` is the byte offset in the blob to start the stream from. It will be rounded down to + /// the next chunk group. /// /// Returns immediately without error if `start` is equal or larger than the entry's size. fn write_verifiable_stream<'a>( &'a self, - start: u64, + offset: u64, writer: impl AsyncStreamWriter + 'a, ) -> impl Future> + 'a { async move { let size = self.size().value(); - if start >= size { + if offset >= size { return Ok(()); } - let ranges = range_from_offset_and_length(start, size - start); + let ranges = range_from_offset_and_length(offset, size - offset); let (outboard, data) = tokio::try_join!(self.outboard(), self.data_reader())?; encode_ranges_validated(data, outboard, &ranges, writer).await?; Ok(()) @@ -369,15 +374,19 @@ pub trait Store: ReadableStore + MapMut + std::fmt::Debug { } /// Import a blob from a verified stream, as emitted by [`MapEntry::write_verifiable_stream`]; + /// + /// `total_size` is the total size of the blob as reported by the remote. + /// `offset` is the byte offset in the blob where the stream starts. It will be rounded + /// to the next chunk group. 
fn import_verifiable_stream<'a>( &'a self, hash: Hash, total_size: u64, - stream_offset: u64, + offset: u64, reader: impl AsyncStreamReader + 'a, ) -> impl Future> + 'a { async move { - if stream_offset >= total_size { + if offset >= total_size { return Err(io::Error::new( io::ErrorKind::InvalidInput, "offset must not be greater than total_size", @@ -386,7 +395,7 @@ pub trait Store: ReadableStore + MapMut + std::fmt::Debug { let entry = self.get_or_create(hash, total_size).await?; let mut bw = entry.batch_writer().await?; - let ranges = range_from_offset_and_length(stream_offset, total_size - stream_offset); + let ranges = range_from_offset_and_length(offset, total_size - offset); let mut decoder = ResponseDecoder::new( hash.into(), ranges, From 5df0b6d1e6180a6754521a6aa1cf55742665b806 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Fri, 23 Aug 2024 14:09:45 +0200 Subject: [PATCH 159/198] feat: re-add bench example --- iroh-willow/examples/bench.rs | 263 +++++++++++++++++++++++++++++++++- 1 file changed, 262 insertions(+), 1 deletion(-) diff --git a/iroh-willow/examples/bench.rs b/iroh-willow/examples/bench.rs index b574a817af..451543f1e6 100644 --- a/iroh-willow/examples/bench.rs +++ b/iroh-willow/examples/bench.rs @@ -1,4 +1,265 @@ -fn main() {} +use std::time::Instant; + +use anyhow::Result; +use futures_lite::StreamExt; +use iroh_willow::{ + interest::Interests, + proto::grouping::Range3d, + session::{intents::Completion, SessionInit, SessionMode}, +}; +use tracing::info; + +use self::util::{create_rng, insert, parse_env_var, setup_and_delegate, spawn_two}; + +#[tokio::main] +async fn main() -> Result<()> { + let t = Instant::now(); + tracing_subscriber::fmt::init(); + let n_betty: usize = parse_env_var("N_BETTY", 100); + let n_alfie: usize = parse_env_var("N_ALFIE", 100); + let mut rng = create_rng("peer_manager_two_intents"); + + let start = Instant::now(); + let [alfie, betty] = spawn_two(&mut rng).await?; + let (namespace, alfie_user, 
betty_user) = setup_and_delegate(&alfie, &betty).await?; + info!(t=?t.elapsed(), d=?start.elapsed(), "setup done"); + + let start = Instant::now(); + for i in 0..n_alfie { + let x = format!("{i}"); + insert( + &alfie, + namespace, + alfie_user, + &[b"alfie", x.as_bytes()], + "foo", + ) + .await?; + } + for i in 0..n_betty { + let x = format!("{i}"); + insert( + &betty, + namespace, + betty_user, + &[b"betty", x.as_bytes()], + "foo", + ) + .await?; + } + info!(t=?t.elapsed(), d=?start.elapsed(), "insert done"); + + let start = Instant::now(); + let init = SessionInit::new(Interests::all(), SessionMode::ReconcileOnce); + let mut intent_alfie = alfie.sync_with_peer(betty.node_id(), init.clone()).await.unwrap(); + let mut intent_betty= betty.sync_with_peer(alfie.node_id(), init).await.unwrap(); + let completion_alfie = intent_alfie.complete().await?; + // info!(t=?t.elapsed(), d=?start.elapsed(), "alfie done"); + // let start = Instant::now(); + let completion_betty = intent_betty.complete().await?; + info!(t=?t.elapsed(), d=?start.elapsed(), "sync done"); + + let time = start.elapsed(); + let total = n_alfie + n_betty; + let per_entry = time.as_micros() / total as u128; + let entries_per_second = (total as f32 / time.as_secs_f32()).round(); + info!(time=?time, ms_per_entry=per_entry, entries_per_second, "sync done"); + + assert_eq!(completion_alfie, Completion::Complete); + assert_eq!(completion_betty, Completion::Complete); + let start = Instant::now(); + let alfie_count = alfie + .get_entries(namespace, Range3d::new_full()) + .await? + .count() + .await; + let betty_count = betty + .get_entries(namespace, Range3d::new_full()) + .await? 
+ .count() + .await; + info!(t=?t.elapsed(), d=?start.elapsed(), "get done"); + info!("alfie has now {} entries", alfie_count); + info!("betty has now {} entries", betty_count); + assert_eq!(alfie_count, n_alfie + n_betty); + assert_eq!(betty_count, n_alfie + n_betty); + + Ok(()) +} + +mod util { + use std::sync::{Arc, Mutex}; + + use anyhow::Result; + use bytes::Bytes; + use futures_concurrency::future::TryJoin; + use iroh_net::{Endpoint, NodeId}; + use rand::SeedableRng; + use rand_chacha::ChaCha12Rng; + use rand_core::CryptoRngCore; + use tokio::task::JoinHandle; + + use iroh_willow::{ + engine::{AcceptOpts, Engine}, + form::EntryForm, + interest::{CapSelector, DelegateTo, RestrictArea}, + proto::{ + data_model::{Path, PathExt}, + keys::{NamespaceId, NamespaceKind, UserId}, + meadowcap::AccessMode, + }, + ALPN, + }; + + pub fn create_rng(seed: &str) -> ChaCha12Rng { + let seed = iroh_base::hash::Hash::new(seed); + ChaCha12Rng::from_seed(*(seed.as_bytes())) + } + + #[derive(Debug, Clone)] + pub struct Peer { + endpoint: Endpoint, + engine: Engine, + accept_task: Arc>>>>, + } + + impl Peer { + pub async fn spawn( + secret_key: iroh_net::key::SecretKey, + accept_opts: AcceptOpts, + ) -> Result { + let endpoint = Endpoint::builder() + .secret_key(secret_key) + .relay_mode(iroh_net::relay::RelayMode::Disabled) + .alpns(vec![ALPN.to_vec()]) + .bind(0) + .await?; + let blobs = iroh_blobs::store::mem::Store::default(); + let create_store = move || iroh_willow::store::memory::Store::new(blobs); + let engine = Engine::spawn(endpoint.clone(), create_store, accept_opts); + let accept_task = tokio::task::spawn({ + let engine = engine.clone(); + let endpoint = endpoint.clone(); + async move { + while let Some(mut conn) = endpoint.accept().await { + let Ok(alpn) = conn.alpn().await else { + continue; + }; + if alpn != ALPN { + continue; + } + let Ok(conn) = conn.await else { + continue; + }; + engine.handle_connection(conn).await?; + } + Result::Ok(()) + } + }); + Ok(Self { + 
endpoint, + engine, + accept_task: Arc::new(Mutex::new(Some(accept_task))), + }) + } + + pub async fn shutdown(self) -> Result<()> { + let accept_task = self.accept_task.lock().unwrap().take(); + if let Some(accept_task) = accept_task { + accept_task.abort(); + match accept_task.await { + Err(err) if err.is_cancelled() => {} + Ok(Ok(())) => {} + Err(err) => Err(err)?, + Ok(Err(err)) => Err(err)?, + } + } + self.engine.shutdown().await?; + self.endpoint.close(0u8.into(), b"").await?; + Ok(()) + } + + pub fn node_id(&self) -> NodeId { + self.endpoint.node_id() + } + } + + impl std::ops::Deref for Peer { + type Target = Engine; + fn deref(&self) -> &Self::Target { + &self.engine + } + } + + pub async fn spawn_two(rng: &mut impl CryptoRngCore) -> Result<[Peer; 2]> { + let peers = [ + iroh_net::key::SecretKey::generate_with_rng(rng), + iroh_net::key::SecretKey::generate_with_rng(rng), + ] + .map(|secret_key| Peer::spawn(secret_key, Default::default())) + .try_join() + .await?; + + peers[0] + .endpoint + .add_node_addr(peers[1].endpoint.node_addr().await?)?; + + peers[1] + .endpoint + .add_node_addr(peers[0].endpoint.node_addr().await?)?; + + Ok(peers) + } + + pub async fn setup_and_delegate( + alfie: &Engine, + betty: &Engine, + ) -> Result<(NamespaceId, UserId, UserId)> { + let user_alfie = alfie.create_user().await?; + let user_betty = betty.create_user().await?; + + let namespace_id = alfie + .create_namespace(NamespaceKind::Owned, user_alfie) + .await?; + + let cap_for_betty = alfie + .delegate_caps( + CapSelector::widest(namespace_id), + AccessMode::Write, + DelegateTo::new(user_betty, RestrictArea::None), + ) + .await?; + + betty.import_caps(cap_for_betty).await?; + Ok((namespace_id, user_alfie, user_betty)) + } + + pub async fn insert( + handle: &Engine, + namespace_id: NamespaceId, + user: UserId, + path: &[&[u8]], + bytes: impl Into, + ) -> Result<()> { + let path = Path::from_bytes(path)?; + let entry = EntryForm::new_bytes(namespace_id, path, bytes); + 
handle.insert(entry, user).await?; + Ok(()) + } + + pub fn parse_env_var(var: &str, default: T) -> T + where + T: std::str::FromStr, + T::Err: std::fmt::Debug, + { + match std::env::var(var).as_deref() { + Ok(val) => val + .parse() + .unwrap_or_else(|_| panic!("failed to parse environment variable {var}")), + Err(_) => default, + } + } +} // use std::{collections::BTreeSet, time::Instant}; // From 81334f1b7bfdbcb3e89389a7655230962d3b3f00 Mon Sep 17 00:00:00 2001 From: "Franz Heinzmann (Frando)" Date: Mon, 26 Aug 2024 13:37:54 +0200 Subject: [PATCH 160/198] chore: missing clone and fmt --- iroh-willow/examples/bench.rs | 7 +++++-- iroh-willow/src/session.rs | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/iroh-willow/examples/bench.rs b/iroh-willow/examples/bench.rs index 451543f1e6..da7c35daf8 100644 --- a/iroh-willow/examples/bench.rs +++ b/iroh-willow/examples/bench.rs @@ -51,8 +51,11 @@ async fn main() -> Result<()> { let start = Instant::now(); let init = SessionInit::new(Interests::all(), SessionMode::ReconcileOnce); - let mut intent_alfie = alfie.sync_with_peer(betty.node_id(), init.clone()).await.unwrap(); - let mut intent_betty= betty.sync_with_peer(alfie.node_id(), init).await.unwrap(); + let mut intent_alfie = alfie + .sync_with_peer(betty.node_id(), init.clone()) + .await + .unwrap(); + let mut intent_betty = betty.sync_with_peer(alfie.node_id(), init).await.unwrap(); let completion_alfie = intent_alfie.complete().await?; // info!(t=?t.elapsed(), d=?start.elapsed(), "alfie done"); // let start = Instant::now(); diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index c5165d5239..dd54b26cc8 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -81,7 +81,7 @@ impl SessionMode { } /// Options to initialize a session. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct SessionInit { /// Selects the areas we wish to synchronize. 
pub interests: Interests, From 109bc8e9304f227bc2c5f9021a07ff0b4481f676 Mon Sep 17 00:00:00 2001 From: Frando Date: Tue, 27 Aug 2024 10:05:36 +0200 Subject: [PATCH 161/198] test: re-add bench example --- iroh-willow/examples/bench.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/iroh-willow/examples/bench.rs b/iroh-willow/examples/bench.rs index da7c35daf8..80bc6ca47c 100644 --- a/iroh-willow/examples/bench.rs +++ b/iroh-willow/examples/bench.rs @@ -246,7 +246,7 @@ mod util { ) -> Result<()> { let path = Path::from_bytes(path)?; let entry = EntryForm::new_bytes(namespace_id, path, bytes); - handle.insert(entry, user).await?; + handle.insert_entry(entry, user).await?; Ok(()) } From a17d4195112c5daba769b9fd9428028155339be6 Mon Sep 17 00:00:00 2001 From: Frando Date: Tue, 27 Aug 2024 18:30:07 +0200 Subject: [PATCH 162/198] fix: fixes after merge --- iroh-willow/Cargo.toml | 9 +++++---- iroh-willow/src/lib.rs | 1 + 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/iroh-willow/Cargo.toml b/iroh-willow/Cargo.toml index fd6de7e127..f34a66540e 100644 --- a/iroh-willow/Cargo.toml +++ b/iroh-willow/Cargo.toml @@ -27,11 +27,11 @@ futures-lite = "2.3.0" futures-util = "0.3.30" genawaiter = "0.99.1" hex = "0.4.3" -iroh-base = { version = "0.22.0", path = "../iroh-base" } -iroh-blobs = { version = "0.22.0", path = "../iroh-blobs" } +iroh-base = { version = "0.23.0", path = "../iroh-base" } +iroh-blobs = { version = "0.23.0", path = "../iroh-blobs" } iroh-io = { version = "0.6.0", features = ["stats"] } -iroh-metrics = { version = "0.22.0", path = "../iroh-metrics", optional = true } -iroh-net = { version = "0.22.0", path = "../iroh-net" } +iroh-metrics = { version = "0.23.0", path = "../iroh-metrics", optional = true } +iroh-net = { version = "0.23.0", path = "../iroh-net" } meadowcap = "0.1.0" postcard = { version = "1", default-features = false, features = [ "alloc", "use-std", "experimental-derive", ] } quinn = { package = "iroh-quinn", version 
= "0.10.5" } @@ -54,6 +54,7 @@ zerocopy = { version = "0.8.0-alpha.9", features = ["derive"] } [dev-dependencies] iroh-test = { path = "../iroh-test" } +iroh-net = { path = "../iroh-net", features = ["test-utils"] } rand_chacha = "0.3.1" tokio = { version = "1", features = ["sync", "macros"] } proptest = "1.2.0" diff --git a/iroh-willow/src/lib.rs b/iroh-willow/src/lib.rs index a8bab71daf..52408cbec4 100644 --- a/iroh-willow/src/lib.rs +++ b/iroh-willow/src/lib.rs @@ -12,4 +12,5 @@ pub mod session; pub mod store; pub mod util; +pub use engine::Engine; pub use net::ALPN; From 82d4abf93a64051b153eccfc87e619b61c508f4d Mon Sep 17 00:00:00 2001 From: Frando Date: Mon, 26 Aug 2024 21:55:47 +0200 Subject: [PATCH 163/198] feat: add more serde compat to iroh-willow --- iroh-willow/Cargo.toml | 2 +- iroh-willow/src/engine/actor.rs | 28 ++---- iroh-willow/src/form.rs | 59 +++++++++++- iroh-willow/src/interest.rs | 62 +++++++++---- iroh-willow/src/net.rs | 2 +- iroh-willow/src/proto/data_model.rs | 85 +++++++++++++++--- iroh-willow/src/proto/grouping.rs | 106 +++++++++++++++++----- iroh-willow/src/proto/keys.rs | 2 +- iroh-willow/src/proto/meadowcap.rs | 135 ++++++++++++++++++++-------- iroh-willow/src/session.rs | 5 +- iroh-willow/src/session/intents.rs | 60 ++++++++++++- iroh-willow/src/store/auth.rs | 3 +- iroh-willow/src/store/memory.rs | 12 +-- iroh-willow/tests/basic.rs | 2 +- 14 files changed, 434 insertions(+), 129 deletions(-) diff --git a/iroh-willow/Cargo.toml b/iroh-willow/Cargo.toml index f34a66540e..16ccf8cade 100644 --- a/iroh-willow/Cargo.toml +++ b/iroh-willow/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "iroh-willow" -version = "0.18.0" +version = "0.22.0" edition = "2021" readme = "README.md" description = "willow protocol implementation for iroh" diff --git a/iroh-willow/src/engine/actor.rs b/iroh-willow/src/engine/actor.rs index ab379a5fa5..3295b208fd 100644 --- a/iroh-willow/src/engine/actor.rs +++ b/iroh-willow/src/engine/actor.rs @@ -10,7 +10,7 @@ use 
tokio::{ use tracing::{debug, error, error_span, trace, warn, Instrument}; use crate::{ - form::{AuthForm, EntryForm, EntryOrForm}, + form::{AuthForm, EntryOrForm}, interest::{CapSelector, CapabilityPack, DelegateTo, InterestMap, Interests}, net::ConnHandle, proto::{ @@ -84,32 +84,20 @@ impl ActorHandle { Ok(()) } - pub async fn insert_entry(&self, entry: Entry, auth: impl Into) -> Result<()> { - let (reply, reply_rx) = oneshot::channel(); - self.send(Input::InsertEntry { - entry: EntryOrForm::Entry(entry), - auth: auth.into(), - reply, - }) - .await?; - reply_rx.await??; - Ok(()) - } - - pub async fn insert( + pub async fn insert_entry( &self, - form: EntryForm, - authorisation: impl Into, + entry: impl Into, + auth: impl Into, ) -> Result<(Entry, bool)> { let (reply, reply_rx) = oneshot::channel(); self.send(Input::InsertEntry { - entry: EntryOrForm::Form(form), - auth: authorisation.into(), + entry: entry.into(), + auth: auth.into(), reply, }) .await?; - let inserted = reply_rx.await??; - Ok(inserted) + let (entry, inserted) = reply_rx.await??; + Ok((entry, inserted)) } pub async fn insert_secret(&self, secret: impl Into) -> Result<()> { diff --git a/iroh-willow/src/form.rs b/iroh-willow/src/form.rs index 3866a7dd7a..a03448b6fa 100644 --- a/iroh-willow/src/form.rs +++ b/iroh-willow/src/form.rs @@ -15,8 +15,7 @@ use serde::{Deserialize, Serialize}; use tokio::io::AsyncRead; use crate::proto::{ - data_model::SerdeWriteCapability, - data_model::{Entry, NamespaceId, Path, SubspaceId, Timestamp}, + data_model::{self, Entry, NamespaceId, Path, SerdeWriteCapability, SubspaceId, Timestamp}, keys::UserId, }; @@ -82,7 +81,7 @@ impl PayloadForm { } /// Either a [`Entry`] or a [`EntryForm`]. -#[derive(Debug)] +#[derive(Debug, derive_more::From)] pub enum EntryOrForm { Entry(Entry), Form(EntryForm), @@ -151,3 +150,57 @@ pub enum TimestampForm { /// Set the timestamp to the provided value. Exact(Timestamp), } + +/// Either a [`Entry`] or a [`EntryForm`]. 
+#[derive(Debug, Serialize, Deserialize)] +pub enum SerdeEntryOrForm { + Entry(#[serde(with = "data_model::serde_encoding::entry")] Entry), + Form(SerdeEntryForm), +} + +impl From for EntryOrForm { + fn from(value: SerdeEntryOrForm) -> Self { + match value { + SerdeEntryOrForm::Entry(entry) => EntryOrForm::Entry(entry), + SerdeEntryOrForm::Form(form) => EntryOrForm::Form(form.into()), + } + } +} + +/// Creates an entry while setting some fields automatically. +#[derive(Debug, Serialize, Deserialize)] +pub struct SerdeEntryForm { + pub namespace_id: NamespaceId, + pub subspace_id: SubspaceForm, + #[serde(with = "data_model::serde_encoding::path")] + pub path: Path, + pub timestamp: TimestampForm, + pub payload: SerdePayloadForm, +} + +#[derive(Debug, Serialize, Deserialize)] +pub enum SerdePayloadForm { + /// Set the payload hash directly. The blob must exist in the node's blob store, this will fail + /// otherwise. + Hash(Hash), +} + +impl From for PayloadForm { + fn from(value: SerdePayloadForm) -> Self { + match value { + SerdePayloadForm::Hash(hash) => PayloadForm::Hash(hash), + } + } +} + +impl From for EntryForm { + fn from(value: SerdeEntryForm) -> Self { + EntryForm { + namespace_id: value.namespace_id, + subspace_id: value.subspace_id, + path: value.path, + timestamp: value.timestamp, + payload: value.payload.into(), + } + } +} diff --git a/iroh-willow/src/interest.rs b/iroh-willow/src/interest.rs index 22af2ad0c0..b76efce869 100644 --- a/iroh-willow/src/interest.rs +++ b/iroh-willow/src/interest.rs @@ -5,12 +5,10 @@ use std::collections::{hash_map, HashMap, HashSet}; use serde::{Deserialize, Serialize}; use crate::proto::{ - data_model::{Entry, SerdeWriteCapability}, - grouping::{Area, AreaExt, AreaOfInterest, Point}, + data_model::Entry, + grouping::{self, Area, AreaExt, AreaOfInterest, Point}, keys::{NamespaceId, UserId}, - meadowcap::{ - serde_encoding::SerdeReadAuthorisation, AccessMode, McCapability, ReadAuthorisation, - }, + meadowcap::{self, 
AccessMode, McCapability, ReadAuthorisation}, }; pub type InterestMap = HashMap>; @@ -18,15 +16,16 @@ pub type InterestMap = HashMap>; /// Enum for describing synchronisation interests. /// /// You should use [`Self::builder`] for a straightforward way to construct this. -#[derive(Debug, Default, Clone)] +#[derive(Debug, Default, Clone, Serialize, Deserialize)] pub enum Interests { /// Use all the capabilities we have. #[default] All, /// Use the selected capabilities and areas. Select(HashMap), - /// Use exactly the specified capabilities and areas. - Exact(InterestMap), + // /// Use exactly the specified capabilities and areas. + // TODO: removed this for now, maybe we want and need it. Will need serde support. + // Exact(InterestMap), } impl Interests { @@ -108,17 +107,44 @@ impl From for Interests { } /// Selector for an [`AreaOfInterest`]. -#[derive(Debug, Default, Clone)] +#[derive(Debug, Default, Clone, Serialize, Deserialize)] pub enum AreaOfInterestSelector { /// Use the widest area allowed by a capability, with no further limits. #[default] Widest, /// Use the specified set of [`AreaOfInterest`]. + #[serde(with = "serde_area_of_interest_set")] Exact(HashSet), } +mod serde_area_of_interest_set { + // TODO: Less clones and allocs. + use crate::proto::grouping::serde_encoding::SerdeAreaOfInterest; + use serde::Deserializer; + + use super::*; + pub fn serialize( + items: &HashSet, + serializer: S, + ) -> Result { + let items: Vec<_> = items + .iter() + .map(|aoi| SerdeAreaOfInterest(aoi.clone())) + .collect(); + items.serialize(serializer) + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> + where + D: Deserializer<'de>, + { + let items: Vec = Deserialize::deserialize(deserializer)?; + Ok(items.into_iter().map(|aoi| aoi.0).collect()) + } +} + /// Selector for a capability. 
-#[derive(Debug, Clone, Hash, Eq, PartialEq)] +#[derive(Debug, Clone, Hash, Eq, PartialEq, Serialize, Deserialize)] pub struct CapSelector { /// The namespace to which the capability must grant access. pub namespace_id: NamespaceId, @@ -208,13 +234,13 @@ impl ReceiverSelector { } /// Selector for the area to which a capability must grant access. -#[derive(Debug, Clone, Default, Hash, Eq, PartialEq)] +#[derive(Debug, Clone, Default, Hash, Eq, PartialEq, Serialize, Deserialize)] pub enum AreaSelector { /// Use the capability which covers the biggest area. #[default] Widest, /// Use any capability that covers the provided area. - ContainsArea(Area), + ContainsArea(#[serde(with = "grouping::serde_encoding::area")] Area), /// Use any capability that covers the provided point (i.e. entry). ContainsPoint(Point), } @@ -235,9 +261,9 @@ impl AreaSelector { #[derive(Debug, Serialize, Deserialize, Clone)] pub enum CapabilityPack { /// A read authorisation. - Read(SerdeReadAuthorisation), + Read(#[serde(with = "meadowcap::serde_encoding::read_authorisation")] ReadAuthorisation), /// A write authorisation. - Write(SerdeWriteCapability), + Write(#[serde(with = "meadowcap::serde_encoding::mc_capability")] McCapability), } impl CapabilityPack { @@ -252,7 +278,7 @@ impl CapabilityPack { // meadowcap capability themselves are validated on creation/deserialization. let is_valid = match self { Self::Read(cap) => cap.read_cap().access_mode() == AccessMode::Read, - Self::Write(cap) => cap.0.access_mode() == AccessMode::Write, + Self::Write(cap) => cap.access_mode() == AccessMode::Write, }; if !is_valid { Err(InvalidCapabilityPack) @@ -279,7 +305,7 @@ impl CapabilityPack { pub struct InvalidCapabilityPack; // TODO: This doesn't really belong into this module. 
-#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct DelegateTo { pub user: UserId, pub restrict_area: RestrictArea, @@ -295,10 +321,10 @@ impl DelegateTo { } // TODO: This doesn't really belong into this module. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub enum RestrictArea { None, - Restrict(Area), + Restrict(#[serde(with = "grouping::serde_encoding::area")] Area), } impl RestrictArea { diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 185f4b796a..990b6fc567 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -802,7 +802,7 @@ mod tests { timestamp: TimestampForm::Now, payload: PayloadForm::Bytes(payload.into()), }; - let (entry, inserted) = handle.insert(entry, AuthForm::Any(user_id)).await?; + let (entry, inserted) = handle.insert_entry(entry, AuthForm::Any(user_id)).await?; assert!(inserted); track_entries.extend([entry]); } diff --git a/iroh-willow/src/proto/data_model.rs b/iroh-willow/src/proto/data_model.rs index 07fc2bbdfa..1c04982120 100644 --- a/iroh-willow/src/proto/data_model.rs +++ b/iroh-willow/src/proto/data_model.rs @@ -227,30 +227,95 @@ mod encoding { } pub mod serde_encoding { - use serde::{de, Deserialize, Deserializer, Serialize}; + use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use crate::util::codec2::{from_bytes, to_vec}; use super::*; - /// [`Entry`] wrapper that can be serialized with [`serde`]. 
- #[derive(Debug, Clone, derive_more::From, derive_more::Into, derive_more::Deref)] - pub struct SerdeEntry(pub Entry); + pub mod path { + + use super::*; + pub fn serialize(path: &Path, serializer: S) -> Result { + to_vec(path).serialize(serializer) + } - impl Serialize for SerdeEntry { - fn serialize(&self, serializer: S) -> Result { - to_vec(&self.0).serialize(serializer) + pub fn deserialize<'de, D>(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let bytes: Vec = Deserialize::deserialize(deserializer)?; + let decoded = from_bytes(&bytes).map_err(de::Error::custom)?; + Ok(decoded) } } - impl<'de> Deserialize<'de> for SerdeEntry { - fn deserialize(deserializer: D) -> Result + pub mod entry { + use super::*; + pub fn serialize(entry: &Entry, serializer: S) -> Result { + to_vec(entry).serialize(serializer) + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result where D: Deserializer<'de>, { let bytes: Vec = Deserialize::deserialize(deserializer)?; let decoded = from_bytes(&bytes).map_err(de::Error::custom)?; - Ok(Self(decoded)) + Ok(decoded) } } + + /// [`Entry`] wrapper that can be serialized with [`serde`]. 
+ #[derive( + Debug, + Clone, + derive_more::From, + derive_more::Into, + derive_more::Deref, + Serialize, + Deserialize, + )] + pub struct SerdeEntry(#[serde(with = "entry")] pub Entry); + + pub mod authorised_entry { + use crate::proto::meadowcap::serde_encoding::SerdeMcCapability; + use keys::UserSignature; + + use super::*; + pub fn serialize( + entry: &AuthorisedEntry, + serializer: S, + ) -> Result { + let (entry, token) = entry.clone().into_parts(); + ( + SerdeEntry(entry), + SerdeMcCapability(token.capability), + token.signature, + ) + .serialize(serializer) + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let (entry, capability, signature): (SerdeEntry, SerdeMcCapability, UserSignature) = + Deserialize::deserialize(deserializer)?; + let token = AuthorisationToken::new(capability.0, signature); + Ok(AuthorisedEntry::new(entry.0, token).map_err(de::Error::custom)?) + } + } + + /// [`AuthorisedEntry`] wrapper that can be serialized with [`serde`]. + #[derive( + Debug, + Clone, + derive_more::From, + derive_more::Into, + derive_more::Deref, + Serialize, + Deserialize, + )] + pub struct SerdeAuthorisedEntry(#[serde(with = "authorised_entry")] pub AuthorisedEntry); } diff --git a/iroh-willow/src/proto/grouping.rs b/iroh-willow/src/proto/grouping.rs index b6ac09e8cf..a6e3510c6e 100644 --- a/iroh-willow/src/proto/grouping.rs +++ b/iroh-willow/src/proto/grouping.rs @@ -1,10 +1,12 @@ //! Utilities for Willow's entry [groupings](https://willowprotocol.org/specs/grouping-entries/index.html#grouping_entries). +use serde::{Deserialize, Serialize}; pub use willow_data_model::grouping::{Range, RangeEnd}; use willow_data_model::SubspaceId as _; use super::data_model::{ - Entry, Path, SubspaceId, Timestamp, MAX_COMPONENT_COUNT, MAX_COMPONENT_LENGTH, MAX_PATH_LENGTH, + self, Entry, Path, SubspaceId, Timestamp, MAX_COMPONENT_COUNT, MAX_COMPONENT_LENGTH, + MAX_PATH_LENGTH, }; /// See [`willow_data_model::grouping::Range3d`]. 
@@ -91,8 +93,9 @@ impl AreaExt for Area { /// A single point in the 3D range space. /// /// I.e. an entry. -#[derive(Debug, Clone, Hash, Eq, PartialEq)] +#[derive(Debug, Clone, Hash, Eq, PartialEq, Serialize, Deserialize)] pub struct Point { + #[serde(with = "data_model::serde_encoding::path")] pub path: Path, pub timestamp: Timestamp, pub subspace_id: SubspaceId, @@ -127,21 +130,40 @@ pub mod serde_encoding { use super::*; - #[derive( - Debug, Clone, Eq, PartialEq, derive_more::From, derive_more::Into, derive_more::Deref, - )] - pub struct SerdeAreaOfInterest(pub AreaOfInterest); - - impl Serialize for SerdeAreaOfInterest { - fn serialize(&self, serializer: S) -> Result { + pub mod area { + use super::*; + pub fn serialize( + area: &Area, + serializer: S, + ) -> Result { let previous = Area::new_full(); - let encoded_area = to_vec_relative(&previous, &self.0.area); - (encoded_area, self.0.max_count, self.0.max_size).serialize(serializer) + let encoded_area = to_vec_relative(&previous, area); + encoded_area.serialize(serializer) + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let relative = Area::new_full(); + let encoded_area: Vec = Deserialize::deserialize(deserializer)?; + let area = from_bytes_relative(&relative, &encoded_area).map_err(de::Error::custom)?; + Ok(area) } } - impl<'de> Deserialize<'de> for SerdeAreaOfInterest { - fn deserialize(deserializer: D) -> Result + pub mod area_of_interest { + use super::*; + pub fn serialize( + aoi: &AreaOfInterest, + serializer: S, + ) -> Result { + let previous = Area::new_full(); + let encoded_area = to_vec_relative(&previous, &aoi.area); + (encoded_area, aoi.max_count, aoi.max_size).serialize(serializer) + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result where D: Deserializer<'de>, { @@ -149,31 +171,67 @@ pub mod serde_encoding { let (encoded_area, max_count, max_size): (Vec, u64, u64) = Deserialize::deserialize(deserializer)?; let area = 
from_bytes_relative(&relative, &encoded_area).map_err(de::Error::custom)?; - Ok(Self(AreaOfInterest::new(area, max_count, max_size))) + Ok(AreaOfInterest::new(area, max_count, max_size)) } } - #[derive(Debug, Clone, derive_more::From, derive_more::Into, derive_more::Deref)] - pub struct SerdeRange3d(pub Range3d); - - impl Serialize for SerdeRange3d { - fn serialize(&self, serializer: S) -> Result { + pub mod range_3d { + use super::*; + pub fn serialize( + range: &Range3d, + serializer: S, + ) -> Result { let previous = Range3d::new_full(); - to_vec_relative(&previous, &self.0).serialize(serializer) + to_vec_relative(&previous, range).serialize(serializer) } - } - impl<'de> Deserialize<'de> for SerdeRange3d { - fn deserialize(deserializer: D) -> Result + pub fn deserialize<'de, D>(deserializer: D) -> Result where D: Deserializer<'de>, { let previous = Range3d::new_full(); let bytes: Vec = Deserialize::deserialize(deserializer)?; let decoded = from_bytes_relative(&previous, &bytes).map_err(de::Error::custom)?; - Ok(Self(decoded)) + Ok(decoded) } } + + #[derive( + Debug, + Clone, + Eq, + PartialEq, + derive_more::From, + derive_more::Into, + derive_more::Deref, + Serialize, + Deserialize, + )] + pub struct SerdeArea(#[serde(with = "area")] pub Area); + + #[derive( + Debug, + Clone, + Eq, + PartialEq, + derive_more::From, + derive_more::Into, + derive_more::Deref, + Serialize, + Deserialize, + )] + pub struct SerdeAreaOfInterest(#[serde(with = "area_of_interest")] pub AreaOfInterest); + + #[derive( + Debug, + Clone, + derive_more::From, + derive_more::Into, + derive_more::Deref, + Serialize, + Deserialize, + )] + pub struct SerdeRange3d(#[serde(with = "range_3d")] pub Range3d); } #[cfg(test)] diff --git a/iroh-willow/src/proto/keys.rs b/iroh-willow/src/proto/keys.rs index 606b25a736..476e0db86a 100644 --- a/iroh-willow/src/proto/keys.rs +++ b/iroh-willow/src/proto/keys.rs @@ -63,7 +63,7 @@ impl IsCommunal for NamespacePublicKey { /// /// A [`NamespacePublicKey`] whose 
last bit is 1 is defined to be a communal namespace, /// and if the last bit is zero it is an owned namespace. -#[derive(Debug, Eq, PartialEq, Copy, Clone)] +#[derive(Debug, Eq, PartialEq, Copy, Clone, Serialize, Deserialize)] pub enum NamespaceKind { /// Communal namespace, needs [`super::meadowcap::CommunalCapability`] to authorizse. Communal, diff --git a/iroh-willow/src/proto/meadowcap.rs b/iroh-willow/src/proto/meadowcap.rs index ea1afa2c31..ef0e5f119c 100644 --- a/iroh-willow/src/proto/meadowcap.rs +++ b/iroh-willow/src/proto/meadowcap.rs @@ -2,14 +2,14 @@ //! //! Contains an instantiation of [`meadowcap`] for use in iroh-willow. +use serde::{Deserialize, Serialize}; +use willow_data_model::AuthorisationToken; + use super::{ grouping::Area, keys::{self, NamespaceSecretKey, UserSecretKey}, }; -use serde::Serialize; -use willow_data_model::AuthorisationToken; - pub type UserPublicKey = keys::UserPublicKey; pub type NamespacePublicKey = keys::NamespacePublicKey; pub type UserId = keys::UserId; @@ -21,7 +21,7 @@ use super::data_model::{Entry, MAX_COMPONENT_COUNT, MAX_COMPONENT_LENGTH, MAX_PA pub use meadowcap::{AccessMode, IsCommunal}; -#[derive(Debug, derive_more::From)] +#[derive(Debug, derive_more::From, Serialize, Deserialize)] pub enum SecretKey { User(keys::UserSecretKey), Namespace(keys::NamespaceSecretKey), @@ -144,74 +144,133 @@ pub mod serde_encoding { use super::*; - #[derive( - Debug, Clone, Eq, PartialEq, Hash, derive_more::From, derive_more::Into, derive_more::Deref, - )] - pub struct SerdeReadAuthorisation(pub ReadAuthorisation); - - impl Serialize for SerdeReadAuthorisation { - fn serialize(&self, serializer: S) -> Result { - let encoded_cap = to_vec_relative(&Area::new_full(), &self.0 .0); - let encoded_subspace_cap = self.0 .1.as_ref().map(to_vec); + pub mod read_authorisation { + use super::*; + pub fn serialize( + value: &ReadAuthorisation, + serializer: S, + ) -> Result { + let encoded_cap = to_vec_relative(&Area::new_full(), 
value.read_cap()); + let encoded_subspace_cap = value.subspace_cap().map(to_vec); (encoded_cap, encoded_subspace_cap).serialize(serializer) } - } - impl<'de> Deserialize<'de> for SerdeReadAuthorisation { - fn deserialize(deserializer: D) -> Result + pub fn deserialize<'de, D>(deserializer: D) -> Result where D: Deserializer<'de>, { let (read_cap, subspace_cap): (SerdeMcCapability, Option) = Deserialize::deserialize(deserializer)?; - Ok(Self(ReadAuthorisation( + Ok(ReadAuthorisation( read_cap.into(), subspace_cap.map(Into::into), - ))) + )) } } #[derive( - Debug, Clone, Eq, PartialEq, Hash, derive_more::From, derive_more::Into, derive_more::Deref, + Debug, + Clone, + Eq, + PartialEq, + Hash, + derive_more::From, + derive_more::Into, + derive_more::Deref, + Serialize, + Deserialize, )] - pub struct SerdeMcCapability(pub McCapability); - - impl Serialize for SerdeMcCapability { - fn serialize(&self, serializer: S) -> Result { + pub struct SerdeReadAuthorisation(#[serde(with = "read_authorisation")] pub ReadAuthorisation); + + pub mod mc_capability { + use super::*; + pub fn serialize( + value: &McCapability, + serializer: S, + ) -> Result { let previous = Area::new_full(); - to_vec_relative(&previous, &self.0).serialize(serializer) + to_vec_relative(&previous, value).serialize(serializer) } - } - impl<'de> Deserialize<'de> for SerdeMcCapability { - fn deserialize(deserializer: D) -> Result + pub fn deserialize<'de, D>(deserializer: D) -> Result where D: Deserializer<'de>, { let previous = Area::new_full(); let bytes: Vec = Deserialize::deserialize(deserializer)?; let decoded = from_bytes_relative(&previous, &bytes).map_err(de::Error::custom)?; - Ok(Self(decoded)) + Ok(decoded) } } - #[derive(Debug, Clone, derive_more::From, derive_more::Into, derive_more::Deref)] - pub struct SerdeMcSubspaceCapability(pub McSubspaceCapability); - - impl Serialize for SerdeMcSubspaceCapability { - fn serialize(&self, serializer: S) -> Result { - to_vec(&self.0).serialize(serializer) + 
#[derive( + Debug, + Clone, + Eq, + PartialEq, + Hash, + derive_more::From, + derive_more::Into, + derive_more::Deref, + Serialize, + Deserialize, + )] + pub struct SerdeMcCapability(#[serde(with = "mc_capability")] pub McCapability); + + pub mod mc_subspace_capability { + use super::*; + pub fn serialize( + value: &McSubspaceCapability, + serializer: S, + ) -> Result { + to_vec(value).serialize(serializer) } - } - impl<'de> Deserialize<'de> for SerdeMcSubspaceCapability { - fn deserialize(deserializer: D) -> Result + pub fn deserialize<'de, D>(deserializer: D) -> Result where D: Deserializer<'de>, { let bytes: Vec = Deserialize::deserialize(deserializer)?; let decoded = from_bytes(&bytes).map_err(de::Error::custom)?; - Ok(Self(decoded)) + Ok(decoded) + } + } + + #[derive( + Debug, + Clone, + derive_more::From, + derive_more::Into, + derive_more::Deref, + Serialize, + Deserialize, + )] + pub struct SerdeMcSubspaceCapability( + #[serde(with = "mc_subspace_capability")] pub McSubspaceCapability, + ); + + pub mod access_mode { + use super::*; + pub fn serialize( + value: &AccessMode, + serializer: S, + ) -> Result { + match value { + AccessMode::Read => 0u8.serialize(serializer), + AccessMode::Write => 1u8.serialize(serializer), + } + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let value: u8 = Deserialize::deserialize(deserializer)?; + match value { + 0 => Ok(AccessMode::Read), + 1 => Ok(AccessMode::Write), + _ => Err(de::Error::custom("Invalid access mode")), + } } } } diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index dd54b26cc8..ca27d9ac5d 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -10,6 +10,7 @@ use std::sync::Arc; use channels::ChannelSenders; +use serde::{Deserialize, Serialize}; use tokio::sync::mpsc; use crate::{ @@ -65,7 +66,7 @@ impl Role { /// * [`Self::Continuous`] will enable the live data channels to synchronize updates in real-time. 
/// * [`Self::ReconcileOnce`] will run a single reconciliation of the interests declared at session /// start, and then close the session. -#[derive(Debug, Clone, Copy, Eq, PartialEq)] +#[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize)] pub enum SessionMode { /// Run a single, full reconciliation, and then quit. ReconcileOnce, @@ -81,7 +82,7 @@ impl SessionMode { } /// Options to initialize a session. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct SessionInit { /// Selects the areas we wish to synchronize. pub interests: Interests, diff --git a/iroh-willow/src/session/intents.rs b/iroh-willow/src/session/intents.rs index 96327dca07..eda8d9ade5 100644 --- a/iroh-willow/src/session/intents.rs +++ b/iroh-willow/src/session/intents.rs @@ -18,7 +18,7 @@ use anyhow::Result; use futures_lite::{Stream, StreamExt}; use futures_util::FutureExt; use genawaiter::rc::Co; - +use serde::{Deserialize, Serialize}; use tokio::sync::mpsc; use tokio_stream::{wrappers::ReceiverStream, StreamMap, StreamNotifyClose}; use tokio_util::sync::PollSender; @@ -80,7 +80,7 @@ impl EventKind { } /// Updates that may be submitted from an intent into the synchronisation session. -#[derive(Debug)] +#[derive(Debug, Serialize, Deserialize)] pub enum IntentUpdate { /// Submit new interests into the session. 
AddInterests(Interests), @@ -642,3 +642,59 @@ fn flatten_interests(interests: &InterestMap) -> NamespaceInterests { } out } + +pub mod serde_encoding { + use serde::{Deserialize, Serialize}; + + use crate::proto::grouping::serde_encoding::{SerdeArea, SerdeAreaOfInterest}; + use crate::proto::keys::NamespaceId; + use crate::session::intents::EventKind; + + /// Serializable version of EventKind + #[derive(Debug, Clone, Serialize, Deserialize)] + pub enum Event { + CapabilityIntersection { + namespace: NamespaceId, + area: SerdeArea, + }, + InterestIntersection { + namespace: NamespaceId, + area: SerdeAreaOfInterest, + }, + Reconciled { + namespace: NamespaceId, + area: SerdeAreaOfInterest, + }, + ReconciledAll, + Abort { + error: String, // Simplified error representation + }, + } + + impl From for Event { + fn from(event: EventKind) -> Self { + match event { + EventKind::CapabilityIntersection { namespace, area } => { + Event::CapabilityIntersection { + namespace, + area: SerdeArea(area), + } + } + EventKind::InterestIntersection { namespace, area } => { + Event::InterestIntersection { + namespace, + area: SerdeAreaOfInterest(area), + } + } + EventKind::Reconciled { namespace, area } => Event::Reconciled { + namespace, + area: SerdeAreaOfInterest(area), + }, + EventKind::ReconciledAll => Event::ReconciledAll, + EventKind::Abort { error } => Event::Abort { + error: error.to_string(), + }, + } + } + } +} diff --git a/iroh-willow/src/store/auth.rs b/iroh-willow/src/store/auth.rs index 54d1511c99..597cae90ae 100644 --- a/iroh-willow/src/store/auth.rs +++ b/iroh-willow/src/store/auth.rs @@ -117,8 +117,7 @@ impl Auth { } } Ok(out) - } - Interests::Exact(interests) => Ok(interests), + } // Interests::Exact(interests) => Ok(interests), } } diff --git a/iroh-willow/src/store/memory.rs b/iroh-willow/src/store/memory.rs index 54aebf2d44..f64407d941 100644 --- a/iroh-willow/src/store/memory.rs +++ b/iroh-willow/src/store/memory.rs @@ -24,15 +24,15 @@ use crate::{ }; 
#[derive(Debug, Clone, Default)] -pub struct Store { +pub struct Store { secrets: Rc>, entries: Rc>, - payloads: iroh_blobs::store::mem::Store, + payloads: PS, caps: Rc>, } -impl Store { - pub fn new(payloads: iroh_blobs::store::mem::Store) -> Self { +impl Store { + pub fn new(payloads: PS) -> Self { Self { payloads, secrets: Default::default(), @@ -42,10 +42,10 @@ impl Store { } } -impl traits::Storage for Store { +impl traits::Storage for Store { type Entries = Rc>; type Secrets = Rc>; - type Payloads = iroh_blobs::store::mem::Store; + type Payloads = PS; type Caps = Rc>; fn entries(&self) -> &Self::Entries { diff --git a/iroh-willow/tests/basic.rs b/iroh-willow/tests/basic.rs index ab382b9719..910a3eea3e 100644 --- a/iroh-willow/tests/basic.rs +++ b/iroh-willow/tests/basic.rs @@ -435,7 +435,7 @@ mod util { ) -> Result<()> { let path = Path::from_bytes(path)?; let entry = EntryForm::new_bytes(namespace_id, path, bytes); - handle.insert(entry, user).await?; + handle.insert_entry(entry, user).await?; Ok(()) } } From c5971c39d986120f0a5b817072e13eaac14a7d7a Mon Sep 17 00:00:00 2001 From: Frando Date: Mon, 26 Aug 2024 21:55:47 +0200 Subject: [PATCH 164/198] feat: integrate willow in iroh --- Cargo.lock | 3 +- iroh-willow/Cargo.toml | 2 +- iroh/Cargo.toml | 4 +- iroh/src/node.rs | 1 + iroh/src/node/builder.rs | 11 +++ iroh/src/node/rpc.rs | 4 + iroh/src/node/rpc/spaces.rs | 164 ++++++++++++++++++++++++++++++++ iroh/src/rpc_protocol.rs | 3 + iroh/src/rpc_protocol/spaces.rs | 158 ++++++++++++++++++++++++++++++ 9 files changed, 347 insertions(+), 3 deletions(-) create mode 100644 iroh/src/node/rpc/spaces.rs create mode 100644 iroh/src/rpc_protocol/spaces.rs diff --git a/Cargo.lock b/Cargo.lock index 5c90f5255b..69534ea2e9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2502,6 +2502,7 @@ dependencies = [ "iroh-net", "iroh-quinn", "iroh-test", + "iroh-willow", "nested_enum_utils", "num_cpus", "parking_lot", @@ -3012,7 +3013,7 @@ dependencies = [ [[package]] name = 
"iroh-willow" -version = "0.18.0" +version = "0.23.0" dependencies = [ "anyhow", "bytes", diff --git a/iroh-willow/Cargo.toml b/iroh-willow/Cargo.toml index 16ccf8cade..6f84329ad0 100644 --- a/iroh-willow/Cargo.toml +++ b/iroh-willow/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "iroh-willow" -version = "0.22.0" +version = "0.23.0" edition = "2021" readme = "README.md" description = "willow protocol implementation for iroh" diff --git a/iroh/Cargo.toml b/iroh/Cargo.toml index 243330410c..a7226a6787 100644 --- a/iroh/Cargo.toml +++ b/iroh/Cargo.toml @@ -31,6 +31,7 @@ iroh-base = { version = "0.23.0", path = "../iroh-base", features = ["key"] } iroh-io = { version = "0.6.0", features = ["stats"] } iroh-metrics = { version = "0.23.0", path = "../iroh-metrics", optional = true } iroh-net = { version = "0.23.0", path = "../iroh-net", features = ["discovery-local-network"] } +iroh-willow = { version = "0.23.0", path = "../iroh-willow", optional = true } nested_enum_utils = "0.1.0" num_cpus = { version = "1.15.0" } portable-atomic = "1" @@ -62,7 +63,7 @@ console = { version = "0.15.5", optional = true } url = { version = "2.5.0", features = ["serde"] } [features] -default = ["metrics", "fs-store"] +default = ["metrics", "fs-store", "willow"] metrics = ["iroh-metrics", "iroh-blobs/metrics"] fs-store = ["iroh-blobs/fs-store"] test = [] @@ -70,6 +71,7 @@ examples = ["dep:clap", "dep:indicatif"] discovery-local-network = ["iroh-net/discovery-local-network", "examples", "dep:console"] discovery-pkarr-dht = ["iroh-net/discovery-pkarr-dht"] test-utils = ["iroh-net/test-utils"] +willow = ["dep:iroh-willow"] [dev-dependencies] anyhow = { version = "1" } diff --git a/iroh/src/node.rs b/iroh/src/node.rs index 2b8a52edf2..ac699da00c 100644 --- a/iroh/src/node.rs +++ b/iroh/src/node.rs @@ -118,6 +118,7 @@ struct NodeInner { downloader: Downloader, blob_batches: tokio::sync::Mutex, local_pool_handle: LocalPoolHandle, + willow: iroh_willow::Engine, } /// Keeps track of all the currently 
active batch operations of the blobs api. diff --git a/iroh/src/node/builder.rs b/iroh/src/node/builder.rs index 8ca6c05d21..8bfd488659 100644 --- a/iroh/src/node/builder.rs +++ b/iroh/src/node/builder.rs @@ -588,6 +588,16 @@ where ) .await?; + // Spawn the willow engine. + // TODO: Allow to disable. + let blobs_store = self.blobs_store.clone(); + let create_store = move || iroh_willow::store::memory::Store::new(blobs_store); + let willow = iroh_willow::Engine::spawn( + endpoint.clone(), + create_store, + iroh_willow::engine::AcceptOpts::default(), + ); + // Initialize the internal RPC connection. let (internal_rpc, controller) = quic_rpc::transport::flume::connection::(32); let internal_rpc = quic_rpc::transport::boxed::ServerEndpoint::new(internal_rpc); @@ -608,6 +618,7 @@ where gossip, local_pool_handle: lp.handle().clone(), blob_batches: Default::default(), + willow, }); let protocol_builder = ProtocolBuilder { diff --git a/iroh/src/node/rpc.rs b/iroh/src/node/rpc.rs index 4baf5c9d39..a5f12ea9fc 100644 --- a/iroh/src/node/rpc.rs +++ b/iroh/src/node/rpc.rs @@ -77,6 +77,7 @@ use crate::rpc_protocol::{ use super::IrohServerEndpoint; mod docs; +mod spaces; const HEALTH_POLL_WAIT: Duration = Duration::from_secs(1); /// Chunk size for getting blobs over RPC @@ -463,6 +464,9 @@ impl Handler { Authors(msg) => self.handle_authors_request(msg, chan).await, Docs(msg) => self.handle_docs_request(msg, chan).await, Gossip(msg) => self.handle_gossip_request(msg, chan).await, + Spaces(msg) => { + self::spaces::handle_rpc_request(self.inner.willow.clone(), msg, chan).await + } } } diff --git a/iroh/src/node/rpc/spaces.rs b/iroh/src/node/rpc/spaces.rs new file mode 100644 index 0000000000..2415d3c4a6 --- /dev/null +++ b/iroh/src/node/rpc/spaces.rs @@ -0,0 +1,164 @@ +use anyhow::Result; +use futures_lite::Stream; +use futures_util::SinkExt; +use futures_util::StreamExt; +use iroh_base::rpc::{RpcError, RpcResult}; +use iroh_willow::Engine; +use quic_rpc::server::{RpcChannel, 
RpcServerError}; +use tokio::sync::mpsc; +use tokio_stream::wrappers::ReceiverStream; + +use crate::node::IrohServerEndpoint; +use crate::rpc_protocol::spaces::*; +use crate::rpc_protocol::RpcService; + +fn map_err(err: anyhow::Error) -> RpcError { + RpcError::from(err) +} + +pub(crate) async fn handle_rpc_request( + engine: Engine, + msg: Request, + chan: RpcChannel, +) -> Result<(), RpcServerError> { + use Request::*; + match msg { + IngestEntry(msg) => { + chan.rpc(msg, engine, |engine, req| async move { + engine + .ingest_entry(req.authorised_entry) + .await + .map(|_| IngestEntryResponse) + .map_err(map_err) + }) + .await + } + InsertEntry(msg) => { + chan.rpc(msg, engine, |engine, req| async move { + engine + .insert_entry(req.entry, req.auth) + .await + .map(|_| InsertEntryResponse) + .map_err(map_err) + }) + .await + } + InsertSecret(msg) => { + chan.rpc(msg, engine, |engine, req| async move { + engine + .insert_secret(req.secret) + .await + .map(|_| InsertSecretResponse) + .map_err(map_err) + }) + .await + } + GetEntries(msg) => { + chan.try_server_streaming(msg, engine, |engine, req| async move { + let stream = engine + .get_entries(req.namespace, req.range) + .await + .map_err(map_err)?; + Ok(stream.map(|res| res.map(|e| GetEntriesResponse(e.into())).map_err(map_err))) + }) + .await + } + CreateNamespace(msg) => { + chan.rpc(msg, engine, |engine, req| async move { + engine + .create_namespace(req.kind, req.owner) + .await + .map(CreateNamespaceResponse) + .map_err(map_err) + }) + .await + } + CreateUser(msg) => { + chan.rpc(msg, engine, |engine, _| async move { + engine + .create_user() + .await + .map(CreateUserResponse) + .map_err(map_err) + }) + .await + } + DelegateCaps(msg) => { + chan.rpc(msg, engine, |engine, req| async move { + engine + .delegate_caps(req.from, req.access_mode, req.to) + .await + .map(DelegateCapsResponse) + .map_err(map_err) + }) + .await + } + ImportCaps(msg) => { + chan.rpc(msg, engine, |engine, req| async move { + engine + 
.import_caps(req.caps) + .await + .map(|_| ImportCapsResponse) + .map_err(map_err) + }) + .await + } + // ResolveInterests(msg) => { + // chan.rpc(msg, engine, |engine, req| async move { + // engine + // .resolve_interests(req.interests) + // .await + // .map(ResolveInterestsResponse) + // .map_err(map_err) + // }) + // .await + // } + SyncWithPeer(msg) => { + chan.bidi_streaming(msg, engine, |engine, req, update_stream| { + // TODO: refactor to use less tasks + let (events_tx, events_rx) = tokio::sync::mpsc::channel(32); + tokio::task::spawn(async move { + if let Err(err) = + sync_with_peer(engine, req, events_tx.clone(), update_stream).await + { + let _ = events_tx.send(Err(err.into())).await; + } + }); + ReceiverStream::new(events_rx) + }) + .await + } + SyncWithPeerUpdate(_) => Err(RpcServerError::UnexpectedStartMessage), + } +} + +async fn sync_with_peer( + engine: Engine, + req: SyncWithPeerRequest, + events_tx: mpsc::Sender>, + mut update_stream: impl Stream + Unpin + Send + 'static, +) -> anyhow::Result<()> { + let handle = engine + .sync_with_peer(req.peer, req.init) + .await + .map_err(map_err)?; + let (mut update_sink, mut events) = handle.split(); + tokio::task::spawn(async move { + while let Some(update) = update_stream.next().await { + if let Err(_) = update_sink.send(update.0).await { + break; + } + } + }); + tokio::task::spawn(async move { + while let Some(event) = events.next().await { + if let Err(_) = events_tx + .send(Ok(SyncWithPeerResponse::Event(event.into()))) + .await + { + break; + } + } + }); + Ok(()) +} diff --git a/iroh/src/rpc_protocol.rs b/iroh/src/rpc_protocol.rs index b70e4b2e63..5b81f32225 100644 --- a/iroh/src/rpc_protocol.rs +++ b/iroh/src/rpc_protocol.rs @@ -23,6 +23,7 @@ pub mod docs; pub mod gossip; pub mod net; pub mod node; +pub mod spaces; pub mod tags; /// The RPC service for the iroh provider process. 
@@ -41,6 +42,7 @@ pub enum Request { Tags(tags::Request), Authors(authors::Request), Gossip(gossip::Request), + Spaces(spaces::Request), } /// The response enum, listing all possible responses. @@ -55,6 +57,7 @@ pub enum Response { Docs(docs::Response), Authors(authors::Response), Gossip(gossip::Response), + Spaces(spaces::Response), } impl quic_rpc::Service for RpcService { diff --git a/iroh/src/rpc_protocol/spaces.rs b/iroh/src/rpc_protocol/spaces.rs new file mode 100644 index 0000000000..dac5714adf --- /dev/null +++ b/iroh/src/rpc_protocol/spaces.rs @@ -0,0 +1,158 @@ +use iroh_base::rpc::{RpcError, RpcResult}; +use iroh_net::NodeId; +use iroh_willow::{ + form::{AuthForm, SerdeEntryOrForm}, + interest::{CapSelector, CapabilityPack, DelegateTo}, + proto::{ + data_model::{self, serde_encoding::SerdeEntry, AuthorisedEntry}, + grouping::{self, Range3d}, + keys::{NamespaceId, NamespaceKind, UserId}, + meadowcap::{self, AccessMode, SecretKey}, + }, + session::{ + intents::{serde_encoding::Event, IntentUpdate}, + SessionInit, + }, +}; +use nested_enum_utils::enum_conversions; +use quic_rpc_derive::rpc_requests; +use serde::{Deserialize, Serialize}; + +use super::RpcService; + +#[allow(missing_docs)] +#[derive(strum::Display, Debug, Serialize, Deserialize)] +#[enum_conversions(super::Request)] +#[rpc_requests(RpcService)] +pub enum Request { + #[rpc(response = RpcResult)] + IngestEntry(IngestEntryRequest), + #[rpc(response = RpcResult)] + InsertEntry(InsertEntryRequest), + #[rpc(response = RpcResult)] + InsertSecret(InsertSecretRequest), + #[try_server_streaming(create_error = RpcError, item_error = RpcError, item = GetEntriesResponse)] + GetEntries(GetEntriesRequest), + #[rpc(response = RpcResult)] + CreateNamespace(CreateNamespaceRequest), + #[rpc(response = RpcResult)] + CreateUser(CreateUserRequest), + #[rpc(response = RpcResult)] + DelegateCaps(DelegateCapsRequest), + #[rpc(response = RpcResult)] + ImportCaps(ImportCapsRequest), + // #[rpc(response = RpcResult)] + 
// ResolveInterests(ResolveInterestsRequest), + #[bidi_streaming(update = SyncWithPeerUpdate, response = RpcResult)] + SyncWithPeer(SyncWithPeerRequest), + SyncWithPeerUpdate(SyncWithPeerUpdate), +} + +#[allow(missing_docs)] +#[derive(strum::Display, Debug, Serialize, Deserialize)] +#[enum_conversions(super::Response)] +pub enum Response { + IngestEntry(RpcResult), + InsertEntry(RpcResult), + InsertSecret(RpcResult), + GetEntries(RpcResult), + CreateNamespace(RpcResult), + CreateUser(RpcResult), + DelegateCaps(RpcResult), + ImportCaps(RpcResult), + // ResolveInterests(RpcResult), + SyncWithPeer(RpcResult), +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct IngestEntryRequest { + #[serde(with = "data_model::serde_encoding::authorised_entry")] + pub authorised_entry: AuthorisedEntry, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct IngestEntryResponse; + +#[derive(Debug, Serialize, Deserialize)] +pub struct InsertEntryRequest { + pub entry: SerdeEntryOrForm, + pub auth: AuthForm, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct InsertEntryResponse; + +#[derive(Debug, Serialize, Deserialize)] +pub struct InsertSecretRequest { + pub secret: SecretKey, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct InsertSecretResponse; + +#[derive(Debug, Serialize, Deserialize)] +pub struct GetEntriesRequest { + pub namespace: NamespaceId, + #[serde(with = "grouping::serde_encoding::range_3d")] + pub range: Range3d, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct GetEntriesResponse(pub SerdeEntry); + +#[derive(Debug, Serialize, Deserialize)] +pub struct CreateNamespaceRequest { + pub kind: NamespaceKind, + pub owner: UserId, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct CreateNamespaceResponse(pub NamespaceId); + +#[derive(Debug, Serialize, Deserialize)] +pub struct CreateUserRequest; + +#[derive(Debug, Serialize, Deserialize)] +pub struct CreateUserResponse(pub UserId); + +#[derive(Debug, Serialize, Deserialize)] +pub 
struct DelegateCapsRequest { + pub from: CapSelector, + #[serde(with = "meadowcap::serde_encoding::access_mode")] + pub access_mode: AccessMode, + pub to: DelegateTo, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct DelegateCapsResponse(pub Vec); + +#[derive(Debug, Serialize, Deserialize)] +pub struct ImportCapsRequest { + pub caps: Vec, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct ImportCapsResponse; + +// #[derive(Debug, Serialize, Deserialize)] +// pub struct ResolveInterestsRequest { +// pub interests: Interests, +// } + +// #[derive(Debug, Serialize, Deserialize)] +// pub struct ResolveInterestsResponse(pub InterestMap); + +#[derive(Debug, Serialize, Deserialize)] +pub struct SyncWithPeerRequest { + pub peer: NodeId, + pub init: SessionInit, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct SyncWithPeerUpdate(pub IntentUpdate); + +#[derive(Debug, Serialize, Deserialize)] +pub enum SyncWithPeerResponse { + Started, + Event(Event), +} From 74f90ad46832ac1ebbe143e3834b396abb7e370e Mon Sep 17 00:00:00 2001 From: Frando Date: Tue, 27 Aug 2024 13:48:32 +0200 Subject: [PATCH 165/198] feat: add willow client --- iroh-willow/examples/bench.rs | 2 + iroh-willow/src/engine/peer_manager.rs | 6 +- iroh-willow/src/session/intents.rs | 61 ++++-- iroh/src/client.rs | 6 + iroh/src/client/spaces.rs | 260 +++++++++++++++++++++++++ 5 files changed, 315 insertions(+), 20 deletions(-) create mode 100644 iroh/src/client/spaces.rs diff --git a/iroh-willow/examples/bench.rs b/iroh-willow/examples/bench.rs index 80bc6ca47c..87be7bcbbf 100644 --- a/iroh-willow/examples/bench.rs +++ b/iroh-willow/examples/bench.rs @@ -86,6 +86,8 @@ async fn main() -> Result<()> { info!("betty has now {} entries", betty_count); assert_eq!(alfie_count, n_alfie + n_betty); assert_eq!(betty_count, n_alfie + n_betty); + alfie.shutdown().await?; + betty.shutdown().await?; Ok(()) } diff --git a/iroh-willow/src/engine/peer_manager.rs b/iroh-willow/src/engine/peer_manager.rs index 
68df7c87e2..9851b7544a 100644 --- a/iroh-willow/src/engine/peer_manager.rs +++ b/iroh-willow/src/engine/peer_manager.rs @@ -27,7 +27,7 @@ use crate::{ }, proto::wgps::AccessChallenge, session::{ - intents::{EventKind, Intent}, + intents::{EventKind, EventReceiver, Intent}, Error, InitialTransmission, Role, SessionEvent, SessionHandle, SessionInit, SessionUpdate, }, }; @@ -709,7 +709,7 @@ impl AcceptHandlers { #[derive(Debug)] struct EventForwarder { _join_handle: AbortingJoinHandle<()>, - stream_sender: mpsc::Sender<(NodeId, ReceiverStream)>, + stream_sender: mpsc::Sender<(NodeId, EventReceiver)>, } impl EventForwarder { @@ -737,7 +737,7 @@ impl EventForwarder { } } - pub async fn add_intent(&self, peer: NodeId, event_stream: ReceiverStream) { + pub async fn add_intent(&self, peer: NodeId, event_stream: EventReceiver) { self.stream_sender.send((peer, event_stream)).await.ok(); } } diff --git a/iroh-willow/src/session/intents.rs b/iroh-willow/src/session/intents.rs index eda8d9ade5..5095da03af 100644 --- a/iroh-willow/src/session/intents.rs +++ b/iroh-willow/src/session/intents.rs @@ -16,7 +16,7 @@ use std::{ use anyhow::Result; use futures_lite::{Stream, StreamExt}; -use futures_util::FutureExt; +use futures_util::{FutureExt, Sink, SinkExt}; use genawaiter::rc::Co; use serde::{Deserialize, Serialize}; use tokio::sync::mpsc; @@ -132,10 +132,7 @@ impl Intent { ) -> (Self, IntentHandle) { let (event_tx, event_rx) = mpsc::channel(event_cap); let (update_tx, update_rx) = mpsc::channel(update_cap); - let handle = IntentHandle { - event_rx, - update_tx, - }; + let handle = IntentHandle::from_mpsc(update_tx, event_rx); let channels = IntentChannels { event_tx, update_rx, @@ -178,19 +175,49 @@ pub enum Completion { /// otherwise the session will be blocked from progressing. /// /// The [`IntentHandle`] can also submit new interests into the session. 
-#[derive(Debug)] +#[derive(derive_more::Debug)] pub struct IntentHandle { - event_rx: Receiver, - update_tx: Sender, + #[debug("EventReceiver")] + event_rx: EventReceiver, + #[debug("UpdateSender")] + update_tx: UpdateSender, } +pub type UpdateSender = + Pin> + Send + 'static>>; +pub type EventReceiver = Pin + Send + 'static>>; + +#[derive(Debug, thiserror::Error)] +#[error("Failed to send update: Receiver dropped.")] +pub struct SendError(pub T); + impl IntentHandle { + pub fn new( + update_tx: UpdateSender, + event_rx: EventReceiver, + // PollSender::new(self.update_tx), + // ReceiverStream::new(self.event_rx), + ) -> Self { + Self { + update_tx, + event_rx, + } + } + + pub(crate) fn from_mpsc( + update_tx: mpsc::Sender, + event_rx: mpsc::Receiver, + ) -> Self { + let update_tx = PollSender::new(update_tx); + let update_tx = update_tx + .sink_map_err(|err| SendError(err.into_inner().expect("invalid use of Sink trait"))); + let event_rx = ReceiverStream::new(event_rx); + Self::new(Box::pin(update_tx), Box::pin(event_rx)) + } + /// Split the [`IntentHandle`] into a update sink and event stream. - pub fn split(self) -> (PollSender, ReceiverStream) { - ( - PollSender::new(self.update_tx), - ReceiverStream::new(self.event_rx), - ) + pub fn split(self) -> (UpdateSender, EventReceiver) { + (self.update_tx, self.event_rx) } /// Wait for the intent to be completed. @@ -204,7 +231,7 @@ impl IntentHandle { pub async fn complete(&mut self) -> Result> { let mut complete = false; let mut partial = false; - while let Some(event) = self.event_rx.recv().await { + while let Some(event) = self.event_rx.next().await { match event { EventKind::ReconciledAll => complete = true, // TODO: track partial reconciliations @@ -228,7 +255,7 @@ impl IntentHandle { /// /// The [`IntentHandle`] will then receive events for these interests in addition to already /// submitted interests. 
- pub async fn add_interests(&self, interests: impl Into) -> Result<()> { + pub async fn add_interests(&mut self, interests: impl Into) -> Result<()> { self.update_tx .send(IntentUpdate::AddInterests(interests.into())) .await?; @@ -236,7 +263,7 @@ impl IntentHandle { } /// Close the intent. - pub async fn close(&self) { + pub async fn close(&mut self) { self.update_tx.send(IntentUpdate::Close).await.ok(); } } @@ -245,7 +272,7 @@ impl Stream for IntentHandle { type Item = EventKind; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.event_rx).poll_recv(cx) + Pin::new(&mut self.event_rx).poll_next(cx) } } diff --git a/iroh/src/client.rs b/iroh/src/client.rs index 8e8d9942df..04c3332898 100644 --- a/iroh/src/client.rs +++ b/iroh/src/client.rs @@ -24,6 +24,7 @@ pub mod blobs; pub mod docs; pub mod gossip; pub mod net; +pub mod spaces; pub mod tags; /// Iroh rpc connection - boxed so that we can have a concrete type. @@ -72,6 +73,11 @@ impl Iroh { docs::Client::ref_cast(&self.rpc) } + /// Returns the spaces client. + pub fn spaces(&self) -> &spaces::Client { + spaces::Client::ref_cast(&self.rpc) + } + /// Returns the authors client. pub fn authors(&self) -> &authors::Client { authors::Client::ref_cast(&self.rpc) diff --git a/iroh/src/client/spaces.rs b/iroh/src/client/spaces.rs new file mode 100644 index 0000000000..81c0cf4e3f --- /dev/null +++ b/iroh/src/client/spaces.rs @@ -0,0 +1,260 @@ +//! API for managing iroh spaces +//! +//! iroh spaces is an implementation of the [Willow] protocol. +//! The main entry point is the [`Client`]. +//! +//! You obtain a [`Client`] via [`Iroh::spaces()`](crate::client::Iroh::spaces). +//! +//! 
[Willow]: https://willowprotocol.org/ + +use std::{ + pin::Pin, + task::{Context, Poll}, +}; + +use anyhow::Result; +use futures_lite::{Stream, StreamExt}; +use futures_util::{Sink, SinkExt}; +use iroh_base::key::NodeId; +use iroh_willow::{ + form::{AuthForm, SerdeEntryOrForm as EntryOrForm}, + interest::{CapSelector, CapabilityPack, DelegateTo, Interests}, + proto::{ + data_model::{AuthorisedEntry, Entry}, + grouping::Range3d, + keys::{NamespaceId, NamespaceKind, UserId}, + meadowcap::{AccessMode, SecretKey}, + }, + session::{ + intents::{serde_encoding::Event, Completion, IntentUpdate}, + SessionInit, + }, +}; +use ref_cast::RefCast; + +use crate::client::RpcClient; +use crate::rpc_protocol::spaces::*; + +/// Iroh Willow client. +#[derive(Debug, Clone, RefCast)] +#[repr(transparent)] +pub struct Client { + pub(super) rpc: RpcClient, +} + +impl Client { + /// Insert a new entry. + /// + /// `entry` can be a [`EntryForm`] or a `Entry`. + /// `auth` can either be a [`AuthForm`] or simply a [`UserId`]. + /// When passing a [`UserId`], a matching capability will be selected for the user. + /// If you want to select the capability to use more specifically, use the methods on [`AuthForm`]. + // TODO: Not sure I like the impl Into, better change to two methods. + pub async fn insert_entry( + &self, + entry: impl Into, + auth: impl Into, + ) -> Result<()> { + let req = InsertEntryRequest { + entry: entry.into(), + auth: auth.into(), + }; + let _res: InsertEntryResponse = self.rpc.rpc(req).await??; + Ok(()) + } + + /// Ingest an authorised entry. + // TODO: Not sure if we should expose this on the client at all. + pub async fn ingest_entry(&self, authorised_entry: AuthorisedEntry) -> Result<()> { + let req = IngestEntryRequest { authorised_entry }; + self.rpc.rpc(req).await??; + Ok(()) + } + + /// Get entries from the Willow store. 
+ pub async fn get_entries( + &self, + namespace: NamespaceId, + range: Range3d, + ) -> Result>> { + let req = GetEntriesRequest { namespace, range }; + let stream = self.rpc.try_server_streaming(req).await?; + Ok(stream.map(|res| res.map(|r| r.0.into()).map_err(Into::into))) + } + + /// Create a new namespace in the Willow store. + pub async fn create_namespace( + &self, + kind: NamespaceKind, + owner: UserId, + ) -> Result { + let req = CreateNamespaceRequest { kind, owner }; + let res: CreateNamespaceResponse = self.rpc.rpc(req).await??; + Ok(res.0) + } + + /// Create a new user in the Willow store. + pub async fn create_user(&self) -> Result { + let req = CreateUserRequest; + let res: CreateUserResponse = self.rpc.rpc(req).await??; + Ok(res.0) + } + + /// Delegate capabilities to another user. + /// + /// Returns a `Vec` of [`CapabilityPack`]s, which can be serialized. + pub async fn delegate_caps( + &self, + from: CapSelector, + access_mode: AccessMode, + to: DelegateTo, + ) -> Result> { + let req = DelegateCapsRequest { + from, + access_mode, + to, + }; + let res = self.rpc.rpc(req).await??; + Ok(res.0) + } + + /// Import capabilities. + pub async fn import_caps(&self, caps: Vec) -> Result<()> { + let req = ImportCapsRequest { caps }; + self.rpc.rpc(req).await??; + Ok(()) + } + + /// Synchronize with a peer. 
+ pub async fn sync_with_peer(&self, peer: NodeId, init: SessionInit) -> Result { + let req = SyncWithPeerRequest { peer, init }; + let (update_tx, event_rx) = self.rpc.bidi(req).await?; + + let update_tx = SinkExt::with( + update_tx, + |update| async move { Ok(SyncWithPeerUpdate(update)) }, + ); + let update_tx: UpdateSender = Box::pin(update_tx); + + let event_rx = Box::pin(event_rx.map(|res| match res { + Ok(Ok(SyncWithPeerResponse::Event(event))) => event, + Ok(Ok(SyncWithPeerResponse::Started)) => Event::ReconciledAll, // or another appropriate event + Err(e) => Event::Abort { + error: e.to_string(), + }, + Ok(Err(e)) => Event::Abort { + error: e.to_string(), + }, + })); + + Ok(IntentHandle::new(update_tx, event_rx)) + } + + /// Import a secret into the Willow store. + pub async fn import_secret(&self, secret: impl Into) -> Result<()> { + let req = InsertSecretRequest { + secret: secret.into(), + }; + self.rpc.rpc(req).await??; + Ok(()) + } +} +/// Handle to a synchronization intent. +/// +/// The `IntentHandle` is a `Stream` of `Event`s. It *must* be progressed in a loop, +/// otherwise the session will be blocked from progressing. +/// +/// The `IntentHandle` can also submit new interests into the session. +/// +// This version of IntentHandle differs from the one in iroh-willow intents module +// by using the Event type instead of EventKind, which serializes the error to a string +// to cross the RPC boundary. Maybe look into making the main iroh_willow Error type +// serializable instead. +#[derive(derive_more::Debug)] +pub struct IntentHandle { + #[debug("UpdateSender")] + update_tx: UpdateSender, + #[debug("EventReceiver")] + event_rx: EventReceiver, +} + +/// Sends updates into a reconciliation intent. +/// +/// Can be obtained from [`IntentHandle::split`]. +pub type UpdateSender = Pin + Send + 'static>>; + +/// Receives events for a reconciliation intent. +/// +/// Can be obtained from [`IntentHandle::split`]. 
+pub type EventReceiver = Pin + Send + 'static>>; + +impl IntentHandle { + /// Creates a new `IntentHandle` with the given update sender and event receiver. + fn new(update_tx: UpdateSender, event_rx: EventReceiver) -> Self { + Self { + update_tx, + event_rx, + } + } + + /// Splits the `IntentHandle` into a update sender sink and event receiver stream. + /// + /// The intent will be dropped once both the sender and receiver are dropped. + pub fn split(self) -> (UpdateSender, EventReceiver) { + (self.update_tx, self.event_rx) + } + + /// Waits for the intent to be completed. + /// + /// This future completes either if the session terminated, or if all interests of the intent + /// are reconciled and the intent is not in live data mode. + /// + /// Note that successful completion of this future does not guarantee that all interests were + /// fulfilled. + pub async fn complete(&mut self) -> Result { + let mut complete = false; + let mut partial = false; + while let Some(event) = self.event_rx.next().await { + match event { + Event::ReconciledAll => complete = true, + Event::Reconciled { .. } => partial = true, + Event::Abort { error } => return Err(anyhow::anyhow!(error)), + _ => {} + } + } + let completion = if complete { + Completion::Complete + } else if partial { + Completion::Partial + } else { + Completion::Nothing + }; + + Ok(completion) + } + + /// Submit new synchronisation interests into the session. + /// + /// The `IntentHandle` will then receive events for these interests in addition to already + /// submitted interests. + pub async fn add_interests(&mut self, interests: impl Into) -> Result<()> { + self.update_tx + .send(IntentUpdate::AddInterests(interests.into())) + .await?; + Ok(()) + } + + // TODO: I think all should work via dropping, but let's make sure that is the case. + // /// Close the intent. 
+ // pub async fn close(&mut self) { + // self.update_tx.send(IntentUpdate::Close).await.ok(); + // } +} + +impl Stream for IntentHandle { + type Item = Event; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.event_rx).poll_next(cx) + } +} From f79a079514856366668569229d98eb02d56d0a01 Mon Sep 17 00:00:00 2001 From: Frando Date: Wed, 28 Aug 2024 00:07:33 +0200 Subject: [PATCH 166/198] refactor: remove willow feature flag for now --- iroh/Cargo.toml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/iroh/Cargo.toml b/iroh/Cargo.toml index a7226a6787..8c661b5685 100644 --- a/iroh/Cargo.toml +++ b/iroh/Cargo.toml @@ -31,7 +31,7 @@ iroh-base = { version = "0.23.0", path = "../iroh-base", features = ["key"] } iroh-io = { version = "0.6.0", features = ["stats"] } iroh-metrics = { version = "0.23.0", path = "../iroh-metrics", optional = true } iroh-net = { version = "0.23.0", path = "../iroh-net", features = ["discovery-local-network"] } -iroh-willow = { version = "0.23.0", path = "../iroh-willow", optional = true } +iroh-willow = { version = "0.23.0", path = "../iroh-willow" } nested_enum_utils = "0.1.0" num_cpus = { version = "1.15.0" } portable-atomic = "1" @@ -63,7 +63,7 @@ console = { version = "0.15.5", optional = true } url = { version = "2.5.0", features = ["serde"] } [features] -default = ["metrics", "fs-store", "willow"] +default = ["metrics", "fs-store"] metrics = ["iroh-metrics", "iroh-blobs/metrics"] fs-store = ["iroh-blobs/fs-store"] test = [] @@ -71,7 +71,6 @@ examples = ["dep:clap", "dep:indicatif"] discovery-local-network = ["iroh-net/discovery-local-network", "examples", "dep:console"] discovery-pkarr-dht = ["iroh-net/discovery-pkarr-dht"] test-utils = ["iroh-net/test-utils"] -willow = ["dep:iroh-willow"] [dev-dependencies] anyhow = { version = "1" } From 591c84b6455e5664c3c5ea3aa80bf5331f5d7a1f Mon Sep 17 00:00:00 2001 From: Frando Date: Wed, 28 Aug 2024 11:45:02 +0200 Subject: [PATCH 
167/198] feat: make the spaces client work, add test --- iroh-willow/examples/bench.rs | 2 +- iroh-willow/src/engine.rs | 4 +- iroh-willow/src/engine/actor.rs | 54 +++- iroh-willow/src/form.rs | 34 +- iroh-willow/src/interest.rs | 42 ++- iroh-willow/src/net.rs | 7 +- iroh-willow/src/session/reconciler.rs | 2 +- iroh-willow/src/store.rs | 15 +- iroh-willow/src/store/auth.rs | 4 +- iroh-willow/src/store/entry.rs | 5 + iroh-willow/src/store/memory.rs | 23 +- iroh-willow/src/store/traits.rs | 13 +- iroh-willow/tests/basic.rs | 4 +- iroh/src/client/spaces.rs | 442 ++++++++++++++++++++++---- iroh/src/node.rs | 5 + iroh/src/node/builder.rs | 4 + iroh/src/node/protocol.rs | 6 + iroh/src/node/rpc/spaces.rs | 32 +- iroh/src/rpc_protocol/spaces.rs | 68 +++- iroh/tests/spaces.rs | 134 ++++++++ 20 files changed, 758 insertions(+), 142 deletions(-) create mode 100644 iroh/tests/spaces.rs diff --git a/iroh-willow/examples/bench.rs b/iroh-willow/examples/bench.rs index 87be7bcbbf..d24e2b8c60 100644 --- a/iroh-willow/examples/bench.rs +++ b/iroh-willow/examples/bench.rs @@ -229,7 +229,7 @@ mod util { let cap_for_betty = alfie .delegate_caps( - CapSelector::widest(namespace_id), + CapSelector::any(namespace_id), AccessMode::Write, DelegateTo::new(user_betty, RestrictArea::None), ) diff --git a/iroh-willow/src/engine.rs b/iroh-willow/src/engine.rs index d33a480e81..a8486ef645 100644 --- a/iroh-willow/src/engine.rs +++ b/iroh-willow/src/engine.rs @@ -104,14 +104,14 @@ impl Engine { /// /// This will try to close all connections gracefully for up to 10 seconds, /// and abort them otherwise. 
- pub async fn shutdown(self) -> Result<()> { + pub async fn shutdown(mut self) -> Result<()> { debug!("shutdown engine"); let (reply, reply_rx) = oneshot::channel(); self.peer_manager_inbox .send(peer_manager::Input::Shutdown { reply }) .await?; reply_rx.await?; - let res = self.peer_manager_task.await; + let res = (&mut self.peer_manager_task).await; match res { Err(err) => error!(?err, "peer manager task panicked"), Ok(Err(err)) => error!(?err, "peer manager task failed"), diff --git a/iroh-willow/src/engine/actor.rs b/iroh-willow/src/engine/actor.rs index 3295b208fd..37e626a7ef 100644 --- a/iroh-willow/src/engine/actor.rs +++ b/iroh-willow/src/engine/actor.rs @@ -14,7 +14,7 @@ use crate::{ interest::{CapSelector, CapabilityPack, DelegateTo, InterestMap, Interests}, net::ConnHandle, proto::{ - data_model::{AuthorisedEntry, Entry}, + data_model::{AuthorisedEntry, Path, SubspaceId}, grouping::Range3d, keys::{NamespaceId, NamespaceKind, UserId, UserSecretKey}, meadowcap::{self, AccessMode}, @@ -72,7 +72,7 @@ impl ActorHandle { Ok(()) } - pub async fn ingest_entry(&self, authorised_entry: AuthorisedEntry) -> Result<()> { + pub async fn ingest_entry(&self, authorised_entry: AuthorisedEntry) -> Result { let (reply, reply_rx) = oneshot::channel(); self.send(Input::IngestEntry { authorised_entry, @@ -80,15 +80,15 @@ impl ActorHandle { reply, }) .await?; - reply_rx.await??; - Ok(()) + let inserted = reply_rx.await??; + Ok(inserted) } pub async fn insert_entry( &self, entry: impl Into, auth: impl Into, - ) -> Result<(Entry, bool)> { + ) -> Result<(AuthorisedEntry, bool)> { let (reply, reply_rx) = oneshot::channel(); self.send(Input::InsertEntry { entry: entry.into(), @@ -108,11 +108,28 @@ impl ActorHandle { Ok(()) } + pub async fn get_entry( + &self, + namespace: NamespaceId, + subspace: SubspaceId, + path: Path, + ) -> Result> { + let (reply, reply_rx) = oneshot::channel(); + self.send(Input::GetEntry { + namespace, + subspace, + path, + reply, + }) + .await?; + 
reply_rx.await? + } + pub async fn get_entries( &self, namespace: NamespaceId, range: Range3d, - ) -> Result>> { + ) -> Result>> { let (tx, rx) = flume::bounded(1024); self.send(Input::GetEntries { namespace, @@ -220,7 +237,13 @@ pub enum Input { namespace: NamespaceId, range: Range3d, #[debug(skip)] - reply: flume::Sender>, + reply: flume::Sender>, + }, + GetEntry { + namespace: NamespaceId, + subspace: SubspaceId, + path: Path, + reply: oneshot::Sender>>, }, IngestEntry { authorised_entry: AuthorisedEntry, @@ -230,7 +253,7 @@ pub enum Input { InsertEntry { entry: EntryOrForm, auth: AuthForm, - reply: oneshot::Sender>, + reply: oneshot::Sender>, }, InsertSecret { secret: meadowcap::SecretKey, @@ -370,7 +393,7 @@ impl Actor { Err(err) => reply.send(Err(err)).map_err(send_reply_error), Ok(snapshot) => { self.tasks.spawn_local(async move { - let iter = snapshot.get_entries(namespace, &range); + let iter = snapshot.get_authorised_entries(namespace, &range); for entry in iter { if reply.send_async(entry).await.is_err() { break; @@ -381,6 +404,19 @@ impl Actor { } } } + Input::GetEntry { + namespace, + subspace, + path, + reply, + } => { + let res = self + .store + .entries() + .reader() + .get_entry(namespace, subspace, &path); + send_reply(reply, res) + } Input::IngestEntry { authorised_entry, origin, diff --git a/iroh-willow/src/form.rs b/iroh-willow/src/form.rs index a03448b6fa..6f36267377 100644 --- a/iroh-willow/src/form.rs +++ b/iroh-willow/src/form.rs @@ -15,8 +15,9 @@ use serde::{Deserialize, Serialize}; use tokio::io::AsyncRead; use crate::proto::{ - data_model::{self, Entry, NamespaceId, Path, SerdeWriteCapability, SubspaceId, Timestamp}, + data_model::{self, Entry, NamespaceId, Path, SubspaceId, Timestamp}, keys::UserId, + meadowcap::{self, WriteCapability}, }; /// Sources where payload data can come from. @@ -25,6 +26,9 @@ pub enum PayloadForm { /// Set the payload hash directly. The blob must exist in the node's blob store, this will fail /// otherwise. 
Hash(Hash), + /// Set the payload hash directly. The blob must exist in the node's blob store, this will fail + /// otherwise. + HashUnchecked(Hash, u64), /// Import data from the provided bytes and set as payload. #[debug("Bytes({})", _0.len())] Bytes(Bytes), @@ -49,6 +53,7 @@ impl PayloadForm { let entry = entry.ok_or_else(|| anyhow::anyhow!("hash not foundA"))?; (digest, entry.size().value()) } + PayloadForm::HashUnchecked(digest, len) => (digest, len), PayloadForm::Bytes(bytes) => { let len = bytes.len(); let temp_tag = store.import_bytes(bytes, BlobFormat::Raw).await?; @@ -118,7 +123,7 @@ pub enum AuthForm { /// user. Any(UserId), /// Use the provided [`WriteCapability`]. - Exact(SerdeWriteCapability), + Exact(#[serde(with = "meadowcap::serde_encoding::mc_capability")] WriteCapability), } impl AuthForm { @@ -134,18 +139,20 @@ impl AuthForm { /// Set the subspace either to a provided [`SubspaceId`], or use the user authenticating the entry /// as subspace. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Default)] pub enum SubspaceForm { /// Set the subspace to the [`UserId`] of the user authenticating the entry. + #[default] User, /// Set the subspace to the provided [`SubspaceId`]. Exact(SubspaceId), } /// Set the timestamp either to the provided [`Timestamp`] or to the current system time. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Default)] pub enum TimestampForm { /// Set the timestamp to the current system time. + #[default] Now, /// Set the timestamp to the provided value. Exact(Timestamp), @@ -175,20 +182,23 @@ pub struct SerdeEntryForm { #[serde(with = "data_model::serde_encoding::path")] pub path: Path, pub timestamp: TimestampForm, - pub payload: SerdePayloadForm, + pub payload: PayloadForm2, } +/// #[derive(Debug, Serialize, Deserialize)] -pub enum SerdePayloadForm { - /// Set the payload hash directly. 
The blob must exist in the node's blob store, this will fail - /// otherwise. - Hash(Hash), +pub enum PayloadForm2 { + /// Make sure the hash is available in the blob store, and use the length from the blob store. + Checked(Hash), + /// Insert with the specified hash and length, without checking if the blob is in the local blob store. + Unchecked(Hash, u64), } -impl From for PayloadForm { - fn from(value: SerdePayloadForm) -> Self { +impl From for PayloadForm { + fn from(value: PayloadForm2) -> Self { match value { - SerdePayloadForm::Hash(hash) => PayloadForm::Hash(hash), + PayloadForm2::Checked(hash) => PayloadForm::Hash(hash), + PayloadForm2::Unchecked(hash, len) => PayloadForm::HashUnchecked(hash, len), } } } diff --git a/iroh-willow/src/interest.rs b/iroh-willow/src/interest.rs index b76efce869..efe6464d4b 100644 --- a/iroh-willow/src/interest.rs +++ b/iroh-willow/src/interest.rs @@ -62,14 +62,20 @@ impl IntoAreaOfInterest for Area { } impl InterestBuilder { - /// Add the full area of a capability we have into the interests. + /// Add a capability and areas of interest /// /// See [`CapSelector`] for how to specify the capability to use. - pub fn add_full_cap(mut self, cap: impl Into) -> Self { + pub fn add(mut self, cap: impl Into, areas: AreaOfInterestSelector) -> Self { let cap = cap.into(); - self.0.insert(cap, AreaOfInterestSelector::Widest); + self.0.insert(cap, areas); self } + /// Add the full area of a capability we have into the interests. + /// + /// See [`CapSelector`] for how to specify the capability to use. + pub fn add_full_cap(self, cap: impl Into) -> Self { + self.add(cap, AreaOfInterestSelector::Widest) + } /// Add a specific area included in one of our capabilities into the interests. /// @@ -149,14 +155,14 @@ pub struct CapSelector { /// The namespace to which the capability must grant access. pub namespace_id: NamespaceId, /// Select the user who may use the capability. 
- pub receiver: ReceiverSelector, + pub receiver: UserSelector, /// Select the area to which the capability grants access. pub granted_area: AreaSelector, } impl From for CapSelector { fn from(value: NamespaceId) -> Self { - Self::widest(value) + Self::any(value) } } @@ -171,7 +177,7 @@ impl CapSelector { /// Creates a new [`CapSelector`]. pub fn new( namespace_id: NamespaceId, - receiver: ReceiverSelector, + receiver: UserSelector, granted_area: AreaSelector, ) -> Self { Self { @@ -186,7 +192,7 @@ impl CapSelector { pub fn with_user(namespace_id: NamespaceId, user_id: UserId) -> Self { Self::new( namespace_id, - ReceiverSelector::Exact(user_id), + UserSelector::Exact(user_id), AreaSelector::Widest, ) } @@ -196,13 +202,13 @@ impl CapSelector { /// Will use any user available in our secret store and select the capability which grants the /// widest area. // TODO: Document exact selection process if there are capabilities with distinct areas. - pub fn widest(namespace: NamespaceId) -> Self { - Self::new(namespace, ReceiverSelector::Any, AreaSelector::Widest) + pub fn any(namespace: NamespaceId) -> Self { + Self::new(namespace, UserSelector::Any, AreaSelector::Widest) } /// Select a capability which authorises writing the provided `entry` on behalf of the provided /// `user_id`. - pub fn for_entry(entry: &Entry, user_id: ReceiverSelector) -> Self { + pub fn for_entry(entry: &Entry, user_id: UserSelector) -> Self { let granted_area = AreaSelector::ContainsPoint(Point::from_entry(entry)); Self { namespace_id: *entry.namespace_id(), @@ -216,7 +222,7 @@ impl CapSelector { #[derive( Debug, Default, Clone, Copy, Eq, PartialEq, derive_more::From, Serialize, Deserialize, Hash, )] -pub enum ReceiverSelector { +pub enum UserSelector { /// The receiver may be any user for which we have a secret key stored. 
#[default] Any, @@ -224,7 +230,7 @@ pub enum ReceiverSelector { Exact(UserId), } -impl ReceiverSelector { +impl UserSelector { pub fn includes(&self, user: &UserId) -> bool { match self { Self::Any => true, @@ -274,6 +280,13 @@ impl CapabilityPack { } } + pub fn namespace(&self) -> NamespaceId { + match self { + CapabilityPack::Read(cap) => cap.namespace(), + CapabilityPack::Write(cap) => *cap.granted_namespace(), + } + } + pub fn validate(&self) -> Result<(), InvalidCapabilityPack> { // meadowcap capability themselves are validated on creation/deserialization. let is_valid = match self { @@ -321,14 +334,15 @@ impl DelegateTo { } // TODO: This doesn't really belong into this module. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Default)] pub enum RestrictArea { + #[default] None, Restrict(#[serde(with = "grouping::serde_encoding::area")] Area), } impl RestrictArea { - pub fn with_default(self, default: Area) -> Area { + pub fn or_default(self, default: Area) -> Area { match self { RestrictArea::None => default.clone(), RestrictArea::Restrict(area) => area, diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 990b6fc567..e2b1a6f45b 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -476,7 +476,7 @@ mod tests { let cap_for_betty = handle_alfie .delegate_caps( - CapSelector::widest(namespace_id), + CapSelector::any(namespace_id), AccessMode::Write, DelegateTo::new(user_betty, RestrictArea::None), ) @@ -607,7 +607,7 @@ mod tests { let cap_for_betty = handle_alfie .delegate_caps( - CapSelector::widest(namespace_id), + CapSelector::any(namespace_id), AccessMode::Write, DelegateTo::new(user_betty, RestrictArea::None), ) @@ -778,6 +778,7 @@ mod tests { let entries: Result> = store .get_entries(namespace, Range3d::new_full()) .await? 
+ .map(|entry| entry.map(|entry| entry.into_parts().0)) .try_collect() .await; entries @@ -804,7 +805,7 @@ mod tests { }; let (entry, inserted) = handle.insert_entry(entry, AuthForm::Any(user_id)).await?; assert!(inserted); - track_entries.extend([entry]); + track_entries.extend([entry.into_parts().0]); } Ok(()) } diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index 29a7df0258..edd4bf6379 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -565,7 +565,7 @@ impl Target { for authorised_entry in self .snapshot - .get_entries_with_authorisation(self.namespace(), range) + .get_authorised_entries(self.namespace(), range) { let authorised_entry = authorised_entry?; let (entry, token) = authorised_entry.into_parts(); diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index 17d34d64e4..e32511ae62 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -11,7 +11,7 @@ use rand_core::CryptoRngCore; use crate::{ form::{AuthForm, EntryForm, EntryOrForm, SubspaceForm, TimestampForm}, - interest::{CapSelector, ReceiverSelector}, + interest::{CapSelector, UserSelector}, proto::{ data_model::Entry, data_model::{AuthorisedEntry, PayloadDigest}, @@ -66,16 +66,20 @@ impl Store { &self.auth } - pub async fn insert_entry(&self, entry: EntryOrForm, auth: AuthForm) -> Result<(Entry, bool)> { + pub async fn insert_entry( + &self, + entry: EntryOrForm, + auth: AuthForm, + ) -> Result<(AuthorisedEntry, bool)> { let user_id = auth.user_id(); let entry = match entry { EntryOrForm::Entry(entry) => Ok(entry), EntryOrForm::Form(form) => self.form_to_entry(form, user_id).await, }?; let capability = match auth { - AuthForm::Exact(cap) => cap.0, + AuthForm::Exact(cap) => cap, AuthForm::Any(user_id) => { - let selector = CapSelector::for_entry(&entry, ReceiverSelector::Exact(user_id)); + let selector = CapSelector::for_entry(&entry, UserSelector::Exact(user_id)); self.auth() 
.get_write_cap(&selector)? .ok_or_else(|| anyhow!("no write capability available"))? @@ -96,8 +100,7 @@ impl Store { let inserted = self .entries() .ingest(&authorised_entry, EntryOrigin::Local)?; - let (entry, _token) = authorised_entry.into_parts(); - Ok((entry, inserted)) + Ok((authorised_entry, inserted)) } pub fn create_namespace( diff --git a/iroh-willow/src/store/auth.rs b/iroh-willow/src/store/auth.rs index 597cae90ae..3a65ca1dcc 100644 --- a/iroh-willow/src/store/auth.rs +++ b/iroh-willow/src/store/auth.rs @@ -220,7 +220,7 @@ impl Auth { .secrets .get_user(user_id) .ok_or(AuthError::MissingUserSecret(*user_id))?; - let area = restrict_area.with_default(read_cap.granted_area()); + let area = restrict_area.or_default(read_cap.granted_area()); let new_read_cap = read_cap.delegate(&user_secret, &to, &area)?; let new_subspace_cap = if let Some(subspace_cap) = subspace_cap { @@ -252,7 +252,7 @@ impl Auth { .secrets .get_user(cap.receiver()) .ok_or(AuthError::MissingUserSecret(*cap.receiver()))?; - let area = restrict_area.with_default(cap.granted_area()); + let area = restrict_area.or_default(cap.granted_area()); let new_cap = cap.delegate(&user_secret, &to, &area)?; Ok(CapabilityPack::Write(new_cap.into())) } diff --git a/iroh-willow/src/store/entry.rs b/iroh-willow/src/store/entry.rs index eb3c5d499e..499066f1ee 100644 --- a/iroh-willow/src/store/entry.rs +++ b/iroh-willow/src/store/entry.rs @@ -69,6 +69,11 @@ impl WatchableEntryStore { self.storage.snapshot() } + /// Returns a store reader. + pub fn reader(&self) -> ES::Reader { + self.storage.reader() + } + /// Ingest a new entry. 
/// /// Returns `true` if the entry was stored, and `false` if the entry already exists or is diff --git a/iroh-willow/src/store/memory.rs b/iroh-willow/src/store/memory.rs index f64407d941..c15aa9ca5a 100644 --- a/iroh-willow/src/store/memory.rs +++ b/iroh-willow/src/store/memory.rs @@ -14,7 +14,7 @@ use anyhow::Result; use crate::{ interest::{CapSelector, CapabilityPack}, proto::{ - data_model::{AuthorisedEntry, Entry, EntryExt, WriteCapability}, + data_model::{AuthorisedEntry, Entry, EntryExt, Path, SubspaceId, WriteCapability}, grouping::{Range, Range3d, RangeEnd}, keys::{NamespaceId, NamespaceSecretKey, UserId, UserSecretKey}, meadowcap::{self, is_wider_than, ReadAuthorisation}, @@ -185,7 +185,7 @@ impl traits::EntryReader for Rc> { Ok(self.get_entries(namespace, range).count() as u64) } - fn get_entries_with_authorisation<'a>( + fn get_authorised_entries<'a>( &'a self, namespace: NamespaceId, range: &Range3d, @@ -200,6 +200,25 @@ impl traits::EntryReader for Rc> { .collect::>() .into_iter() } + + fn get_entry( + &self, + namespace: NamespaceId, + subspace: SubspaceId, + path: &Path, + ) -> Result> { + let inner = self.borrow(); + let Some(entries) = inner.entries.get(&namespace) else { + return Ok(None); + }; + Ok(entries + .iter() + .find(|e| { + let e = e.entry(); + *e.namespace_id() == namespace && *e.subspace_id() == subspace && e.path() == path + }) + .cloned()) + } } impl traits::EntryStorage for Rc> { diff --git a/iroh-willow/src/store/traits.rs b/iroh-willow/src/store/traits.rs index 8cfd8571ea..ceb13dd671 100644 --- a/iroh-willow/src/store/traits.rs +++ b/iroh-willow/src/store/traits.rs @@ -7,7 +7,7 @@ use anyhow::Result; use crate::{ interest::{CapSelector, CapabilityPack}, proto::{ - data_model::{AuthorisedEntry, Entry, NamespaceId, WriteCapability}, + data_model::{AuthorisedEntry, Entry, NamespaceId, Path, SubspaceId, WriteCapability}, grouping::Range3d, keys::{NamespaceSecretKey, NamespaceSignature, UserId, UserSecretKey, UserSignature}, 
meadowcap::{self, ReadAuthorisation}, @@ -98,7 +98,14 @@ pub trait EntryReader: Debug + 'static { fn count(&self, namespace: NamespaceId, range: &Range3d) -> Result; - fn get_entries_with_authorisation<'a>( + fn get_entry( + &self, + namespace: NamespaceId, + subspace: SubspaceId, + path: &Path, + ) -> Result>; + + fn get_authorised_entries<'a>( &'a self, namespace: NamespaceId, range: &Range3d, @@ -109,7 +116,7 @@ pub trait EntryReader: Debug + 'static { namespace: NamespaceId, range: &Range3d, ) -> impl Iterator> { - self.get_entries_with_authorisation(namespace, range) + self.get_authorised_entries(namespace, range) .map(|e| e.map(|e| e.into_parts().0)) } } diff --git a/iroh-willow/tests/basic.rs b/iroh-willow/tests/basic.rs index 910a3eea3e..30afa0b3fd 100644 --- a/iroh-willow/tests/basic.rs +++ b/iroh-willow/tests/basic.rs @@ -416,7 +416,7 @@ mod util { let cap_for_betty = alfie .delegate_caps( - CapSelector::widest(namespace_id), + CapSelector::any(namespace_id), AccessMode::Write, DelegateTo::new(user_betty, RestrictArea::None), ) @@ -508,7 +508,7 @@ async fn peer_manager_big_payload() -> Result<()> { let entries: Vec<_> = entries.try_collect().await?; assert_eq!(entries.len(), 1); let entry = &entries[0]; - let hash: iroh_blobs::Hash = (*entry.payload_digest()).into(); + let hash: iroh_blobs::Hash = (*entry.entry().payload_digest()).into(); let blob = alfie.blobs.get(&hash).await?.expect("missing blob"); let actual = blob.data_reader().await?.read_to_end().await?; assert_eq!(actual.len(), payload.len()); diff --git a/iroh/src/client/spaces.rs b/iroh/src/client/spaces.rs index 81c0cf4e3f..a183302896 100644 --- a/iroh/src/client/spaces.rs +++ b/iroh/src/client/spaces.rs @@ -8,20 +8,27 @@ //! 
[Willow]: https://willowprotocol.org/ use std::{ + collections::HashMap, + path::PathBuf, pin::Pin, task::{Context, Poll}, }; use anyhow::Result; +use bytes::Bytes; use futures_lite::{Stream, StreamExt}; use futures_util::{Sink, SinkExt}; use iroh_base::key::NodeId; +use iroh_blobs::Hash; +use iroh_net::NodeAddr; use iroh_willow::{ - form::{AuthForm, SerdeEntryOrForm as EntryOrForm}, - interest::{CapSelector, CapabilityPack, DelegateTo, Interests}, + form::{AuthForm, PayloadForm2, SerdeEntryForm, SubspaceForm, TimestampForm}, + interest::{ + AreaOfInterestSelector, CapSelector, CapabilityPack, DelegateTo, Interests, RestrictArea, + }, proto::{ - data_model::{AuthorisedEntry, Entry}, - grouping::Range3d, + data_model::{AuthorisedEntry, Path, SubspaceId}, + grouping::{Area, Range3d}, keys::{NamespaceId, NamespaceKind, UserId}, meadowcap::{AccessMode, SecretKey}, }, @@ -31,6 +38,9 @@ use iroh_willow::{ }, }; use ref_cast::RefCast; +use serde::{Deserialize, Serialize}; +use tokio::io::AsyncRead; +use tokio_stream::StreamMap; use crate::client::RpcClient; use crate::rpc_protocol::spaces::*; @@ -43,54 +53,14 @@ pub struct Client { } impl Client { - /// Insert a new entry. - /// - /// `entry` can be a [`EntryForm`] or a `Entry`. - /// `auth` can either be a [`AuthForm`] or simply a [`UserId`]. - /// When passing a [`UserId`], a matching capability will be selected for the user. - /// If you want to select the capability to use more specifically, use the methods on [`AuthForm`]. - // TODO: Not sure I like the impl Into, better change to two methods. - pub async fn insert_entry( - &self, - entry: impl Into, - auth: impl Into, - ) -> Result<()> { - let req = InsertEntryRequest { - entry: entry.into(), - auth: auth.into(), - }; - let _res: InsertEntryResponse = self.rpc.rpc(req).await??; - Ok(()) - } - - /// Ingest an authorised entry. - // TODO: Not sure if we should expose this on the client at all. 
- pub async fn ingest_entry(&self, authorised_entry: AuthorisedEntry) -> Result<()> { - let req = IngestEntryRequest { authorised_entry }; - self.rpc.rpc(req).await??; - Ok(()) - } - - /// Get entries from the Willow store. - pub async fn get_entries( - &self, - namespace: NamespaceId, - range: Range3d, - ) -> Result>> { - let req = GetEntriesRequest { namespace, range }; - let stream = self.rpc.try_server_streaming(req).await?; - Ok(stream.map(|res| res.map(|r| r.0.into()).map_err(Into::into))) + fn net(&self) -> &super::net::Client { + super::net::Client::ref_cast(&self.rpc) } - /// Create a new namespace in the Willow store. - pub async fn create_namespace( - &self, - kind: NamespaceKind, - owner: UserId, - ) -> Result { + pub async fn create(&self, kind: NamespaceKind, owner: UserId) -> Result { let req = CreateNamespaceRequest { kind, owner }; - let res: CreateNamespaceResponse = self.rpc.rpc(req).await??; - Ok(res.0) + let res = self.rpc.rpc(req).await??; + Ok(Space::new(self.rpc.clone(), res.0)) } /// Create a new user in the Willow store. @@ -125,6 +95,34 @@ impl Client { Ok(()) } + /// Import a ticket and start to synchronize. 
+ pub async fn import_and_sync( + &self, + ticket: SpaceTicket, + ) -> Result<(Space, MergedIntentHandle)> { + if ticket.caps.is_empty() { + anyhow::bail!("Invalid ticket: Does not include any capabilities"); + } + let mut namespaces = ticket.caps.iter().map(|pack| pack.namespace()); + let namespace = namespaces.next().expect("just checked"); + if !namespaces.all(|n| n == namespace) { + anyhow::bail!("Invalid ticket: Capabilities do not all refer to the same namespace"); + } + + self.import_caps(ticket.caps).await?; + let interests = Interests::builder().add_full_cap(CapSelector::any(namespace)); + let init = SessionInit::reconcile_once(interests); + let mut intents = MergedIntentHandle::default(); + for addr in ticket.nodes { + let node_id = addr.node_id; + self.net().add_node_addr(addr).await?; + let intent = self.sync_with_peer(node_id, init.clone()).await?; + intents.insert(node_id, intent); + } + let space = Space::new(self.rpc.clone(), namespace); + Ok((space, intents)) + } + /// Synchronize with a peer. pub async fn sync_with_peer(&self, peer: NodeId, init: SessionInit) -> Result { let req = SyncWithPeerRequest { peer, init }; @@ -159,6 +157,265 @@ impl Client { Ok(()) } } + +/// A space to store entries in. +#[derive(Debug, Clone)] +pub struct Space { + rpc: RpcClient, + namespace_id: NamespaceId, +} + +impl Space { + fn new(rpc: RpcClient, namespace_id: NamespaceId) -> Self { + Self { rpc, namespace_id } + } + + fn blobs(&self) -> &super::blobs::Client { + super::blobs::Client::ref_cast(&self.rpc) + } + + fn spaces(&self) -> &Client { + Client::ref_cast(&self.rpc) + } + + fn net(&self) -> &super::net::Client { + super::net::Client::ref_cast(&self.rpc) + } + + /// Returns the identifier for this space. 
+ pub fn namespace_id(&self) -> NamespaceId { + self.namespace_id + } + + async fn insert(&self, entry: EntryForm, payload: PayloadForm2) -> Result { + let form = SerdeEntryForm { + namespace_id: self.namespace_id, + subspace_id: entry.subspace_id, + path: entry.path, + timestamp: entry.timestamp, + payload, + }; + let auth = entry.auth; + let req = InsertEntryRequest { + entry: form.into(), + auth, + }; + let res = self.rpc.rpc(req).await??; + Ok(res) + } + + // Insert a new entry. + + // `entry` can be a [`EntryForm`] or a `Entry`. + // `auth` can either be a [`AuthForm`] or simply a [`UserId`]. + // When passing a [`UserId`], a matching capability will be selected for the user. + // If you want to select the capability to use more specifically, use the methods on [`AuthForm`]. + // TODO: Not sure I like the impl Into, better change to two methods. + /// Inserts a new entry, with the payload digest set to a hash. + /// + /// Note that the payload must exist in the local blob store, otherwise the operation will fail. + pub async fn insert_hash(&self, entry: EntryForm, payload: Hash) -> Result { + let payload = PayloadForm2::Checked(payload); + self.insert(entry, payload).await + } + + /// Inserts a new entry, with the payload imported from a byte string. + pub async fn insert_bytes( + &self, + entry: EntryForm, + payload: impl Into, + ) -> Result { + let batch = self.blobs().batch().await?; + let tag = batch.add_bytes(payload).await?; + self.insert_hash(entry, *tag.hash()).await + } + + /// Inserts a new entry, with the payload imported from a byte reader. + pub async fn insert_reader( + &self, + entry: EntryForm, + payload: impl AsyncRead + Send + Unpin + 'static, + ) -> Result { + let batch = self.blobs().batch().await?; + let tag = batch.add_reader(payload).await?; + self.insert_hash(entry, *tag.hash()).await + } + + /// Inserts a new entry, with the payload imported from a byte stream. 
+ pub async fn insert_stream( + &self, + entry: EntryForm, + payload: impl Stream> + Send + Unpin + 'static, + ) -> Result { + let batch = self.blobs().batch().await?; + let tag = batch.add_stream(payload).await?; + self.insert_hash(entry, *tag.hash()).await + } + + /// Inserts a new entry, with the payload imported from a file. + pub async fn insert_from_path( + &self, + entry: EntryForm, + payload: PathBuf, + ) -> Result { + let batch = self.blobs().batch().await?; + let (tag, _len) = batch.add_file(payload).await?; + self.insert_hash(entry, *tag.hash()).await + } + + /// Ingest an authorised entry. + // TODO: Not sure if we should expose this on the client at all. + pub async fn ingest(&self, authorised_entry: AuthorisedEntry) -> Result<()> { + let req = IngestEntryRequest { authorised_entry }; + self.rpc.rpc(req).await??; + Ok(()) + } + + /// Get a single entry. + pub async fn get_one( + &self, + subspace: SubspaceId, + path: Path, + ) -> Result> { + let req = GetEntryRequest { + namespace: self.namespace_id, + subspace, + path, + }; + let entry = self.rpc.rpc(req).await??; + Ok(entry.0.map(Into::into)) + } + + /// Get entries by range. + pub async fn get_many( + &self, + range: Range3d, + ) -> Result>> { + let req = GetEntriesRequest { + namespace: self.namespace_id, + range, + }; + let stream = self.rpc.try_server_streaming(req).await?; + Ok(stream.map(|res| res.map(|r| r.0).map_err(Into::into))) + } + + /// Syncs with a peer and quit the session after a single reconciliation of the selected areas. + /// + /// Returns an [`IntentHandle`] that emits events for the reconciliation. If you want to wait for everything to complete, + /// await [`IntentHandle::complete`]. + /// + /// This will connect to the node, start a sync session, and submit all our capabilities for this namespace, + /// constrained to the selected areas. + /// + /// If you want to specify the capabilities to submit more concretely, use [`Client::sync_with_peer`]. 
+ pub async fn sync_once( + &self, + node: NodeId, + areas: AreaOfInterestSelector, + ) -> Result { + let cap = CapSelector::any(self.namespace_id); + let interests = Interests::builder().add(cap, areas); + let init = SessionInit::reconcile_once(interests); + self.spaces().sync_with_peer(node, init).await + } + + /// Sync with a peer and keep sending and receiving live updates for the selected areas. + /// + /// Returns an [`IntentHandle`] that emits events for the reconciliation. If you want to wait for everything to complete, + /// await [`IntentHandle::complete`]. + /// + /// This will connect to the node, start a sync session, and submit all our capabilities for this namespace, + /// constrained to the selected areas. + /// + /// If you want to specify the capabilities to submit more concretely, use [`Client::sync_with_peer`]. + pub async fn sync_continuously( + &self, + node: NodeId, + areas: AreaOfInterestSelector, + ) -> Result { + let cap = CapSelector::any(self.namespace_id); + let interests = Interests::builder().add(cap, areas); + let init = SessionInit::continuous(interests); + self.spaces().sync_with_peer(node, init).await + } + + /// Share access to this space, or parts of this space, with another user. + /// + /// This will use any matching capability as the source of the capability delegation. + /// If you want to specify more options, use [`Client::delegate_caps`]. 
+ pub async fn share( + &self, + receiver: UserId, + access_mode: AccessMode, + restrict_area: RestrictArea, + ) -> Result { + let caps = self + .spaces() + .delegate_caps( + CapSelector::any(self.namespace_id), + access_mode, + DelegateTo::new(receiver, restrict_area), + ) + .await?; + let node_addr = self.net().node_addr().await?; + Ok(SpaceTicket { + caps, + nodes: vec![node_addr], + }) + } + + /// TODO + pub async fn subscribe(&self, _area: Area) { + todo!() + } + + /// TODO + pub async fn subscribe_offset(&self, _area: Area, _offset: u64) { + todo!() + } +} + +/// A ticket to import and sync a space. +#[derive(Debug, Serialize, Deserialize)] +pub struct SpaceTicket { + /// Capabilities for a space. + pub caps: Vec, + /// List of nodes to sync with. + pub nodes: Vec, +} + +/// Form to insert a new entry +#[derive(Debug)] +pub struct EntryForm { + /// The authorisation, either an exact capability, or a user id to select a capability for automatically. + pub auth: AuthForm, + /// The subspace, either exact or automatically set to the authorising user. + pub subspace_id: SubspaceForm, + /// The path + pub path: Path, + /// The timestamp, either exact or automatically set current time. + pub timestamp: TimestampForm, +} + +impl EntryForm { + /// Creates a new entry form with the specified user and path. + /// + /// The subspace will be set to the specified user id. + /// The timestamp will be set to the current system time. + /// To authorise the entry, any applicable capability issued to the specified user id + /// that covers this path will be used, or return an error if no such capability is available. + pub fn new(user: UserId, path: Path) -> Self { + Self { + auth: AuthForm::Any(user), + path, + subspace_id: Default::default(), + timestamp: Default::default(), + } + } + + // TODO: Add builder methods for auth, subspace_id, timestamp +} + /// Handle to a synchronization intent. /// /// The `IntentHandle` is a `Stream` of `Event`s. 
It *must* be progressed in a loop, @@ -212,25 +469,7 @@ impl IntentHandle { /// Note that successful completion of this future does not guarantee that all interests were /// fulfilled. pub async fn complete(&mut self) -> Result { - let mut complete = false; - let mut partial = false; - while let Some(event) = self.event_rx.next().await { - match event { - Event::ReconciledAll => complete = true, - Event::Reconciled { .. } => partial = true, - Event::Abort { error } => return Err(anyhow::anyhow!(error)), - _ => {} - } - } - let completion = if complete { - Completion::Complete - } else if partial { - Completion::Partial - } else { - Completion::Nothing - }; - - Ok(completion) + complete(&mut self.event_rx).await } /// Submit new synchronisation interests into the session. @@ -258,3 +497,70 @@ impl Stream for IntentHandle { Pin::new(&mut self.event_rx).poll_next(cx) } } + +async fn complete(event_rx: &mut EventReceiver) -> Result { + let mut complete = false; + let mut partial = false; + while let Some(event) = event_rx.next().await { + match event { + Event::ReconciledAll => complete = true, + Event::Reconciled { .. } => partial = true, + Event::Abort { error } => return Err(anyhow::anyhow!(error)), + _ => {} + } + } + let completion = if complete { + Completion::Complete + } else if partial { + Completion::Partial + } else { + Completion::Nothing + }; + + Ok(completion) +} + +/// Merges synchronisation intent handles into one struct. +#[derive(Default, derive_more::Debug)] +#[debug("MergedIntentHandle({:?})", self.event_rx.keys().collect::>())] +pub struct MergedIntentHandle { + event_rx: StreamMap, + update_tx: HashMap, +} + +impl MergedIntentHandle { + /// Add an intent to this merged handle. + pub fn insert(&mut self, peer: NodeId, handle: IntentHandle) { + let (update_tx, event_rx) = handle.split(); + self.event_rx.insert(peer, event_rx); + self.update_tx.insert(peer, update_tx); + } + + /// Submit new synchronisation interests into all sessions. 
+ pub async fn add_interests(&mut self, interests: impl Into) -> Result<()> { + let interests: Interests = interests.into(); + let futs = self + .update_tx + .values_mut() + .map(|tx| tx.send(IntentUpdate::AddInterests(interests.clone()))); + futures_buffered::try_join_all(futs).await?; + Ok(()) + } + + /// Wait for all intents to complete. + pub async fn complete_all(mut self) -> HashMap> { + let streams = self.event_rx.iter_mut(); + let futs = + streams.map(|(node_id, stream)| async move { (*node_id, complete(stream).await) }); + let res = futures_buffered::join_all(futs).await; + res.into_iter().collect() + } +} + +impl Stream for MergedIntentHandle { + type Item = (NodeId, Event); + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.event_rx).poll_next(cx) + } +} diff --git a/iroh/src/node.rs b/iroh/src/node.rs index ac699da00c..23c1447685 100644 --- a/iroh/src/node.rs +++ b/iroh/src/node.rs @@ -498,6 +498,11 @@ impl NodeInner { } }; + // Shutdown willow gracefully. + if let Err(error) = self.willow.clone().shutdown().await { + warn!(?error, "Error while shutting down willow"); + } + // We ignore all errors during shutdown. let _ = tokio::join!( // Close the endpoint. diff --git a/iroh/src/node/builder.rs b/iroh/src/node/builder.rs index 8bfd488659..4bbf8ab387 100644 --- a/iroh/src/node/builder.rs +++ b/iroh/src/node/builder.rs @@ -773,6 +773,10 @@ impl ProtocolBuilder { self = self.accept(DOCS_ALPN, Arc::new(docs)); } + // TODO: Make willow optional. 
+ let willow_engine = self.inner.willow.clone(); + self = self.accept(iroh_willow::ALPN, Arc::new(willow_engine)); + self } diff --git a/iroh/src/node/protocol.rs b/iroh/src/node/protocol.rs index aef3b170f4..4ca0633d1f 100644 --- a/iroh/src/node/protocol.rs +++ b/iroh/src/node/protocol.rs @@ -110,3 +110,9 @@ impl ProtocolHandler for iroh_gossip::net::Gossip { Box::pin(async move { self.handle_connection(conn.await?).await }) } } + +impl ProtocolHandler for iroh_willow::Engine { + fn accept(self: Arc, conn: Connecting) -> BoxedFuture> { + Box::pin(async move { self.handle_connection(conn.await?).await }) + } +} diff --git a/iroh/src/node/rpc/spaces.rs b/iroh/src/node/rpc/spaces.rs index 2415d3c4a6..cc83bee73a 100644 --- a/iroh/src/node/rpc/spaces.rs +++ b/iroh/src/node/rpc/spaces.rs @@ -3,6 +3,7 @@ use futures_lite::Stream; use futures_util::SinkExt; use futures_util::StreamExt; use iroh_base::rpc::{RpcError, RpcResult}; +use iroh_willow::form::EntryOrForm; use iroh_willow::Engine; use quic_rpc::server::{RpcChannel, RpcServerError}; use tokio::sync::mpsc; @@ -28,17 +29,30 @@ pub(crate) async fn handle_rpc_request( engine .ingest_entry(req.authorised_entry) .await - .map(|_| IngestEntryResponse) + .map(|inserted| { + if inserted { + IngestEntrySuccess::Inserted + } else { + IngestEntrySuccess::Obsolete + } + }) .map_err(map_err) }) .await } InsertEntry(msg) => { chan.rpc(msg, engine, |engine, req| async move { + let entry = EntryOrForm::Form(req.entry.into()); engine - .insert_entry(req.entry, req.auth) + .insert_entry(entry, req.auth) .await - .map(|_| InsertEntryResponse) + .map(|(entry, inserted)| { + if inserted { + InsertEntrySuccess::Inserted(entry) + } else { + InsertEntrySuccess::Obsolete + } + }) .map_err(map_err) }) .await @@ -59,7 +73,17 @@ pub(crate) async fn handle_rpc_request( .get_entries(req.namespace, req.range) .await .map_err(map_err)?; - Ok(stream.map(|res| res.map(|e| GetEntriesResponse(e.into())).map_err(map_err))) + Ok(stream.map(|res| 
res.map(|e| GetEntriesResponse(e)).map_err(map_err))) + }) + .await + } + GetEntry(msg) => { + chan.rpc(msg, engine, |engine, req| async move { + engine + .get_entry(req.namespace, req.subspace, req.path) + .await + .map(|entry| GetEntryResponse(entry.map(Into::into))) + .map_err(map_err) }) .await } diff --git a/iroh/src/rpc_protocol/spaces.rs b/iroh/src/rpc_protocol/spaces.rs index dac5714adf..fea876f3a0 100644 --- a/iroh/src/rpc_protocol/spaces.rs +++ b/iroh/src/rpc_protocol/spaces.rs @@ -1,12 +1,15 @@ use iroh_base::rpc::{RpcError, RpcResult}; use iroh_net::NodeId; use iroh_willow::{ - form::{AuthForm, SerdeEntryOrForm}, + form::{AuthForm, SerdeEntryForm}, interest::{CapSelector, CapabilityPack, DelegateTo}, proto::{ - data_model::{self, serde_encoding::SerdeEntry, AuthorisedEntry}, + data_model::{ + self, serde_encoding::SerdeAuthorisedEntry, AuthorisedEntry, NamespaceId, Path, + SubspaceId, + }, grouping::{self, Range3d}, - keys::{NamespaceId, NamespaceKind, UserId}, + keys::{NamespaceKind, UserId}, meadowcap::{self, AccessMode, SecretKey}, }, session::{ @@ -25,14 +28,16 @@ use super::RpcService; #[enum_conversions(super::Request)] #[rpc_requests(RpcService)] pub enum Request { - #[rpc(response = RpcResult)] + #[rpc(response = RpcResult)] IngestEntry(IngestEntryRequest), - #[rpc(response = RpcResult)] + #[rpc(response = RpcResult)] InsertEntry(InsertEntryRequest), #[rpc(response = RpcResult)] InsertSecret(InsertSecretRequest), #[try_server_streaming(create_error = RpcError, item_error = RpcError, item = GetEntriesResponse)] GetEntries(GetEntriesRequest), + #[rpc(response = RpcResult)] + GetEntry(GetEntryRequest), #[rpc(response = RpcResult)] CreateNamespace(CreateNamespaceRequest), #[rpc(response = RpcResult)] @@ -52,10 +57,11 @@ pub enum Request { #[derive(strum::Display, Debug, Serialize, Deserialize)] #[enum_conversions(super::Response)] pub enum Response { - IngestEntry(RpcResult), - InsertEntry(RpcResult), + IngestEntry(RpcResult), + 
InsertEntry(RpcResult), InsertSecret(RpcResult), GetEntries(RpcResult), + GetEntry(RpcResult), CreateNamespace(RpcResult), CreateUser(RpcResult), DelegateCaps(RpcResult), @@ -70,17 +76,38 @@ pub struct IngestEntryRequest { pub authorised_entry: AuthorisedEntry, } -#[derive(Debug, Serialize, Deserialize)] -pub struct IngestEntryResponse; - #[derive(Debug, Serialize, Deserialize)] pub struct InsertEntryRequest { - pub entry: SerdeEntryOrForm, + pub entry: SerdeEntryForm, pub auth: AuthForm, } #[derive(Debug, Serialize, Deserialize)] -pub struct InsertEntryResponse; +pub enum InsertEntrySuccess { + Inserted(#[serde(with = "data_model::serde_encoding::authorised_entry")] AuthorisedEntry), + Obsolete, +} + +#[derive(Debug, Serialize, Deserialize)] +pub enum IngestEntrySuccess { + Inserted, + Obsolete, +} + +impl InsertEntrySuccess { + /// Returns the inserted entry, or an error if the entry was not inserted + /// because it is obsoleted by a newer entry. + pub fn inserted(self) -> Result { + match self { + Self::Inserted(entry) => Ok(entry), + Self::Obsolete => Err(EntryObsoleteError), + } + } +} + +#[derive(Debug, thiserror::Error)] +#[error("The entry was not inserted because a newer entry exists.")] +pub struct EntryObsoleteError; #[derive(Debug, Serialize, Deserialize)] pub struct InsertSecretRequest { @@ -98,7 +125,22 @@ pub struct GetEntriesRequest { } #[derive(Debug, Serialize, Deserialize)] -pub struct GetEntriesResponse(pub SerdeEntry); +pub struct GetEntriesResponse( + #[serde(with = "data_model::serde_encoding::authorised_entry")] pub AuthorisedEntry, +); + +#[derive(Debug, Serialize, Deserialize)] +pub struct GetEntryRequest { + pub namespace: NamespaceId, + pub subspace: SubspaceId, + #[serde(with = "data_model::serde_encoding::path")] + pub path: Path, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct GetEntryResponse( + pub Option, // #[serde(with = "data_model::serde_encoding::authorised_entry")] pub AuthorisedEntry, +); #[derive(Debug, 
Serialize, Deserialize)] pub struct CreateNamespaceRequest { diff --git a/iroh/tests/spaces.rs b/iroh/tests/spaces.rs new file mode 100644 index 0000000000..b927347a78 --- /dev/null +++ b/iroh/tests/spaces.rs @@ -0,0 +1,134 @@ +use anyhow::Result; +use futures_lite::StreamExt; +use iroh::client::{spaces::EntryForm, Iroh}; +use iroh_net::{key::SecretKey, NodeAddr}; +use iroh_willow::{ + interest::{CapSelector, DelegateTo, RestrictArea}, + proto::{ + data_model::{Path, PathExt}, + grouping::{Area, Range3d}, + keys::NamespaceKind, + meadowcap::AccessMode, + }, + session::intents::Completion, +}; +use tracing::info; + +/// Spawn an iroh node in a separate thread and tokio runtime, and return +/// the address and client. +async fn spawn_node() -> (NodeAddr, Iroh) { + let (sender, receiver) = tokio::sync::oneshot::channel(); + std::thread::spawn(move || { + let runtime = tokio::runtime::Builder::new_multi_thread() + .enable_all() + .build()?; + runtime.block_on(async move { + let secret_key = SecretKey::generate(); + let node = iroh::node::Builder::default() + .secret_key(secret_key) + .relay_mode(iroh_net::relay::RelayMode::Disabled) + .node_discovery(iroh::node::DiscoveryConfig::None) + .spawn() + .await?; + let addr = node.net().node_addr().await?; + sender.send((addr, node.client().clone())).unwrap(); + node.cancel_token().cancelled().await; + anyhow::Ok(()) + })?; + anyhow::Ok(()) + }); + receiver.await.unwrap() +} + +#[tokio::test] +async fn spaces_smoke() -> Result<()> { + iroh_test::logging::setup_multithreaded(); + let (alfie_addr, alfie) = spawn_node().await; + let (betty_addr, betty) = spawn_node().await; + info!("alfie is {}", alfie_addr.node_id.fmt_short()); + info!("betty is {}", betty_addr.node_id.fmt_short()); + + let betty_user = betty.spaces().create_user().await?; + let alfie_user = alfie.spaces().create_user().await?; + let alfie_space = alfie + .spaces() + .create(NamespaceKind::Owned, alfie_user) + .await?; + + let namespace = 
alfie_space.namespace_id(); + + alfie_space + .insert_bytes( + EntryForm::new(alfie_user, Path::from_bytes(&[b"foo", b"bar"])?), + "hello betty", + ) + .await?; + alfie_space + .insert_bytes( + EntryForm::new(alfie_user, Path::from_bytes(&[b"foo", b"boo"])?), + "this is alfie", + ) + .await?; + + let ticket = alfie_space + .share(betty_user, AccessMode::Read, RestrictArea::None) + .await?; + + println!("ticket {ticket:?}"); + let (betty_space, betty_sync_intent) = betty.spaces().import_and_sync(ticket).await?; + + let mut completion = betty_sync_intent.complete_all().await; + assert_eq!(completion.len(), 1); + let alfie_completion = completion.remove(&alfie_addr.node_id).unwrap(); + assert_eq!(alfie_completion?, Completion::Complete); + + let betty_entries: Vec<_> = betty_space + .get_many(Range3d::new_full()) + .await? + .try_collect() + .await?; + assert_eq!(betty_entries.len(), 2); + + let res = betty_space + .insert_bytes( + EntryForm::new(betty_user, Path::from_bytes(&[b"hello"])?), + "this is betty", + ) + .await; + println!("insert without cap: {res:?}"); + assert!(res.is_err()); + + let area = Area::new_subspace(betty_user); + let caps = alfie + .spaces() + .delegate_caps( + CapSelector::any(namespace), + AccessMode::Write, + DelegateTo::new(betty_user, RestrictArea::Restrict(area)), + ) + .await?; + betty.spaces().import_caps(caps).await?; + + let res = betty_space + .insert_bytes( + EntryForm::new(betty_user, Path::from_bytes(&[b"hello"])?), + "this is betty", + ) + .await; + assert!(res.is_ok()); + + alfie.net().add_node_addr(betty_addr.clone()).await?; + let mut alfie_sync_intent = alfie_space + .sync_once(betty_addr.node_id, Default::default()) + .await?; + alfie_sync_intent.complete().await?; + + let alfie_entries: Vec<_> = alfie_space + .get_many(Range3d::new_full()) + .await? 
+ .try_collect() + .await?; + assert_eq!(alfie_entries.len(), 3); + + Ok(()) +} From a4020e7bbb739d121067beb3e9d631c780d2ebf8 Mon Sep 17 00:00:00 2001 From: Frando Date: Wed, 28 Aug 2024 11:52:57 +0200 Subject: [PATCH 168/198] chore: clippy --- iroh-willow/src/proto/data_model.rs | 2 +- iroh-willow/src/store/auth.rs | 8 ++++---- iroh-willow/src/store/memory.rs | 4 ++-- iroh/src/client/spaces.rs | 7 +++---- iroh/src/node/rpc/spaces.rs | 2 +- 5 files changed, 11 insertions(+), 12 deletions(-) diff --git a/iroh-willow/src/proto/data_model.rs b/iroh-willow/src/proto/data_model.rs index 1c04982120..4286d059aa 100644 --- a/iroh-willow/src/proto/data_model.rs +++ b/iroh-willow/src/proto/data_model.rs @@ -303,7 +303,7 @@ pub mod serde_encoding { let (entry, capability, signature): (SerdeEntry, SerdeMcCapability, UserSignature) = Deserialize::deserialize(deserializer)?; let token = AuthorisationToken::new(capability.0, signature); - Ok(AuthorisedEntry::new(entry.0, token).map_err(de::Error::custom)?) + AuthorisedEntry::new(entry.0, token).map_err(de::Error::custom) } } diff --git a/iroh-willow/src/store/auth.rs b/iroh-willow/src/store/auth.rs index 3a65ca1dcc..6e2142276a 100644 --- a/iroh-willow/src/store/auth.rs +++ b/iroh-willow/src/store/auth.rs @@ -154,7 +154,7 @@ impl Auth { McCapability::new_owned(namespace_key, &namespace_secret, user_key, AccessMode::Read)? }; // TODO: Subspace capability. - let pack = CapabilityPack::Read(ReadAuthorisation::new(cap, None).into()); + let pack = CapabilityPack::Read(ReadAuthorisation::new(cap, None)); Ok(pack) } @@ -177,7 +177,7 @@ impl Auth { AccessMode::Write, )? 
}; - let pack = CapabilityPack::Write(cap.into()); + let pack = CapabilityPack::Write(cap); Ok(pack) } @@ -237,7 +237,7 @@ impl Auth { None }; let pack = - CapabilityPack::Read(ReadAuthorisation::new(new_read_cap, new_subspace_cap).into()); + CapabilityPack::Read(ReadAuthorisation::new(new_read_cap, new_subspace_cap)); Ok(pack) } @@ -254,7 +254,7 @@ impl Auth { .ok_or(AuthError::MissingUserSecret(*cap.receiver()))?; let area = restrict_area.or_default(cap.granted_area()); let new_cap = cap.delegate(&user_secret, &to, &area)?; - Ok(CapabilityPack::Write(new_cap.into())) + Ok(CapabilityPack::Write(new_cap)) } } diff --git a/iroh-willow/src/store/memory.rs b/iroh-willow/src/store/memory.rs index c15aa9ca5a..4ccc46a8af 100644 --- a/iroh-willow/src/store/memory.rs +++ b/iroh-willow/src/store/memory.rs @@ -348,13 +348,13 @@ impl CapsStore { self.read_caps .entry(*cap.read_cap().granted_namespace()) .or_default() - .push(cap.into()); + .push(cap); } CapabilityPack::Write(cap) => { self.write_caps .entry(*cap.granted_namespace()) .or_default() - .push(cap.into()); + .push(cap); } } } diff --git a/iroh/src/client/spaces.rs b/iroh/src/client/spaces.rs index a183302896..02fdf35859 100644 --- a/iroh/src/client/spaces.rs +++ b/iroh/src/client/spaces.rs @@ -7,6 +7,8 @@ //! //! [Willow]: https://willowprotocol.org/ +// TODO: Reexport everything that is needed from iroh_willow. 
+ use std::{ collections::HashMap, path::PathBuf, @@ -196,10 +198,7 @@ impl Space { payload, }; let auth = entry.auth; - let req = InsertEntryRequest { - entry: form.into(), - auth, - }; + let req = InsertEntryRequest { entry: form, auth }; let res = self.rpc.rpc(req).await??; Ok(res) } diff --git a/iroh/src/node/rpc/spaces.rs b/iroh/src/node/rpc/spaces.rs index cc83bee73a..a1ec2b1bed 100644 --- a/iroh/src/node/rpc/spaces.rs +++ b/iroh/src/node/rpc/spaces.rs @@ -73,7 +73,7 @@ pub(crate) async fn handle_rpc_request( .get_entries(req.namespace, req.range) .await .map_err(map_err)?; - Ok(stream.map(|res| res.map(|e| GetEntriesResponse(e)).map_err(map_err))) + Ok(stream.map(|res| res.map(GetEntriesResponse).map_err(map_err))) }) .await } From 224b1689c554fc654662860781523cedf692b820 Mon Sep 17 00:00:00 2001 From: Frando Date: Wed, 28 Aug 2024 12:00:18 +0200 Subject: [PATCH 169/198] cleanup --- iroh-willow/src/form.rs | 59 +-------------------------------- iroh/src/client/spaces.rs | 39 ++++++++++++++-------- iroh/src/node/rpc/spaces.rs | 6 ++-- iroh/src/rpc_protocol/spaces.rs | 55 ++++++++++++++++++++++++------ 4 files changed, 74 insertions(+), 85 deletions(-) diff --git a/iroh-willow/src/form.rs b/iroh-willow/src/form.rs index 6f36267377..c93f28914d 100644 --- a/iroh-willow/src/form.rs +++ b/iroh-willow/src/form.rs @@ -15,7 +15,7 @@ use serde::{Deserialize, Serialize}; use tokio::io::AsyncRead; use crate::proto::{ - data_model::{self, Entry, NamespaceId, Path, SubspaceId, Timestamp}, + data_model::{Entry, NamespaceId, Path, SubspaceId, Timestamp}, keys::UserId, meadowcap::{self, WriteCapability}, }; @@ -157,60 +157,3 @@ pub enum TimestampForm { /// Set the timestamp to the provided value. Exact(Timestamp), } - -/// Either a [`Entry`] or a [`EntryForm`]. 
-#[derive(Debug, Serialize, Deserialize)] -pub enum SerdeEntryOrForm { - Entry(#[serde(with = "data_model::serde_encoding::entry")] Entry), - Form(SerdeEntryForm), -} - -impl From for EntryOrForm { - fn from(value: SerdeEntryOrForm) -> Self { - match value { - SerdeEntryOrForm::Entry(entry) => EntryOrForm::Entry(entry), - SerdeEntryOrForm::Form(form) => EntryOrForm::Form(form.into()), - } - } -} - -/// Creates an entry while setting some fields automatically. -#[derive(Debug, Serialize, Deserialize)] -pub struct SerdeEntryForm { - pub namespace_id: NamespaceId, - pub subspace_id: SubspaceForm, - #[serde(with = "data_model::serde_encoding::path")] - pub path: Path, - pub timestamp: TimestampForm, - pub payload: PayloadForm2, -} - -/// -#[derive(Debug, Serialize, Deserialize)] -pub enum PayloadForm2 { - /// Make sure the hash is available in the blob store, and use the length from the blob store. - Checked(Hash), - /// Insert with the specified hash and length, without checking if the blob is in the local blob store. 
- Unchecked(Hash, u64), -} - -impl From for PayloadForm { - fn from(value: PayloadForm2) -> Self { - match value { - PayloadForm2::Checked(hash) => PayloadForm::Hash(hash), - PayloadForm2::Unchecked(hash, len) => PayloadForm::HashUnchecked(hash, len), - } - } -} - -impl From for EntryForm { - fn from(value: SerdeEntryForm) -> Self { - EntryForm { - namespace_id: value.namespace_id, - subspace_id: value.subspace_id, - path: value.path, - timestamp: value.timestamp, - payload: value.payload.into(), - } - } -} diff --git a/iroh/src/client/spaces.rs b/iroh/src/client/spaces.rs index 02fdf35859..dd0d23f2d5 100644 --- a/iroh/src/client/spaces.rs +++ b/iroh/src/client/spaces.rs @@ -24,7 +24,7 @@ use iroh_base::key::NodeId; use iroh_blobs::Hash; use iroh_net::NodeAddr; use iroh_willow::{ - form::{AuthForm, PayloadForm2, SerdeEntryForm, SubspaceForm, TimestampForm}, + form::{AuthForm, SubspaceForm, TimestampForm}, interest::{ AreaOfInterestSelector, CapSelector, CapabilityPack, DelegateTo, Interests, RestrictArea, }, @@ -189,8 +189,8 @@ impl Space { self.namespace_id } - async fn insert(&self, entry: EntryForm, payload: PayloadForm2) -> Result { - let form = SerdeEntryForm { + async fn insert(&self, entry: EntryForm, payload: PayloadForm) -> Result { + let form = FullEntryForm { namespace_id: self.namespace_id, subspace_id: entry.subspace_id, path: entry.path, @@ -203,18 +203,11 @@ impl Space { Ok(res) } - // Insert a new entry. - - // `entry` can be a [`EntryForm`] or a `Entry`. - // `auth` can either be a [`AuthForm`] or simply a [`UserId`]. - // When passing a [`UserId`], a matching capability will be selected for the user. - // If you want to select the capability to use more specifically, use the methods on [`AuthForm`]. - // TODO: Not sure I like the impl Into, better change to two methods. - /// Inserts a new entry, with the payload digest set to a hash. + /// Inserts a new entry, with the payload set to the hash of a blob. 
/// /// Note that the payload must exist in the local blob store, otherwise the operation will fail. pub async fn insert_hash(&self, entry: EntryForm, payload: Hash) -> Result { - let payload = PayloadForm2::Checked(payload); + let payload = PayloadForm::Checked(payload); self.insert(entry, payload).await } @@ -364,12 +357,12 @@ impl Space { } /// TODO - pub async fn subscribe(&self, _area: Area) { + pub fn subscribe(&self, _area: Area) { todo!() } /// TODO - pub async fn subscribe_offset(&self, _area: Area, _offset: u64) { + pub fn subscribe_offset(&self, _area: Area, _offset: u64) { todo!() } } @@ -563,3 +556,21 @@ impl Stream for MergedIntentHandle { Pin::new(&mut self.event_rx).poll_next(cx) } } + +/// Options for setting the payload on the a new entry. +#[derive(Debug, Serialize, Deserialize)] +pub enum PayloadForm { + /// Make sure the hash is available in the blob store, and use the length from the blob store. + Checked(Hash), + /// Insert with the specified hash and length, without checking if the blob is in the local blob store. + Unchecked(Hash, u64), +} + +impl From for iroh_willow::form::PayloadForm { + fn from(value: PayloadForm) -> Self { + match value { + PayloadForm::Checked(hash) => Self::Hash(hash), + PayloadForm::Unchecked(hash, len) => Self::HashUnchecked(hash, len), + } + } +} diff --git a/iroh/src/node/rpc/spaces.rs b/iroh/src/node/rpc/spaces.rs index a1ec2b1bed..93c7f9d525 100644 --- a/iroh/src/node/rpc/spaces.rs +++ b/iroh/src/node/rpc/spaces.rs @@ -156,6 +156,7 @@ pub(crate) async fn handle_rpc_request( } } +// TODO: Try to use the streams directly instead of spawning two tasks. 
async fn sync_with_peer( engine: Engine, req: SyncWithPeerRequest, @@ -169,16 +170,17 @@ async fn sync_with_peer( let (mut update_sink, mut events) = handle.split(); tokio::task::spawn(async move { while let Some(update) = update_stream.next().await { - if let Err(_) = update_sink.send(update.0).await { + if update_sink.send(update.0).await.is_err() { break; } } }); tokio::task::spawn(async move { while let Some(event) = events.next().await { - if let Err(_) = events_tx + if events_tx .send(Ok(SyncWithPeerResponse::Event(event.into()))) .await + .is_err() { break; } diff --git a/iroh/src/rpc_protocol/spaces.rs b/iroh/src/rpc_protocol/spaces.rs index fea876f3a0..ca9e9ee6a5 100644 --- a/iroh/src/rpc_protocol/spaces.rs +++ b/iroh/src/rpc_protocol/spaces.rs @@ -1,11 +1,11 @@ use iroh_base::rpc::{RpcError, RpcResult}; use iroh_net::NodeId; use iroh_willow::{ - form::{AuthForm, SerdeEntryForm}, + form::{AuthForm, SubspaceForm, TimestampForm}, interest::{CapSelector, CapabilityPack, DelegateTo}, proto::{ data_model::{ - self, serde_encoding::SerdeAuthorisedEntry, AuthorisedEntry, NamespaceId, Path, + self, serde_encoding::SerdeAuthorisedEntry, AuthorisedEntry, Entry, NamespaceId, Path, SubspaceId, }, grouping::{self, Range3d}, @@ -21,6 +21,8 @@ use nested_enum_utils::enum_conversions; use quic_rpc_derive::rpc_requests; use serde::{Deserialize, Serialize}; +use crate::client::spaces::PayloadForm; + use super::RpcService; #[allow(missing_docs)] @@ -78,7 +80,7 @@ pub struct IngestEntryRequest { #[derive(Debug, Serialize, Deserialize)] pub struct InsertEntryRequest { - pub entry: SerdeEntryForm, + pub entry: FullEntryForm, pub auth: AuthForm, } @@ -176,14 +178,6 @@ pub struct ImportCapsRequest { #[derive(Debug, Serialize, Deserialize)] pub struct ImportCapsResponse; -// #[derive(Debug, Serialize, Deserialize)] -// pub struct ResolveInterestsRequest { -// pub interests: Interests, -// } - -// #[derive(Debug, Serialize, Deserialize)] -// pub struct ResolveInterestsResponse(pub 
InterestMap); - #[derive(Debug, Serialize, Deserialize)] pub struct SyncWithPeerRequest { pub peer: NodeId, @@ -198,3 +192,42 @@ pub enum SyncWithPeerResponse { Started, Event(Event), } + +/// Either a [`Entry`] or a [`EntryForm`]. +#[derive(Debug, Serialize, Deserialize)] +pub enum EntryOrForm { + Entry(#[serde(with = "data_model::serde_encoding::entry")] Entry), + Form(FullEntryForm), +} + +impl From for iroh_willow::form::EntryOrForm { + fn from(value: EntryOrForm) -> Self { + match value { + EntryOrForm::Entry(entry) => Self::Entry(entry), + EntryOrForm::Form(form) => Self::Form(form.into()), + } + } +} + +/// Creates an entry while setting some fields automatically. +#[derive(Debug, Serialize, Deserialize)] +pub struct FullEntryForm { + pub namespace_id: NamespaceId, + pub subspace_id: SubspaceForm, + #[serde(with = "data_model::serde_encoding::path")] + pub path: Path, + pub timestamp: TimestampForm, + pub payload: PayloadForm, +} + +impl From for iroh_willow::form::EntryForm { + fn from(value: FullEntryForm) -> Self { + Self { + namespace_id: value.namespace_id, + subspace_id: value.subspace_id, + path: value.path, + timestamp: value.timestamp, + payload: value.payload.into(), + } + } +} From aef8618ee584639cb065b60732b13f89b03bdbf3 Mon Sep 17 00:00:00 2001 From: Frando Date: Wed, 28 Aug 2024 14:01:40 +0200 Subject: [PATCH 170/198] cleanups --- iroh-willow/src/interest.rs | 2 +- iroh-willow/src/proto/data_model.rs | 2 +- iroh-willow/src/proto/keys.rs | 4 ++-- iroh-willow/src/proto/wgps/handles.rs | 6 +++--- iroh-willow/src/session.rs | 2 -- iroh-willow/src/session/pai_finder.rs | 9 +++++---- iroh-willow/src/store.rs | 2 -- iroh-willow/src/util/channel.rs | 2 +- iroh/src/client/spaces.rs | 6 +++--- iroh/src/rpc_protocol/spaces.rs | 2 +- 10 files changed, 17 insertions(+), 20 deletions(-) diff --git a/iroh-willow/src/interest.rs b/iroh-willow/src/interest.rs index efe6464d4b..d6090433bd 100644 --- a/iroh-willow/src/interest.rs +++ b/iroh-willow/src/interest.rs 
@@ -29,7 +29,7 @@ pub enum Interests { } impl Interests { - /// Returns a [`SelectBuilder`] to build our [`Interests`]. + /// Returns a [`InterestBuilder`] to build our [`Interests`]. pub fn builder() -> InterestBuilder { InterestBuilder::default() } diff --git a/iroh-willow/src/proto/data_model.rs b/iroh-willow/src/proto/data_model.rs index 4286d059aa..39895756d6 100644 --- a/iroh-willow/src/proto/data_model.rs +++ b/iroh-willow/src/proto/data_model.rs @@ -48,7 +48,7 @@ pub type Component<'a> = willow_data_model::Component<'a, MAX_COMPONENT_LENGTH>; /// A payload digest used in entries. /// -/// This wraps a [`Hash`] blake3 hash. +/// This wraps a [`iroh_blobs::Hash`] blake3 hash. #[derive( Debug, Clone, diff --git a/iroh-willow/src/proto/keys.rs b/iroh-willow/src/proto/keys.rs index 476e0db86a..a51c4579d2 100644 --- a/iroh-willow/src/proto/keys.rs +++ b/iroh-willow/src/proto/keys.rs @@ -65,9 +65,9 @@ impl IsCommunal for NamespacePublicKey { /// and if the last bit is zero it is an owned namespace. #[derive(Debug, Eq, PartialEq, Copy, Clone, Serialize, Deserialize)] pub enum NamespaceKind { - /// Communal namespace, needs [`super::meadowcap::CommunalCapability`] to authorizse. + /// Communal namespace. Communal, - /// Owned namespace, needs [`super::meadowcap::OwnedCapability`] to authorize. + /// Owned namespace. Owned, } diff --git a/iroh-willow/src/proto/wgps/handles.rs b/iroh-willow/src/proto/wgps/handles.rs index 0f4124250a..d7783df6a4 100644 --- a/iroh-willow/src/proto/wgps/handles.rs +++ b/iroh-willow/src/proto/wgps/handles.rs @@ -9,16 +9,16 @@ pub enum HandleType { /// * completed (both peers performed scalar multiplication). Intersection, - /// Resource handle for [`ReadCapability`] that certify access to some Entries. + /// Resource handle for [`crate::proto::meadowcap::ReadAuthorisation`] that certify access to some Entries. Capability, - /// Resource handle for [`AreaOfInterest`]s that peers wish to sync. 
+ /// Resource handle for [`crate::proto::grouping::AreaOfInterest`]s that peers wish to sync. AreaOfInterest, /// Resource handle that controls the matching from Payload transmissions to Payload requests. PayloadRequest, - /// Resource handle for [`StaticToken`]s that peers need to transmit. + /// Resource handle for [`super::StaticToken`]s that peers need to transmit. StaticToken, } diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index ca27d9ac5d..2cf059e6c1 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -118,8 +118,6 @@ impl EventSender { } /// Events emitted from a session. -/// -/// These are handled in the [`PeerManager`](crate::engine::peer_manager::PeerManager). #[derive(derive_more::Debug)] pub(crate) enum SessionEvent { Established, diff --git a/iroh-willow/src/session/pai_finder.rs b/iroh-willow/src/session/pai_finder.rs index 94a6c68d33..e2bcc5500d 100644 --- a/iroh-willow/src/session/pai_finder.rs +++ b/iroh-willow/src/session/pai_finder.rs @@ -2,13 +2,14 @@ //! //! As defined by the willow spec: [Private Area Intersection](https://willowprotocol.org/specs/pai/index.html) //! -//! Partly ported from the implementation in earthstar and willow: -//! -//! * https://github.com/earthstar-project/willow-js/blob/0db4b9ec7710fb992ab75a17bd8557040d9a1062/src/wgps/pai/pai_finder.ts -//! * https://github.com/earthstar-project/earthstar/blob/16d6d4028c22fdbb72f7395013b29be7dcd9217a/src/schemes/schemes.ts#L662 +//! Partly ported from the implementation in [earthstar] and [willow]. //! //! Licensed under LGPL and ported into this MIT/Apache codebase with explicit permission //! from the original author (gwil). +//! +//! [earthstar]: https://github.com/earthstar-project/willow-js/blob/0db4b9ec7710fb992ab75a17bd8557040d9a1062/src/wgps/pai/pai_finder.ts +//! [willow]: https://github.com/earthstar-project/earthstar/blob/16d6d4028c22fdbb72f7395013b29be7dcd9217a/src/schemes/schemes.ts#L662 +//! 
use std::collections::{HashMap, HashSet}; diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index e32511ae62..ee8efdda41 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -1,7 +1,5 @@ //! Store for entries, secrets, and capabilities used in the Willow engine. //! -//! The [`Store`] is the high-level wrapper for the different stores we need. -//! //! The storage backend is defined in the [`Storage`] trait and its associated types. //! //! The only implementation is currently an in-memory store at [`memory`]. diff --git a/iroh-willow/src/util/channel.rs b/iroh-willow/src/util/channel.rs index b459a33c1a..a51f4fab31 100644 --- a/iroh-willow/src/util/channel.rs +++ b/iroh-willow/src/util/channel.rs @@ -115,7 +115,7 @@ impl Guarantees { /// Shared state for a in-memory pipe. /// -/// Roughly modeled after https://docs.rs/tokio/latest/src/tokio/io/util/mem.rs.html#58 +/// Roughly modeled after [tokio](https://docs.rs/tokio/latest/src/tokio/io/util/mem.rs.html#58) #[derive(Debug)] struct Shared { inner: Mutex, diff --git a/iroh/src/client/spaces.rs b/iroh/src/client/spaces.rs index dd0d23f2d5..29bab285ac 100644 --- a/iroh/src/client/spaces.rs +++ b/iroh/src/client/spaces.rs @@ -245,13 +245,13 @@ impl Space { } /// Inserts a new entry, with the payload imported from a file. - pub async fn insert_from_path( + pub async fn insert_from_file( &self, entry: EntryForm, - payload: PathBuf, + file_path: PathBuf, ) -> Result { let batch = self.blobs().batch().await?; - let (tag, _len) = batch.add_file(payload).await?; + let (tag, _len) = batch.add_file(file_path).await?; self.insert_hash(entry, *tag.hash()).await } diff --git a/iroh/src/rpc_protocol/spaces.rs b/iroh/src/rpc_protocol/spaces.rs index ca9e9ee6a5..30c347ec1f 100644 --- a/iroh/src/rpc_protocol/spaces.rs +++ b/iroh/src/rpc_protocol/spaces.rs @@ -193,7 +193,7 @@ pub enum SyncWithPeerResponse { Event(Event), } -/// Either a [`Entry`] or a [`EntryForm`]. 
+/// Either a complete [`Entry`] or a [`FullEntryForm`]. #[derive(Debug, Serialize, Deserialize)] pub enum EntryOrForm { Entry(#[serde(with = "data_model::serde_encoding::entry")] Entry), From 05312385c2f6e2f76c0930b6c019d55582702e9a Mon Sep 17 00:00:00 2001 From: Frando Date: Wed, 28 Aug 2024 14:41:19 +0200 Subject: [PATCH 171/198] refactor: more cleanups and renames, improve SyncIntentSet --- iroh/src/client/spaces.rs | 214 +++++++++++++++++++++----------- iroh/src/rpc_protocol/spaces.rs | 21 +++- 2 files changed, 160 insertions(+), 75 deletions(-) diff --git a/iroh/src/client/spaces.rs b/iroh/src/client/spaces.rs index 29bab285ac..22f927abdb 100644 --- a/iroh/src/client/spaces.rs +++ b/iroh/src/client/spaces.rs @@ -13,10 +13,10 @@ use std::{ collections::HashMap, path::PathBuf, pin::Pin, - task::{Context, Poll}, + task::{ready, Context, Poll}, }; -use anyhow::Result; +use anyhow::{anyhow, Result}; use bytes::Bytes; use futures_lite::{Stream, StreamExt}; use futures_util::{Sink, SinkExt}; @@ -42,11 +42,14 @@ use iroh_willow::{ use ref_cast::RefCast; use serde::{Deserialize, Serialize}; use tokio::io::AsyncRead; -use tokio_stream::StreamMap; +use tokio_stream::{StreamMap, StreamNotifyClose}; use crate::client::RpcClient; + use crate::rpc_protocol::spaces::*; +pub use crate::rpc_protocol::spaces::PayloadForm; + /// Iroh Willow client. #[derive(Debug, Clone, RefCast)] #[repr(transparent)] @@ -98,10 +101,7 @@ impl Client { } /// Import a ticket and start to synchronize. 
- pub async fn import_and_sync( - &self, - ticket: SpaceTicket, - ) -> Result<(Space, MergedIntentHandle)> { + pub async fn import_and_sync(&self, ticket: SpaceTicket) -> Result<(Space, SyncIntentSet)> { if ticket.caps.is_empty() { anyhow::bail!("Invalid ticket: Does not include any capabilities"); } @@ -114,19 +114,19 @@ impl Client { self.import_caps(ticket.caps).await?; let interests = Interests::builder().add_full_cap(CapSelector::any(namespace)); let init = SessionInit::reconcile_once(interests); - let mut intents = MergedIntentHandle::default(); + let mut intents = SyncIntentSet::default(); for addr in ticket.nodes { let node_id = addr.node_id; self.net().add_node_addr(addr).await?; let intent = self.sync_with_peer(node_id, init.clone()).await?; - intents.insert(node_id, intent); + intents.insert(node_id, intent)?; } let space = Space::new(self.rpc.clone(), namespace); Ok((space, intents)) } /// Synchronize with a peer. - pub async fn sync_with_peer(&self, peer: NodeId, init: SessionInit) -> Result { + pub async fn sync_with_peer(&self, peer: NodeId, init: SessionInit) -> Result { let req = SyncWithPeerRequest { peer, init }; let (update_tx, event_rx) = self.rpc.bidi(req).await?; @@ -147,7 +147,7 @@ impl Client { }, })); - Ok(IntentHandle::new(update_tx, event_rx)) + Ok(SyncIntent::new(update_tx, event_rx, Default::default())) } /// Import a secret into the Willow store. 
@@ -304,7 +304,7 @@ impl Space { &self, node: NodeId, areas: AreaOfInterestSelector, - ) -> Result { + ) -> Result { let cap = CapSelector::any(self.namespace_id); let interests = Interests::builder().add(cap, areas); let init = SessionInit::reconcile_once(interests); @@ -324,7 +324,7 @@ impl Space { &self, node: NodeId, areas: AreaOfInterestSelector, - ) -> Result { + ) -> Result { let cap = CapSelector::any(self.namespace_id); let interests = Interests::builder().add(cap, areas); let init = SessionInit::continuous(interests); @@ -420,11 +420,12 @@ impl EntryForm { // to cross the RPC boundary. Maybe look into making the main iroh_willow Error type // serializable instead. #[derive(derive_more::Debug)] -pub struct IntentHandle { +pub struct SyncIntent { #[debug("UpdateSender")] update_tx: UpdateSender, #[debug("EventReceiver")] event_rx: EventReceiver, + state: SyncProgress, } /// Sends updates into a reconciliation intent. @@ -434,19 +435,20 @@ pub type UpdateSender = Pin + /// Receives events for a reconciliation intent. /// -/// Can be obtained from [`IntentHandle::split`]. +/// Can be obtained from [`SyncIntent::split`]. pub type EventReceiver = Pin + Send + 'static>>; -impl IntentHandle { - /// Creates a new `IntentHandle` with the given update sender and event receiver. - fn new(update_tx: UpdateSender, event_rx: EventReceiver) -> Self { +impl SyncIntent { + /// Creates a new `SyncIntent` with the given update sender and event receiver. + fn new(update_tx: UpdateSender, event_rx: EventReceiver, state: SyncProgress) -> Self { Self { update_tx, event_rx, + state, } } - /// Splits the `IntentHandle` into a update sender sink and event receiver stream. + /// Splits the `SyncIntent` into a update sender sink and event receiver stream. /// /// The intent will be dropped once both the sender and receiver are dropped. 
pub fn split(self) -> (UpdateSender, EventReceiver) { @@ -461,12 +463,19 @@ impl IntentHandle { /// Note that successful completion of this future does not guarantee that all interests were /// fulfilled. pub async fn complete(&mut self) -> Result { - complete(&mut self.event_rx).await + let mut state = SyncProgress::default(); + while let Some(event) = self.event_rx.next().await { + state.handle_event(&event); + if state.is_ready() { + break; + } + } + state.into_completion() } /// Submit new synchronisation interests into the session. /// - /// The `IntentHandle` will then receive events for these interests in addition to already + /// The `SyncIntent` will then receive events for these interests in addition to already /// submitted interests. pub async fn add_interests(&mut self, interests: impl Into) -> Result<()> { self.update_tx @@ -482,95 +491,154 @@ impl IntentHandle { // } } -impl Stream for IntentHandle { +impl Stream for SyncIntent { type Item = Event; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.event_rx).poll_next(cx) + Poll::Ready(match ready!(Pin::new(&mut self.event_rx).poll_next(cx)) { + None => None, + Some(event) => { + self.state.handle_event(&event); + Some(event) + } + }) } } -async fn complete(event_rx: &mut EventReceiver) -> Result { - let mut complete = false; - let mut partial = false; - while let Some(event) = event_rx.next().await { +/// Completion state for a [`SyncIntent`]. +#[derive(Debug, Default)] +pub struct SyncProgress { + partial: bool, + complete: bool, + failed: Option, +} +impl SyncProgress { + fn handle_event(&mut self, event: &Event) { match event { - Event::ReconciledAll => complete = true, - Event::Reconciled { .. } => partial = true, - Event::Abort { error } => return Err(anyhow::anyhow!(error)), + Event::ReconciledAll => self.complete = true, + Event::Reconciled { .. 
} => self.partial = true, + Event::Abort { error } => self.failed = Some(error.clone()), _ => {} } } - let completion = if complete { - Completion::Complete - } else if partial { - Completion::Partial - } else { - Completion::Nothing - }; - Ok(completion) + fn is_ready(&self) -> bool { + self.complete == true || self.failed.is_some() + } + + fn into_completion(self) -> Result { + if let Some(error) = self.failed { + Err(anyhow!(error)) + } else if self.complete { + Ok(Completion::Complete) + } else if self.partial { + Ok(Completion::Partial) + } else { + Ok(Completion::Nothing) + } + } } /// Merges synchronisation intent handles into one struct. #[derive(Default, derive_more::Debug)] #[debug("MergedIntentHandle({:?})", self.event_rx.keys().collect::>())] -pub struct MergedIntentHandle { - event_rx: StreamMap, - update_tx: HashMap, +pub struct SyncIntentSet { + event_rx: StreamMap>, + intents: HashMap, +} + +#[derive(derive_more::Debug)] +struct SyncIntentState { + #[debug("UpdateSender")] + update_tx: UpdateSender, + state: SyncProgress, } -impl MergedIntentHandle { - /// Add an intent to this merged handle. - pub fn insert(&mut self, peer: NodeId, handle: IntentHandle) { - let (update_tx, event_rx) = handle.split(); - self.event_rx.insert(peer, event_rx); - self.update_tx.insert(peer, update_tx); +impl SyncIntentSet { + /// Add a sync intent to the set. + /// + /// Returns an error if there is already a sync intent for this peer in the set. + pub fn insert(&mut self, peer: NodeId, handle: SyncIntent) -> Result<(), IntentExistsError> { + if self.intents.contains_key(&peer) { + Err(IntentExistsError(peer)) + } else { + let SyncIntent { + update_tx, + event_rx, + state, + } = handle; + self.event_rx.insert(peer, StreamNotifyClose::new(event_rx)); + self.intents + .insert(peer, SyncIntentState { update_tx, state }); + Ok(()) + } + } + + /// Removes a sync intent from the set. 
+ pub fn remove(&mut self, peer: &NodeId) -> Option { + self.event_rx.remove(peer).and_then(|event_rx| { + self.intents.remove(peer).map(|state| { + SyncIntent::new( + state.update_tx, + event_rx.into_inner().expect("unreachable"), + state.state, + ) + }) + }) } /// Submit new synchronisation interests into all sessions. pub async fn add_interests(&mut self, interests: impl Into) -> Result<()> { let interests: Interests = interests.into(); - let futs = self - .update_tx - .values_mut() - .map(|tx| tx.send(IntentUpdate::AddInterests(interests.clone()))); + let futs = self.intents.values_mut().map(|intent| { + intent + .update_tx + .send(IntentUpdate::AddInterests(interests.clone())) + }); futures_buffered::try_join_all(futs).await?; Ok(()) } /// Wait for all intents to complete. pub async fn complete_all(mut self) -> HashMap> { - let streams = self.event_rx.iter_mut(); - let futs = - streams.map(|(node_id, stream)| async move { (*node_id, complete(stream).await) }); + let futs = self.intents.drain().map(|(node_id, state)| { + let event_rx = self + .event_rx + .remove(&node_id) + .expect("unreachable") + .into_inner() + .expect("unreachable"); + async move { + let res = SyncIntent::new(state.update_tx, event_rx, state.state) + .complete() + .await; + (node_id, res) + } + }); let res = futures_buffered::join_all(futs).await; res.into_iter().collect() } } -impl Stream for MergedIntentHandle { +impl Stream for SyncIntentSet { type Item = (NodeId, Event); fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.event_rx).poll_next(cx) - } -} - -/// Options for setting the payload on the a new entry. -#[derive(Debug, Serialize, Deserialize)] -pub enum PayloadForm { - /// Make sure the hash is available in the blob store, and use the length from the blob store. - Checked(Hash), - /// Insert with the specified hash and length, without checking if the blob is in the local blob store. 
- Unchecked(Hash, u64), -} - -impl From for iroh_willow::form::PayloadForm { - fn from(value: PayloadForm) -> Self { - match value { - PayloadForm::Checked(hash) => Self::Hash(hash), - PayloadForm::Unchecked(hash, len) => Self::HashUnchecked(hash, len), + loop { + match ready!(Pin::new(&mut self.event_rx).poll_next(cx)) { + None => break Poll::Ready(None), + Some((peer, Some(event))) => break Poll::Ready(Some((peer, event))), + Some((peer, None)) => { + self.intents.remove(&peer); + self.event_rx.remove(&peer); + continue; + } + } } } } + +/// Error returned when trying to insert a [`SyncIntent] into a [`SyncIntentSet] for a peer that is already in the set. +#[derive(Debug, thiserror::Error)] +#[error("The set already contains a sync intent for this peer.")] +pub struct IntentExistsError(pub NodeId); diff --git a/iroh/src/rpc_protocol/spaces.rs b/iroh/src/rpc_protocol/spaces.rs index 30c347ec1f..067c44399e 100644 --- a/iroh/src/rpc_protocol/spaces.rs +++ b/iroh/src/rpc_protocol/spaces.rs @@ -1,4 +1,5 @@ use iroh_base::rpc::{RpcError, RpcResult}; +use iroh_blobs::Hash; use iroh_net::NodeId; use iroh_willow::{ form::{AuthForm, SubspaceForm, TimestampForm}, @@ -21,8 +22,6 @@ use nested_enum_utils::enum_conversions; use quic_rpc_derive::rpc_requests; use serde::{Deserialize, Serialize}; -use crate::client::spaces::PayloadForm; - use super::RpcService; #[allow(missing_docs)] @@ -231,3 +230,21 @@ impl From for iroh_willow::form::EntryForm { } } } + +/// Options for setting the payload on the a new entry. +#[derive(Debug, Serialize, Deserialize)] +pub enum PayloadForm { + /// Make sure the hash is available in the blob store, and use the length from the blob store. + Checked(Hash), + /// Insert with the specified hash and length, without checking if the blob is in the local blob store. 
+ Unchecked(Hash, u64), +} + +impl From for iroh_willow::form::PayloadForm { + fn from(value: PayloadForm) -> Self { + match value { + PayloadForm::Checked(hash) => Self::Hash(hash), + PayloadForm::Unchecked(hash, len) => Self::HashUnchecked(hash, len), + } + } +} From 2c362b3369f264bb9fd3a31a765b414cc6a15b4e Mon Sep 17 00:00:00 2001 From: Frando Date: Wed, 28 Aug 2024 15:01:55 +0200 Subject: [PATCH 172/198] more renames --- iroh/src/client/spaces.rs | 138 ++++++++++++++++++-------------------- iroh/src/lib.rs | 2 + 2 files changed, 69 insertions(+), 71 deletions(-) diff --git a/iroh/src/client/spaces.rs b/iroh/src/client/spaces.rs index 22f927abdb..40bc1dcf1d 100644 --- a/iroh/src/client/spaces.rs +++ b/iroh/src/client/spaces.rs @@ -45,11 +45,8 @@ use tokio::io::AsyncRead; use tokio_stream::{StreamMap, StreamNotifyClose}; use crate::client::RpcClient; - use crate::rpc_protocol::spaces::*; -pub use crate::rpc_protocol::spaces::PayloadForm; - /// Iroh Willow client. #[derive(Debug, Clone, RefCast)] #[repr(transparent)] @@ -101,7 +98,7 @@ impl Client { } /// Import a ticket and start to synchronize. - pub async fn import_and_sync(&self, ticket: SpaceTicket) -> Result<(Space, SyncIntentSet)> { + pub async fn import_and_sync(&self, ticket: SpaceTicket) -> Result<(Space, SyncHandleSet)> { if ticket.caps.is_empty() { anyhow::bail!("Invalid ticket: Does not include any capabilities"); } @@ -114,7 +111,7 @@ impl Client { self.import_caps(ticket.caps).await?; let interests = Interests::builder().add_full_cap(CapSelector::any(namespace)); let init = SessionInit::reconcile_once(interests); - let mut intents = SyncIntentSet::default(); + let mut intents = SyncHandleSet::default(); for addr in ticket.nodes { let node_id = addr.node_id; self.net().add_node_addr(addr).await?; @@ -126,7 +123,7 @@ impl Client { } /// Synchronize with a peer. 
- pub async fn sync_with_peer(&self, peer: NodeId, init: SessionInit) -> Result { + pub async fn sync_with_peer(&self, peer: NodeId, init: SessionInit) -> Result { let req = SyncWithPeerRequest { peer, init }; let (update_tx, event_rx) = self.rpc.bidi(req).await?; @@ -147,7 +144,7 @@ impl Client { }, })); - Ok(SyncIntent::new(update_tx, event_rx, Default::default())) + Ok(SyncHandle::new(update_tx, event_rx, Default::default())) } /// Import a secret into the Willow store. @@ -293,8 +290,8 @@ impl Space { /// Syncs with a peer and quit the session after a single reconciliation of the selected areas. /// - /// Returns an [`IntentHandle`] that emits events for the reconciliation. If you want to wait for everything to complete, - /// await [`IntentHandle::complete`]. + /// Returns an [`SyncHandle`] that emits events for the reconciliation. If you want to wait for everything to complete, + /// await [`SyncHandle::complete`]. /// /// This will connect to the node, start a sync session, and submit all our capabilities for this namespace, /// constrained to the selected areas. @@ -304,7 +301,7 @@ impl Space { &self, node: NodeId, areas: AreaOfInterestSelector, - ) -> Result { + ) -> Result { let cap = CapSelector::any(self.namespace_id); let interests = Interests::builder().add(cap, areas); let init = SessionInit::reconcile_once(interests); @@ -313,8 +310,8 @@ impl Space { /// Sync with a peer and keep sending and receiving live updates for the selected areas. /// - /// Returns an [`IntentHandle`] that emits events for the reconciliation. If you want to wait for everything to complete, - /// await [`IntentHandle::complete`]. + /// Returns an [`SyncHandle`] that emits events for the reconciliation. If you want to wait for everything to complete, + /// await [`SyncHandle::complete`]. /// /// This will connect to the node, start a sync session, and submit all our capabilities for this namespace, /// constrained to the selected areas. 
@@ -324,7 +321,7 @@ impl Space { &self, node: NodeId, areas: AreaOfInterestSelector, - ) -> Result { + ) -> Result { let cap = CapSelector::any(self.namespace_id); let interests = Interests::builder().add(cap, areas); let init = SessionInit::continuous(interests); @@ -376,51 +373,19 @@ pub struct SpaceTicket { pub nodes: Vec, } -/// Form to insert a new entry -#[derive(Debug)] -pub struct EntryForm { - /// The authorisation, either an exact capability, or a user id to select a capability for automatically. - pub auth: AuthForm, - /// The subspace, either exact or automatically set to the authorising user. - pub subspace_id: SubspaceForm, - /// The path - pub path: Path, - /// The timestamp, either exact or automatically set current time. - pub timestamp: TimestampForm, -} - -impl EntryForm { - /// Creates a new entry form with the specified user and path. - /// - /// The subspace will be set to the specified user id. - /// The timestamp will be set to the current system time. - /// To authorise the entry, any applicable capability issued to the specified user id - /// that covers this path will be used, or return an error if no such capability is available. - pub fn new(user: UserId, path: Path) -> Self { - Self { - auth: AuthForm::Any(user), - path, - subspace_id: Default::default(), - timestamp: Default::default(), - } - } - - // TODO: Add builder methods for auth, subspace_id, timestamp -} - /// Handle to a synchronization intent. /// -/// The `IntentHandle` is a `Stream` of `Event`s. It *must* be progressed in a loop, +/// The `SyncHandle` is a `Stream` of [`Event`]s. It *must* be progressed in a loop, /// otherwise the session will be blocked from progressing. /// -/// The `IntentHandle` can also submit new interests into the session. +/// The `SyncHandle` can also submit new interests into the session. 
/// -// This version of IntentHandle differs from the one in iroh-willow intents module +// This version of SyncHandle differs from the one in iroh-willow intents module // by using the Event type instead of EventKind, which serializes the error to a string // to cross the RPC boundary. Maybe look into making the main iroh_willow Error type // serializable instead. #[derive(derive_more::Debug)] -pub struct SyncIntent { +pub struct SyncHandle { #[debug("UpdateSender")] update_tx: UpdateSender, #[debug("EventReceiver")] @@ -430,16 +395,16 @@ pub struct SyncIntent { /// Sends updates into a reconciliation intent. /// -/// Can be obtained from [`IntentHandle::split`]. +/// Can be obtained from [`SyncHandle::split`]. pub type UpdateSender = Pin + Send + 'static>>; /// Receives events for a reconciliation intent. /// -/// Can be obtained from [`SyncIntent::split`]. +/// Can be obtained from [`SyncHandle::split`]. pub type EventReceiver = Pin + Send + 'static>>; -impl SyncIntent { - /// Creates a new `SyncIntent` with the given update sender and event receiver. +impl SyncHandle { + /// Creates a new `SyncHandle` with the given update sender and event receiver. fn new(update_tx: UpdateSender, event_rx: EventReceiver, state: SyncProgress) -> Self { Self { update_tx, @@ -448,7 +413,7 @@ impl SyncIntent { } } - /// Splits the `SyncIntent` into a update sender sink and event receiver stream. + /// Splits the `SyncHandle` into a update sender sink and event receiver stream. /// /// The intent will be dropped once both the sender and receiver are dropped. pub fn split(self) -> (UpdateSender, EventReceiver) { @@ -475,7 +440,7 @@ impl SyncIntent { /// Submit new synchronisation interests into the session. /// - /// The `SyncIntent` will then receive events for these interests in addition to already + /// The `SyncHandle` will then receive events for these interests in addition to already /// submitted interests. 
pub async fn add_interests(&mut self, interests: impl Into) -> Result<()> { self.update_tx @@ -491,7 +456,7 @@ impl SyncIntent { // } } -impl Stream for SyncIntent { +impl Stream for SyncHandle { type Item = Event; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { @@ -505,7 +470,7 @@ impl Stream for SyncIntent { } } -/// Completion state for a [`SyncIntent`]. +/// Completion state for a [`SyncHandle`]. #[derive(Debug, Default)] pub struct SyncProgress { partial: bool, @@ -541,44 +506,43 @@ impl SyncProgress { /// Merges synchronisation intent handles into one struct. #[derive(Default, derive_more::Debug)] -#[debug("MergedIntentHandle({:?})", self.event_rx.keys().collect::>())] -pub struct SyncIntentSet { +#[debug("MergedSyncHandle({:?})", self.event_rx.keys().collect::>())] +pub struct SyncHandleSet { event_rx: StreamMap>, - intents: HashMap, + intents: HashMap, } #[derive(derive_more::Debug)] -struct SyncIntentState { +struct HandleState { #[debug("UpdateSender")] update_tx: UpdateSender, state: SyncProgress, } -impl SyncIntentSet { +impl SyncHandleSet { /// Add a sync intent to the set. /// /// Returns an error if there is already a sync intent for this peer in the set. - pub fn insert(&mut self, peer: NodeId, handle: SyncIntent) -> Result<(), IntentExistsError> { + pub fn insert(&mut self, peer: NodeId, handle: SyncHandle) -> Result<(), IntentExistsError> { if self.intents.contains_key(&peer) { Err(IntentExistsError(peer)) } else { - let SyncIntent { + let SyncHandle { update_tx, event_rx, state, } = handle; self.event_rx.insert(peer, StreamNotifyClose::new(event_rx)); - self.intents - .insert(peer, SyncIntentState { update_tx, state }); + self.intents.insert(peer, HandleState { update_tx, state }); Ok(()) } } /// Removes a sync intent from the set. 
- pub fn remove(&mut self, peer: &NodeId) -> Option { + pub fn remove(&mut self, peer: &NodeId) -> Option { self.event_rx.remove(peer).and_then(|event_rx| { self.intents.remove(peer).map(|state| { - SyncIntent::new( + SyncHandle::new( state.update_tx, event_rx.into_inner().expect("unreachable"), state.state, @@ -609,7 +573,7 @@ impl SyncIntentSet { .into_inner() .expect("unreachable"); async move { - let res = SyncIntent::new(state.update_tx, event_rx, state.state) + let res = SyncHandle::new(state.update_tx, event_rx, state.state) .complete() .await; (node_id, res) @@ -620,7 +584,7 @@ impl SyncIntentSet { } } -impl Stream for SyncIntentSet { +impl Stream for SyncHandleSet { type Item = (NodeId, Event); fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { @@ -638,7 +602,39 @@ impl Stream for SyncIntentSet { } } -/// Error returned when trying to insert a [`SyncIntent] into a [`SyncIntentSet] for a peer that is already in the set. +/// Error returned when trying to insert a [`SyncHandle`] into a [`SyncHandleSet] for a peer that is already in the set. #[derive(Debug, thiserror::Error)] #[error("The set already contains a sync intent for this peer.")] pub struct IntentExistsError(pub NodeId); + +/// Form to insert a new entry +#[derive(Debug)] +pub struct EntryForm { + /// The authorisation, either an exact capability, or a user id to select a capability for automatically. + pub auth: AuthForm, + /// The subspace, either exact or automatically set to the authorising user. + pub subspace_id: SubspaceForm, + /// The path + pub path: Path, + /// The timestamp, either exact or automatically set current time. + pub timestamp: TimestampForm, +} + +impl EntryForm { + /// Creates a new entry form with the specified user and path. + /// + /// The subspace will be set to the specified user id. + /// The timestamp will be set to the current system time. 
+ /// To authorise the entry, any applicable capability issued to the specified user id + /// that covers this path will be used, or return an error if no such capability is available. + pub fn new(user: UserId, path: Path) -> Self { + Self { + auth: AuthForm::Any(user), + path, + subspace_id: Default::default(), + timestamp: Default::default(), + } + } + + // TODO: Add builder methods for auth, subspace_id, timestamp +} diff --git a/iroh/src/lib.rs b/iroh/src/lib.rs index 72957bc66b..69a921cc6f 100644 --- a/iroh/src/lib.rs +++ b/iroh/src/lib.rs @@ -102,6 +102,8 @@ pub use iroh_docs as docs; pub use iroh_gossip as gossip; #[doc(inline)] pub use iroh_net as net; +#[doc(inline)] +pub use iroh_willow as spaces; pub mod client; pub mod node; From 4faba249d1cd9bb35d7c88f38f514dd72d00dca8 Mon Sep 17 00:00:00 2001 From: Frando Date: Wed, 28 Aug 2024 15:05:52 +0200 Subject: [PATCH 173/198] fix: less clones --- iroh/src/node/rpc.rs | 4 +--- iroh/src/node/rpc/spaces.rs | 13 ++----------- 2 files changed, 3 insertions(+), 14 deletions(-) diff --git a/iroh/src/node/rpc.rs b/iroh/src/node/rpc.rs index a5f12ea9fc..0ee3751098 100644 --- a/iroh/src/node/rpc.rs +++ b/iroh/src/node/rpc.rs @@ -464,9 +464,7 @@ impl Handler { Authors(msg) => self.handle_authors_request(msg, chan).await, Docs(msg) => self.handle_docs_request(msg, chan).await, Gossip(msg) => self.handle_gossip_request(msg, chan).await, - Spaces(msg) => { - self::spaces::handle_rpc_request(self.inner.willow.clone(), msg, chan).await - } + Spaces(msg) => self::spaces::handle_rpc_request(&self.inner.willow, msg, chan).await, } } diff --git a/iroh/src/node/rpc/spaces.rs b/iroh/src/node/rpc/spaces.rs index 93c7f9d525..6acab9b8fa 100644 --- a/iroh/src/node/rpc/spaces.rs +++ b/iroh/src/node/rpc/spaces.rs @@ -18,7 +18,7 @@ fn map_err(err: anyhow::Error) -> RpcError { } pub(crate) async fn handle_rpc_request( - engine: Engine, + engine: &Engine, msg: Request, chan: RpcChannel, ) -> Result<(), RpcServerError> { @@ -127,20 +127,11 
@@ pub(crate) async fn handle_rpc_request( }) .await } - // ResolveInterests(msg) => { - // chan.rpc(msg, engine, |engine, req| async move { - // engine - // .resolve_interests(req.interests) - // .await - // .map(ResolveInterestsResponse) - // .map_err(map_err) - // }) - // .await - // } SyncWithPeer(msg) => { chan.bidi_streaming(msg, engine, |engine, req, update_stream| { // TODO: refactor to use less tasks let (events_tx, events_rx) = tokio::sync::mpsc::channel(32); + let engine = engine.clone(); tokio::task::spawn(async move { if let Err(err) = sync_with_peer(engine, req, events_tx.clone(), update_stream).await From 3484f61496e76a7b3c736a9352defb3b9b918b3e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Kr=C3=BCger?= Date: Wed, 28 Aug 2024 15:59:50 +0200 Subject: [PATCH 174/198] fix(iroh-willow): Remove `flume` dependency (#2674) ## Description Purging `flume` as a dependency from `iroh-willow`. ## Breaking Changes ## Notes & open questions Well, there's the whole "make a last send in `impl Drop`" thing. ## Change checklist - [x] Self-review. - ~~[ ] Documentation updates following the [style guide](https://rust-lang.github.io/rfcs/1574-more-api-documentation-conventions.html#appendix-a-full-conventions-text), if relevant.~~ - ~~[ ] Tests if relevant.~~ - [x] All breaking changes documented. 
--- Cargo.lock | 1 - iroh-willow/Cargo.toml | 1 - iroh-willow/src/engine/actor.rs | 45 +++++++++++++------- iroh-willow/src/session/pai_finder.rs | 61 +++++++++++++++------------ 4 files changed, 63 insertions(+), 45 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5c90f5255b..1f16da7874 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3019,7 +3019,6 @@ dependencies = [ "curve25519-dalek", "derive_more", "ed25519-dalek", - "flume", "futures-buffered", "futures-concurrency", "futures-lite 2.3.0", diff --git a/iroh-willow/Cargo.toml b/iroh-willow/Cargo.toml index f34a66540e..4f522daf0d 100644 --- a/iroh-willow/Cargo.toml +++ b/iroh-willow/Cargo.toml @@ -20,7 +20,6 @@ bytes = { version = "1.4", features = ["serde"] } curve25519-dalek = { version = "4.1.3", features = [ "digest", "rand_core", "serde", ] } derive_more = { version = "1.0.0-beta.6", features = [ "debug", "deref", "display", "from", "try_into", "into", "as_ref", "try_from", ] } ed25519-dalek = { version = "2.0.0", features = ["serde", "rand_core"] } -flume = "0.11" futures-buffered = "0.2.6" futures-concurrency = "7.6.0" futures-lite = "2.3.0" diff --git a/iroh-willow/src/engine/actor.rs b/iroh-willow/src/engine/actor.rs index ab379a5fa5..e0bb99b526 100644 --- a/iroh-willow/src/engine/actor.rs +++ b/iroh-willow/src/engine/actor.rs @@ -7,6 +7,7 @@ use tokio::{ sync::{mpsc, oneshot}, task::JoinSet, }; +use tokio_stream::wrappers::ReceiverStream; use tracing::{debug, error, error_span, trace, warn, Instrument}; use crate::{ @@ -34,7 +35,7 @@ pub const SESSION_UPDATE_CHANNEL_CAP: usize = 64; /// Handle to a Willow storage thread. 
#[derive(Debug, Clone)] pub struct ActorHandle { - inbox_tx: flume::Sender, + inbox_tx: tokio::sync::mpsc::Sender, join_handle: Arc>>, } @@ -47,7 +48,7 @@ impl ActorHandle { create_store: impl 'static + Send + FnOnce() -> S, me: NodeId, ) -> ActorHandle { - let (inbox_tx, inbox_rx) = flume::bounded(INBOX_CAP); + let (inbox_tx, inbox_rx) = tokio::sync::mpsc::channel(INBOX_CAP); let join_handle = std::thread::Builder::new() .name("willow".to_string()) .spawn(move || { @@ -68,7 +69,7 @@ impl ActorHandle { } async fn send(&self, action: Input) -> Result<()> { - self.inbox_tx.send_async(action).await?; + self.inbox_tx.send(action).await?; Ok(()) } @@ -125,14 +126,14 @@ impl ActorHandle { namespace: NamespaceId, range: Range3d, ) -> Result>> { - let (tx, rx) = flume::bounded(1024); + let (tx, rx) = tokio::sync::mpsc::channel(1024); self.send(Input::GetEntries { namespace, reply: tx, range, }) .await?; - Ok(rx.into_stream()) + Ok(ReceiverStream::new(rx)) } pub(crate) async fn init_session( @@ -213,7 +214,21 @@ impl Drop for ActorHandle { // this means we're dropping the last reference if let Some(handle) = Arc::get_mut(&mut self.join_handle) { let handle = handle.take().expect("can only drop once"); - self.inbox_tx.send(Input::Shutdown { reply: None }).ok(); + + match tokio::runtime::Handle::try_current() { + Ok(runtime) => { + let (dumb, _) = tokio::sync::mpsc::channel(1); + let inbox_tx = std::mem::replace(&mut self.inbox_tx, dumb); + runtime + .spawn(async move { inbox_tx.send(Input::Shutdown { reply: None }).await }); + } + Err(_) => { + self.inbox_tx + .blocking_send(Input::Shutdown { reply: None }) + .ok(); + } + } + if let Err(err) = handle.join() { warn!(?err, "Failed to join sync actor"); } @@ -232,7 +247,7 @@ pub enum Input { namespace: NamespaceId, range: Range3d, #[debug(skip)] - reply: flume::Sender>, + reply: mpsc::Sender>, }, IngestEntry { authorised_entry: AuthorisedEntry, @@ -279,14 +294,14 @@ pub enum Input { #[derive(Debug)] struct Actor { - inbox_rx: 
flume::Receiver, + inbox_rx: tokio::sync::mpsc::Receiver, store: Store, next_session_id: u64, tasks: JoinSet<()>, } impl Actor { - pub fn new(store: Store, inbox_rx: flume::Receiver) -> Self { + pub fn new(store: Store, inbox_rx: tokio::sync::mpsc::Receiver) -> Self { Self { store, inbox_rx, @@ -306,9 +321,9 @@ impl Actor { async fn run_async(mut self) -> Result<()> { loop { tokio::select! { - msg = self.inbox_rx.recv_async() => match msg { - Err(_) => break, - Ok(Input::Shutdown { reply }) => { + msg = self.inbox_rx.recv() => match msg { + None => break, + Some(Input::Shutdown { reply }) => { self.tasks.shutdown().await; drop(self); if let Some(reply) = reply { @@ -316,7 +331,7 @@ impl Actor { } break; } - Ok(msg) => { + Some(msg) => { if self.handle_message(msg).await.is_err() { warn!("failed to send reply: receiver dropped"); } @@ -379,12 +394,12 @@ impl Actor { } => { let snapshot = self.store.entries().snapshot(); match snapshot { - Err(err) => reply.send(Err(err)).map_err(send_reply_error), + Err(err) => reply.send(Err(err)).await.map_err(send_reply_error), Ok(snapshot) => { self.tasks.spawn_local(async move { let iter = snapshot.get_entries(namespace, &range); for entry in iter { - if reply.send_async(entry).await.is_err() { + if reply.send(entry).await.is_err() { break; } } diff --git a/iroh-willow/src/session/pai_finder.rs b/iroh-willow/src/session/pai_finder.rs index 94a6c68d33..ed10928c2d 100644 --- a/iroh-willow/src/session/pai_finder.rs +++ b/iroh-willow/src/session/pai_finder.rs @@ -503,6 +503,8 @@ mod tests { use futures_util::SinkExt; use rand_core::{CryptoRngCore, SeedableRng}; use tokio::task::{spawn_local, JoinHandle, LocalSet}; + use tokio_stream::wrappers::ReceiverStream; + use tokio_util::sync::PollSender; use tracing::{error_span, Instrument, Span}; use crate::{ @@ -536,15 +538,15 @@ mod tests { let auth_alfie = ReadAuthorisation::new_owned(&namespace_secret, alfie_public).unwrap(); let auth_betty = 
ReadAuthorisation::new_owned(&namespace_secret, betty_public).unwrap(); - let (alfie, betty) = Handle::create_two(); + let (mut alfie, mut betty) = Handle::create_two(); alfie.submit(auth_alfie.clone()).await; betty.submit(auth_betty.clone()).await; - transfer::(&alfie, &betty).await; - transfer::(&betty, &alfie).await; - transfer::(&alfie, &betty).await; - transfer::(&betty, &alfie).await; + transfer::(&mut alfie, &betty).await; + transfer::(&mut betty, &alfie).await; + transfer::(&mut alfie, &betty).await; + transfer::(&mut betty, &alfie).await; assert_eq!(alfie.next_intersection().await.authorisation, auth_alfie); assert_eq!(betty.next_intersection().await.authorisation, auth_betty); @@ -589,22 +591,22 @@ mod tests { .unwrap(); assert!(betty_auth.subspace_cap().is_some()); - let (alfie, betty) = Handle::create_two(); + let (mut alfie, mut betty) = Handle::create_two(); alfie.submit(alfie_auth.clone()).await; betty.submit(betty_auth.clone()).await; - transfer::(&alfie, &betty).await; - transfer::(&betty, &alfie).await; + transfer::(&mut alfie, &betty).await; + transfer::(&mut betty, &alfie).await; - transfer::(&alfie, &betty).await; - transfer::(&betty, &alfie).await; + transfer::(&mut alfie, &betty).await; + transfer::(&mut betty, &alfie).await; - transfer::(&alfie, &betty).await; - transfer::(&betty, &alfie).await; + transfer::(&mut alfie, &betty).await; + transfer::(&mut betty, &alfie).await; - transfer::(&alfie, &betty).await; - transfer::(&betty, &alfie).await; + transfer::(&mut alfie, &betty).await; + transfer::(&mut betty, &alfie).await; let next: PaiRequestSubspaceCapability = alfie.next_message().await; betty @@ -641,7 +643,10 @@ mod tests { (secret, public.id()) } - async fn transfer + Into>(from: &Handle, to: &Handle) { + async fn transfer + Into>( + from: &mut Handle, + to: &Handle, + ) { let message = from.next_message::().await; let message: IntersectionMessage = message.into(); to.receive(message).await; @@ -649,9 +654,10 @@ mod tests { struct 
Handle { task: JoinHandle>, - input: flume::Sender, - output: flume::Receiver, + input: tokio::sync::mpsc::Sender, + output: tokio::sync::mpsc::Receiver, } + impl Handle { pub fn create_two() -> (Self, Self) { ( @@ -661,13 +667,12 @@ mod tests { } pub fn new(span: Span) -> Self { - let (input, input_rx) = flume::bounded(1); - let (output_tx, output) = flume::bounded(1); + let (input, input_rx) = tokio::sync::mpsc::channel(1); + let (output_tx, output) = tokio::sync::mpsc::channel(1); input.try_send(Input::Established).expect("has capacity"); - let outbox = output_tx - .into_sink() - .sink_map_err(|_| Error::InvalidState("failed to send")); - let inbox = input_rx.into_stream(); + let outbox = + PollSender::new(output_tx).sink_map_err(|_| Error::InvalidState("failed to send")); + let inbox = ReceiverStream::new(input_rx); let task = spawn_local( async move { PaiFinder::run_with_sink(inbox, outbox).await }.instrument(span), ); @@ -679,7 +684,7 @@ mod tests { } pub async fn input(&self, input: Input) { - self.input.send_async(input).await.unwrap(); + self.input.send(input).await.unwrap(); } pub async fn submit(&self, auth: ReadAuthorisation) { @@ -690,18 +695,18 @@ mod tests { self.input(Input::ReceivedMessage(Ok(message.into()))).await } - pub async fn next(&self) -> Output { - self.output.recv_async().await.unwrap() + pub async fn next(&mut self) -> Output { + self.output.recv().await.unwrap() } - pub async fn next_intersection(&self) -> PaiIntersection { + pub async fn next_intersection(&mut self) -> PaiIntersection { match self.next().await { Output::NewIntersection(intersection) => intersection, out => panic!("expected NewIntersection but got {out:?}"), } } - pub async fn next_message>(&self) -> T { + pub async fn next_message>(&mut self) -> T { match self.next().await { Output::SendMessage(message) => { let dbg = format!("{}", message); From e3d4e3eff35c56cc8b000bb6f370661129351f9e Mon Sep 17 00:00:00 2001 From: Frando Date: Wed, 28 Aug 2024 17:04:27 +0200 
Subject: [PATCH 175/198] refactor: make spaces/willow engine optional --- iroh/src/node.rs | 17 ++- iroh/src/node/builder.rs | 53 +++++-- iroh/src/node/rpc.rs | 10 +- iroh/src/node/rpc/spaces.rs | 269 +++++++++++++++++++----------------- 4 files changed, 206 insertions(+), 143 deletions(-) diff --git a/iroh/src/node.rs b/iroh/src/node.rs index 23c1447685..954c4da76b 100644 --- a/iroh/src/node.rs +++ b/iroh/src/node.rs @@ -118,7 +118,7 @@ struct NodeInner { downloader: Downloader, blob_batches: tokio::sync::Mutex, local_pool_handle: LocalPoolHandle, - willow: iroh_willow::Engine, + willow: Option, } /// Keeps track of all the currently active batch operations of the blobs api. @@ -499,9 +499,16 @@ impl NodeInner { }; // Shutdown willow gracefully. - if let Err(error) = self.willow.clone().shutdown().await { - warn!(?error, "Error while shutting down willow"); - } + let spaces_shutdown = { + let engine = self.willow.clone(); + async move { + if let Some(engine) = engine { + if let Err(error) = engine.shutdown().await { + warn!(?error, "Error while shutting down willow"); + } + } + } + }; // We ignore all errors during shutdown. let _ = tokio::join!( @@ -514,6 +521,8 @@ impl NodeInner { .close(error_code.into(), error_code.reason()), // Shutdown docs engine. docs_shutdown, + // Shutdown spaces engine. + spaces_shutdown, // Shutdown blobs store engine. self.db.shutdown(), // Shutdown protocol handlers. diff --git a/iroh/src/node/builder.rs b/iroh/src/node/builder.rs index 4bbf8ab387..1fd56ab9ce 100644 --- a/iroh/src/node/builder.rs +++ b/iroh/src/node/builder.rs @@ -67,6 +67,18 @@ pub enum DocsStorage { Persistent(PathBuf), } +/// Storage backend for spaces. +#[derive(Debug, Clone)] +pub enum SpacesStorage { + /// Disable docs completely. + Disabled, + /// In-memory storage. + Memory, + /// File-based persistent storage. + #[allow(unused)] + Persistent(PathBuf), +} + /// Builder for the [`Node`]. /// /// You must supply a blob store and a document store. 
@@ -108,6 +120,7 @@ where dns_resolver: Option, node_discovery: DiscoveryConfig, docs_storage: DocsStorage, + spaces_storage: SpacesStorage, #[cfg(any(test, feature = "test-utils"))] insecure_skip_relay_cert_verify: bool, /// Callback to register when a gc loop is done @@ -235,6 +248,7 @@ impl Default for Builder { rpc_addr: None, gc_policy: GcPolicy::Disabled, docs_storage: DocsStorage::Memory, + spaces_storage: SpacesStorage::Memory, node_discovery: Default::default(), #[cfg(any(test, feature = "test-utils"))] insecure_skip_relay_cert_verify: false, @@ -269,6 +283,8 @@ impl Builder { rpc_addr: None, gc_policy: GcPolicy::Disabled, docs_storage, + // TODO: Expose in function + spaces_storage: SpacesStorage::Disabled, node_discovery: Default::default(), #[cfg(any(test, feature = "test-utils"))] insecure_skip_relay_cert_verify: false, @@ -322,6 +338,8 @@ where dns_resolver: self.dns_resolver, gc_policy: self.gc_policy, docs_storage, + // TODO: Switch to SpacesStorage::Persistent once we have a store. + spaces_storage: SpacesStorage::Disabled, node_discovery: self.node_discovery, #[cfg(any(test, feature = "test-utils"))] insecure_skip_relay_cert_verify: false, @@ -376,6 +394,12 @@ where self } + /// Disables spaces support on this node completely. + pub fn disable_spaces(mut self) -> Self { + self.spaces_storage = SpacesStorage::Disabled; + self + } + /// Sets the relay servers to assist in establishing connectivity. 
/// /// Relay servers are used to discover other nodes by `PublicKey` and also help @@ -588,15 +612,24 @@ where ) .await?; + let willow = match self.spaces_storage { + SpacesStorage::Disabled => None, + SpacesStorage::Memory => { + let blobs_store = self.blobs_store.clone(); + let create_store = move || iroh_willow::store::memory::Store::new(blobs_store); + let engine = iroh_willow::Engine::spawn( + endpoint.clone(), + create_store, + iroh_willow::engine::AcceptOpts::default(), + ); + Some(engine) + } + SpacesStorage::Persistent(_) => { + unimplemented!("peristent storage for willow is not yet implemented") + } + }; // Spawn the willow engine. // TODO: Allow to disable. - let blobs_store = self.blobs_store.clone(); - let create_store = move || iroh_willow::store::memory::Store::new(blobs_store); - let willow = iroh_willow::Engine::spawn( - endpoint.clone(), - create_store, - iroh_willow::engine::AcceptOpts::default(), - ); // Initialize the internal RPC connection. let (internal_rpc, controller) = quic_rpc::transport::flume::connection::(32); @@ -773,9 +806,9 @@ impl ProtocolBuilder { self = self.accept(DOCS_ALPN, Arc::new(docs)); } - // TODO: Make willow optional. 
- let willow_engine = self.inner.willow.clone(); - self = self.accept(iroh_willow::ALPN, Arc::new(willow_engine)); + if let Some(engine) = self.inner.willow.clone() { + self = self.accept(iroh_willow::ALPN, Arc::new(engine)); + } self } diff --git a/iroh/src/node/rpc.rs b/iroh/src/node/rpc.rs index 0ee3751098..0c8ae93198 100644 --- a/iroh/src/node/rpc.rs +++ b/iroh/src/node/rpc.rs @@ -103,6 +103,10 @@ impl Handler { self.inner.docs.as_ref() } + fn spaces(&self) -> Result<&iroh_willow::Engine, RpcError> { + self.inner.willow.as_ref().ok_or_else(spaces_disabled) + } + async fn with_docs(self, f: F) -> RpcResult where T: Send + 'static, @@ -464,7 +468,7 @@ impl Handler { Authors(msg) => self.handle_authors_request(msg, chan).await, Docs(msg) => self.handle_docs_request(msg, chan).await, Gossip(msg) => self.handle_gossip_request(msg, chan).await, - Spaces(msg) => self::spaces::handle_rpc_request(&self.inner.willow, msg, chan).await, + Spaces(msg) => self.handle_spaces_request(msg, chan).await, } } @@ -1494,3 +1498,7 @@ where fn docs_disabled() -> RpcError { anyhow!("docs are disabled").into() } + +fn spaces_disabled() -> RpcError { + anyhow::anyhow!("spaces are disabled").into() +} diff --git a/iroh/src/node/rpc/spaces.rs b/iroh/src/node/rpc/spaces.rs index 6acab9b8fa..eb164c358e 100644 --- a/iroh/src/node/rpc/spaces.rs +++ b/iroh/src/node/rpc/spaces.rs @@ -3,8 +3,8 @@ use futures_lite::Stream; use futures_util::SinkExt; use futures_util::StreamExt; use iroh_base::rpc::{RpcError, RpcResult}; +use iroh_blobs::store::Store; use iroh_willow::form::EntryOrForm; -use iroh_willow::Engine; use quic_rpc::server::{RpcChannel, RpcServerError}; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; @@ -13,147 +13,160 @@ use crate::node::IrohServerEndpoint; use crate::rpc_protocol::spaces::*; use crate::rpc_protocol::RpcService; +use super::Handler; + fn map_err(err: anyhow::Error) -> RpcError { RpcError::from(err) } -pub(crate) async fn handle_rpc_request( - engine: 
&Engine, - msg: Request, - chan: RpcChannel, -) -> Result<(), RpcServerError> { - use Request::*; - match msg { - IngestEntry(msg) => { - chan.rpc(msg, engine, |engine, req| async move { - engine - .ingest_entry(req.authorised_entry) - .await - .map(|inserted| { - if inserted { - IngestEntrySuccess::Inserted - } else { - IngestEntrySuccess::Obsolete - } - }) - .map_err(map_err) - }) - .await - } - InsertEntry(msg) => { - chan.rpc(msg, engine, |engine, req| async move { - let entry = EntryOrForm::Form(req.entry.into()); - engine - .insert_entry(entry, req.auth) - .await - .map(|(entry, inserted)| { - if inserted { - InsertEntrySuccess::Inserted(entry) - } else { - InsertEntrySuccess::Obsolete +impl Handler { + pub(crate) async fn handle_spaces_request( + self, + msg: Request, + chan: RpcChannel, + ) -> Result<(), RpcServerError> { + use Request::*; + match msg { + IngestEntry(msg) => { + chan.rpc(msg, self, |handler, req| async move { + handler + .spaces()? + .ingest_entry(req.authorised_entry) + .await + .map(|inserted| { + if inserted { + IngestEntrySuccess::Inserted + } else { + IngestEntrySuccess::Obsolete + } + }) + .map_err(map_err) + }) + .await + } + InsertEntry(msg) => { + chan.rpc(msg, self, |handler, req| async move { + let entry = EntryOrForm::Form(req.entry.into()); + handler + .spaces()? + .insert_entry(entry, req.auth) + .await + .map(|(entry, inserted)| { + if inserted { + InsertEntrySuccess::Inserted(entry) + } else { + InsertEntrySuccess::Obsolete + } + }) + .map_err(map_err) + }) + .await + } + InsertSecret(msg) => { + chan.rpc(msg, self, |handler, req| async move { + handler + .spaces()? + .insert_secret(req.secret) + .await + .map(|_| InsertSecretResponse) + .map_err(map_err) + }) + .await + } + GetEntries(msg) => { + chan.try_server_streaming(msg, self, |handler, req| async move { + let stream = handler + .spaces()? 
+ .get_entries(req.namespace, req.range) + .await + .map_err(map_err)?; + Ok(stream.map(|res| res.map(GetEntriesResponse).map_err(map_err))) + }) + .await + } + GetEntry(msg) => { + chan.rpc(msg, self, |handler, req| async move { + handler + .spaces()? + .get_entry(req.namespace, req.subspace, req.path) + .await + .map(|entry| GetEntryResponse(entry.map(Into::into))) + .map_err(map_err) + }) + .await + } + CreateNamespace(msg) => { + chan.rpc(msg, self, |handler, req| async move { + handler + .spaces()? + .create_namespace(req.kind, req.owner) + .await + .map(CreateNamespaceResponse) + .map_err(map_err) + }) + .await + } + CreateUser(msg) => { + chan.rpc(msg, self, |handler, _| async move { + handler + .spaces()? + .create_user() + .await + .map(CreateUserResponse) + .map_err(map_err) + }) + .await + } + DelegateCaps(msg) => { + chan.rpc(msg, self, |handler, req| async move { + handler + .spaces()? + .delegate_caps(req.from, req.access_mode, req.to) + .await + .map(DelegateCapsResponse) + .map_err(map_err) + }) + .await + } + ImportCaps(msg) => { + chan.rpc(msg, self, |handler, req| async move { + handler + .spaces()? 
+ .import_caps(req.caps) + .await + .map(|_| ImportCapsResponse) + .map_err(map_err) + }) + .await + } + SyncWithPeer(msg) => { + chan.bidi_streaming(msg, self, |handler, req, update_stream| { + // TODO: refactor to use less tasks + let (events_tx, events_rx) = tokio::sync::mpsc::channel(32); + tokio::task::spawn(async move { + if let Err(err) = + sync_with_peer(handler, req, events_tx.clone(), update_stream).await + { + let _ = events_tx.send(Err(err.into())).await; } - }) - .map_err(map_err) - }) - .await - } - InsertSecret(msg) => { - chan.rpc(msg, engine, |engine, req| async move { - engine - .insert_secret(req.secret) - .await - .map(|_| InsertSecretResponse) - .map_err(map_err) - }) - .await - } - GetEntries(msg) => { - chan.try_server_streaming(msg, engine, |engine, req| async move { - let stream = engine - .get_entries(req.namespace, req.range) - .await - .map_err(map_err)?; - Ok(stream.map(|res| res.map(GetEntriesResponse).map_err(map_err))) - }) - .await - } - GetEntry(msg) => { - chan.rpc(msg, engine, |engine, req| async move { - engine - .get_entry(req.namespace, req.subspace, req.path) - .await - .map(|entry| GetEntryResponse(entry.map(Into::into))) - .map_err(map_err) - }) - .await - } - CreateNamespace(msg) => { - chan.rpc(msg, engine, |engine, req| async move { - engine - .create_namespace(req.kind, req.owner) - .await - .map(CreateNamespaceResponse) - .map_err(map_err) - }) - .await - } - CreateUser(msg) => { - chan.rpc(msg, engine, |engine, _| async move { - engine - .create_user() - .await - .map(CreateUserResponse) - .map_err(map_err) - }) - .await - } - DelegateCaps(msg) => { - chan.rpc(msg, engine, |engine, req| async move { - engine - .delegate_caps(req.from, req.access_mode, req.to) - .await - .map(DelegateCapsResponse) - .map_err(map_err) - }) - .await - } - ImportCaps(msg) => { - chan.rpc(msg, engine, |engine, req| async move { - engine - .import_caps(req.caps) - .await - .map(|_| ImportCapsResponse) - .map_err(map_err) - }) - .await - } - 
SyncWithPeer(msg) => { - chan.bidi_streaming(msg, engine, |engine, req, update_stream| { - // TODO: refactor to use less tasks - let (events_tx, events_rx) = tokio::sync::mpsc::channel(32); - let engine = engine.clone(); - tokio::task::spawn(async move { - if let Err(err) = - sync_with_peer(engine, req, events_tx.clone(), update_stream).await - { - let _ = events_tx.send(Err(err.into())).await; - } - }); - ReceiverStream::new(events_rx) - }) - .await + }); + ReceiverStream::new(events_rx) + }) + .await + } + SyncWithPeerUpdate(_) => Err(RpcServerError::UnexpectedStartMessage), } - SyncWithPeerUpdate(_) => Err(RpcServerError::UnexpectedStartMessage), } } // TODO: Try to use the streams directly instead of spawning two tasks. -async fn sync_with_peer( - engine: Engine, +async fn sync_with_peer( + handler: Handler, req: SyncWithPeerRequest, events_tx: mpsc::Sender>, mut update_stream: impl Stream + Unpin + Send + 'static, ) -> anyhow::Result<()> { + let engine = handler.spaces()?; let handle = engine .sync_with_peer(req.peer, req.init) .await From d7d3355ef37b0b525a6e89c70317b58dac9c44a5 Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 29 Aug 2024 10:26:31 +0200 Subject: [PATCH 176/198] fix: fixes after merging main --- Cargo.lock | 177 ++++++++-------------------------- Cargo.toml | 6 +- iroh-net/src/endpoint.rs | 4 +- iroh-willow/Cargo.toml | 1 - iroh-willow/examples/bench.rs | 9 +- iroh-willow/src/net.rs | 11 ++- iroh-willow/tests/basic.rs | 9 +- 7 files changed, 62 insertions(+), 155 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 637c2db057..ea0b727ea5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -351,8 +351,8 @@ dependencies = [ "hyper 1.4.1", "hyper-util", "pin-project-lite", - "rustls 0.23.11", - "rustls-pemfile 2.1.2", + "rustls", + "rustls-pemfile", "rustls-pki-types", "tokio", "tokio-rustls", @@ -2025,8 +2025,8 @@ dependencies = [ "once_cell", "rand", "ring", - "rustls 0.23.11", - "rustls-pemfile 2.1.2", + "rustls", + "rustls-pemfile", "serde", 
"thiserror", "time", @@ -2074,7 +2074,7 @@ dependencies = [ "parking_lot", "rand", "resolv-conf", - "rustls 0.23.11", + "rustls", "serde", "smallvec", "thiserror", @@ -2099,7 +2099,7 @@ dependencies = [ "hickory-resolver", "ipnet", "prefix-trie", - "rustls 0.23.11", + "rustls", "serde", "thiserror", "time", @@ -2302,7 +2302,7 @@ dependencies = [ "http 1.1.0", "hyper 1.4.1", "hyper-util", - "rustls 0.23.11", + "rustls", "rustls-pki-types", "tokio", "tokio-rustls", @@ -2507,7 +2507,7 @@ dependencies = [ "iroh-io", "iroh-metrics", "iroh-net", - "iroh-quinn 0.11.3", + "iroh-quinn", "iroh-test", "iroh-willow", "nested_enum_utils", @@ -2604,7 +2604,7 @@ dependencies = [ "iroh-io", "iroh-metrics", "iroh-net", - "iroh-quinn 0.11.3", + "iroh-quinn", "iroh-test", "num_cpus", "oneshot", @@ -2618,7 +2618,7 @@ dependencies = [ "redb 1.5.1", "redb 2.1.1", "reflink-copy", - "rustls 0.23.11", + "rustls", "self_cell", "serde", "serde_json", @@ -2719,8 +2719,8 @@ dependencies = [ "rcgen", "redb 2.1.1", "regex", - "rustls 0.23.11", - "rustls-pemfile 2.1.2", + "rustls", + "rustls-pemfile", "serde", "struct_iterable", "strum 0.26.3", @@ -2798,7 +2798,7 @@ dependencies = [ "iroh-blake3", "iroh-metrics", "iroh-net", - "iroh-quinn 0.11.3", + "iroh-quinn", "iroh-test", "postcard", "rand", @@ -2878,9 +2878,9 @@ dependencies = [ "iroh-base", "iroh-metrics", "iroh-net", - "iroh-quinn 0.11.3", - "iroh-quinn-proto 0.11.6", - "iroh-quinn-udp 0.5.4", + "iroh-quinn", + "iroh-quinn-proto", + "iroh-quinn-udp", "iroh-test", "libc", "mainline", @@ -2905,9 +2905,9 @@ dependencies = [ "reqwest", "ring", "rtnetlink", - "rustls 0.23.11", - "rustls-pemfile 2.1.2", - "rustls-webpki 0.102.6", + "rustls", + "rustls-pemfile", + "rustls-webpki", "serde", "serde_json", "serde_with", @@ -2952,32 +2952,15 @@ dependencies = [ "hdrhistogram", "iroh-metrics", "iroh-net", - "iroh-quinn 0.11.3", + "iroh-quinn", "rcgen", - "rustls 0.23.11", + "rustls", "socket2", "tokio", "tracing", "tracing-subscriber", ] -[[package]] 
-name = "iroh-quinn" -version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "906875956feb75d3d41d708ddaffeb11fdb10cd05f23efbcb17600037e411779" -dependencies = [ - "bytes", - "iroh-quinn-proto 0.10.8", - "iroh-quinn-udp 0.4.2", - "pin-project-lite", - "rustc-hash 1.1.0", - "rustls 0.21.12", - "thiserror", - "tokio", - "tracing", -] - [[package]] name = "iroh-quinn" version = "0.11.3" @@ -2985,35 +2968,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fd590a39a14cfc168efa4d894de5039d65641e62d8da4a80733018ababe3c33" dependencies = [ "bytes", - "iroh-quinn-proto 0.11.6", - "iroh-quinn-udp 0.5.4", + "iroh-quinn-proto", + "iroh-quinn-udp", "pin-project-lite", "rustc-hash 2.0.0", - "rustls 0.23.11", + "rustls", "socket2", "thiserror", "tokio", "tracing", ] -[[package]] -name = "iroh-quinn-proto" -version = "0.10.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6bf92478805e67f2320459285496e1137edf5171411001a0d4d85f9bbafb792" -dependencies = [ - "bytes", - "rand", - "ring", - "rustc-hash 1.1.0", - "rustls 0.21.12", - "rustls-native-certs 0.6.3", - "slab", - "thiserror", - "tinyvec", - "tracing", -] - [[package]] name = "iroh-quinn-proto" version = "0.11.6" @@ -3024,7 +2989,7 @@ dependencies = [ "rand", "ring", "rustc-hash 2.0.0", - "rustls 0.23.11", + "rustls", "rustls-platform-verifier", "slab", "thiserror", @@ -3032,19 +2997,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "iroh-quinn-udp" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edc7915b3a31f08ee0bc02f73f4d61a5d5be146a1081ef7f70622a11627fd314" -dependencies = [ - "bytes", - "libc", - "socket2", - "tracing", - "windows-sys 0.48.0", -] - [[package]] name = "iroh-quinn-udp" version = "0.5.4" @@ -3088,7 +3040,6 @@ dependencies = [ "iroh-io", "iroh-metrics", "iroh-net", - "iroh-quinn 0.10.5", "iroh-test", "meadowcap", "postcard", @@ -4330,7 +4281,7 @@ 
dependencies = [ "futures-sink", "futures-util", "hex", - "iroh-quinn 0.11.3", + "iroh-quinn", "pin-project", "serde", "slab", @@ -4369,7 +4320,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 1.1.0", - "rustls 0.23.11", + "rustls", "thiserror", "tokio", "tracing", @@ -4385,7 +4336,7 @@ dependencies = [ "rand", "ring", "rustc-hash 1.1.0", - "rustls 0.23.11", + "rustls", "slab", "thiserror", "tinyvec", @@ -4703,8 +4654,8 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.11", - "rustls-pemfile 2.1.2", + "rustls", + "rustls-pemfile", "rustls-pki-types", "serde", "serde_json", @@ -4844,17 +4795,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "rustls" -version = "0.21.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" -dependencies = [ - "ring", - "rustls-webpki 0.101.7", - "sct", -] - [[package]] name = "rustls" version = "0.23.11" @@ -4865,23 +4805,11 @@ dependencies = [ "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.102.6", + "rustls-webpki", "subtle", "zeroize", ] -[[package]] -name = "rustls-native-certs" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" -dependencies = [ - "openssl-probe", - "rustls-pemfile 1.0.4", - "schannel", - "security-framework", -] - [[package]] name = "rustls-native-certs" version = "0.7.3" @@ -4889,21 +4817,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5" dependencies = [ "openssl-probe", - "rustls-pemfile 2.1.2", + "rustls-pemfile", "rustls-pki-types", "schannel", "security-framework", ] -[[package]] -name = "rustls-pemfile" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" -dependencies = [ - "base64 0.21.7", -] - [[package]] name = "rustls-pemfile" version = "2.1.2" @@ -4931,10 +4850,10 @@ dependencies = [ "jni", "log", "once_cell", - "rustls 0.23.11", - "rustls-native-certs 0.7.3", + "rustls", + "rustls-native-certs", "rustls-platform-verifier-android", - "rustls-webpki 0.102.6", + "rustls-webpki", "security-framework", "security-framework-sys", "webpki-roots", @@ -4947,16 +4866,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" -[[package]] -name = "rustls-webpki" -version = "0.101.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "rustls-webpki" version = "0.102.6" @@ -5048,16 +4957,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "sct" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "sec1" version = "0.7.3" @@ -5932,7 +5831,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.11", + "rustls", "rustls-pki-types", "tokio", ] @@ -5955,7 +5854,7 @@ dependencies = [ "rcgen", "reqwest", "ring", - "rustls 0.23.11", + "rustls", "serde", "serde_json", "thiserror", @@ -6383,7 +6282,7 @@ dependencies = [ "base64 0.22.1", "log", "once_cell", - "rustls 0.23.11", + "rustls", "rustls-pki-types", "url", "webpki-roots", diff --git a/Cargo.toml 
b/Cargo.toml index a5b7ef6c84..e999e16d4b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,7 +23,7 @@ debug = true [profile.dev-ci] inherits = 'dev' -opt-level = 1 +opt-level = 1 [profile.optimized-release] inherits = 'release' @@ -51,5 +51,5 @@ unused-async = "warn" # willow-encoding = { path = "../willow-rs/encoding" } # meadowcap = { path = "../willow-rs/meadowcap" } willow-data-model = { git = "https://github.com/n0-computer/willow-rs.git", branch = "iroh-dev" } -willow-encoding = { git = "https://github.com/n0-computer/willow-rs.git", branch = "iroh-dev" } -meadowcap = { git = "https://github.com/n0-computer/willow-rs.git", branch = "iroh-dev" } +willow-encoding = { git = "https://github.com/n0-computer/willow-rs.git", branch = "iroh-dev" } +meadowcap = { git = "https://github.com/n0-computer/willow-rs.git", branch = "iroh-dev" } diff --git a/iroh-net/src/endpoint.rs b/iroh-net/src/endpoint.rs index 9e3644cabc..def2c14786 100644 --- a/iroh-net/src/endpoint.rs +++ b/iroh-net/src/endpoint.rs @@ -39,8 +39,8 @@ mod rtt_actor; use self::rtt_actor::RttMessage; pub use quinn::{ - ApplicationClose, Connection, ConnectionClose, ConnectionError, ReadError, RecvStream, - RetryError, SendStream, ServerConfig, TransportConfig, VarInt, WriteError, + ApplicationClose, Connection, ConnectionClose, ConnectionError, ReadError, ReadExactError, + RecvStream, RetryError, SendStream, ServerConfig, TransportConfig, VarInt, WriteError, }; pub use super::magicsock::{ diff --git a/iroh-willow/Cargo.toml b/iroh-willow/Cargo.toml index dc9c3ad489..f2ccc03527 100644 --- a/iroh-willow/Cargo.toml +++ b/iroh-willow/Cargo.toml @@ -33,7 +33,6 @@ iroh-metrics = { version = "0.23.0", path = "../iroh-metrics", optional = true } iroh-net = { version = "0.23.0", path = "../iroh-net" } meadowcap = "0.1.0" postcard = { version = "1", default-features = false, features = [ "alloc", "use-std", "experimental-derive", ] } -quinn = { package = "iroh-quinn", version = "0.10.5" } rand = "0.8.5" rand_core = 
"0.6.4" redb = { version = "2.0.0" } diff --git a/iroh-willow/examples/bench.rs b/iroh-willow/examples/bench.rs index d24e2b8c60..1e08017d76 100644 --- a/iroh-willow/examples/bench.rs +++ b/iroh-willow/examples/bench.rs @@ -146,14 +146,17 @@ mod util { let engine = engine.clone(); let endpoint = endpoint.clone(); async move { - while let Some(mut conn) = endpoint.accept().await { - let Ok(alpn) = conn.alpn().await else { + while let Some(incoming) = endpoint.accept().await { + let Ok(mut connecting) = incoming.accept() else { + continue; + }; + let Ok(alpn) = connecting.alpn().await else { continue; }; if alpn != ALPN { continue; } - let Ok(conn) = conn.await else { + let Ok(conn) = connecting.await else { continue; }; engine.handle_connection(conn).await?; diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index e2b1a6f45b..c0e2bc9cef 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -6,8 +6,9 @@ use anyhow::{anyhow, ensure, Context as _, Result}; use futures_concurrency::future::TryJoin; use futures_util::future::TryFutureExt; use iroh_base::key::NodeId; -use iroh_net::endpoint::{Connection, ConnectionError, ReadError, RecvStream, SendStream, VarInt}; -use quinn::ReadExactError; +use iroh_net::endpoint::{ + Connection, ConnectionError, ReadError, ReadExactError, RecvStream, SendStream, VarInt, +}; use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tracing::{debug, trace}; @@ -293,7 +294,9 @@ async fn send_loop( // trace!(len, "sent"); } trace!(?channel, "send: close writer"); - send_stream.finish().await?; + send_stream.finish()?; + // We don't await SendStream::stopped, because we rely on application level closing notifiations, + // and make sure that the connection is closed gracefully in any case. trace!(?channel, "send: done"); Ok(()) } @@ -323,7 +326,7 @@ pub(crate) async fn terminate_gracefully(conn: &Connection) -> Result<()> { // Send a single byte on a newly opened uni stream. 
let mut send_stream = conn.open_uni().await?; send_stream.write_u8(1).await?; - send_stream.finish().await?; + send_stream.finish()?; // Wait until we either receive the goodbye byte from the other peer, or for the other peer // to close the connection with the expected error code. match tokio::time::timeout(SHUTDOWN_TIMEOUT, wait_for_goodbye_or_graceful_close(conn)).await { diff --git a/iroh-willow/tests/basic.rs b/iroh-willow/tests/basic.rs index 30afa0b3fd..7b4b679bfe 100644 --- a/iroh-willow/tests/basic.rs +++ b/iroh-willow/tests/basic.rs @@ -332,14 +332,17 @@ mod util { let engine = engine.clone(); let endpoint = endpoint.clone(); async move { - while let Some(mut conn) = endpoint.accept().await { - let Ok(alpn) = conn.alpn().await else { + while let Some(incoming) = endpoint.accept().await { + let Ok(mut connecting) = incoming.accept() else { + continue; + }; + let Ok(alpn) = connecting.alpn().await else { continue; }; if alpn != ALPN { continue; } - let Ok(conn) = conn.await else { + let Ok(conn) = connecting.await else { continue; }; engine.handle_connection(conn).await?; From de2eaa2e6b0445aca868fd2e04cbbcd1a8bbbf71 Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 29 Aug 2024 10:28:23 +0200 Subject: [PATCH 177/198] chore: use willow-rs main branch --- Cargo.lock | 6 +++--- Cargo.toml | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ea0b727ea5..bb92b6846a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3284,7 +3284,7 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" [[package]] name = "meadowcap" version = "0.1.0" -source = "git+https://github.com/n0-computer/willow-rs.git?branch=iroh-dev#9553d8f9aa73092b0deaae2e832964876f2c7caa" +source = "git+https://github.com/n0-computer/willow-rs.git?branch=main#1e9943e39d08f9ec8b79fc6a5805be449a19f5d0" dependencies = [ "either", "signature", @@ -6487,7 +6487,7 @@ checksum = 
"7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" [[package]] name = "willow-data-model" version = "0.1.0" -source = "git+https://github.com/n0-computer/willow-rs.git?branch=iroh-dev#9553d8f9aa73092b0deaae2e832964876f2c7caa" +source = "git+https://github.com/n0-computer/willow-rs.git?branch=main#1e9943e39d08f9ec8b79fc6a5805be449a19f5d0" dependencies = [ "bytes", "either", @@ -6499,7 +6499,7 @@ dependencies = [ [[package]] name = "willow-encoding" version = "0.1.0" -source = "git+https://github.com/n0-computer/willow-rs.git?branch=iroh-dev#9553d8f9aa73092b0deaae2e832964876f2c7caa" +source = "git+https://github.com/n0-computer/willow-rs.git?branch=main#1e9943e39d08f9ec8b79fc6a5805be449a19f5d0" dependencies = [ "either", "syncify", diff --git a/Cargo.toml b/Cargo.toml index e999e16d4b..dd55465de9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -50,6 +50,6 @@ unused-async = "warn" # willow-data-model = { path = "../willow-rs/data-model" } # willow-encoding = { path = "../willow-rs/encoding" } # meadowcap = { path = "../willow-rs/meadowcap" } -willow-data-model = { git = "https://github.com/n0-computer/willow-rs.git", branch = "iroh-dev" } -willow-encoding = { git = "https://github.com/n0-computer/willow-rs.git", branch = "iroh-dev" } -meadowcap = { git = "https://github.com/n0-computer/willow-rs.git", branch = "iroh-dev" } +willow-data-model = { git = "https://github.com/n0-computer/willow-rs.git", branch = "main" } +willow-encoding = { git = "https://github.com/n0-computer/willow-rs.git", branch = "main" } +meadowcap = { git = "https://github.com/n0-computer/willow-rs.git", branch = "main" } From d3435754ebfac617c84fe6d06f93b311e691f015 Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 29 Aug 2024 22:43:25 +0200 Subject: [PATCH 178/198] tests: disable test to see if others pass --- iroh/src/client/docs.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/iroh/src/client/docs.rs b/iroh/src/client/docs.rs index 2b6946fcd8..6e2c3afcfb 100644 --- 
a/iroh/src/client/docs.rs +++ b/iroh/src/client/docs.rs @@ -753,7 +753,11 @@ mod tests { use super::*; + // TODO(Frando): This fails consistently with timeout, the task never joins. + // Something in the addition of willow engine to the node makes this + // test timeout - debugging outstanding. #[tokio::test] + #[ignore = "todo"] async fn test_drop_doc_client_sync() -> Result<()> { let _guard = iroh_test::logging::setup(); @@ -763,12 +767,17 @@ mod tests { let doc = client.docs().create().await?; let res = std::thread::spawn(move || { + println!("now drop doc"); drop(doc); + println!("now drop node"); drop(node); + println!("done"); }); + println!("wait task"); tokio::task::spawn_blocking(move || res.join().map_err(|e| anyhow::anyhow!("{:?}", e))) .await??; + println!("task done"); Ok(()) } From c9866d80335e1ae6da29c710533c16498f0eee5e Mon Sep 17 00:00:00 2001 From: Frando Date: Fri, 30 Aug 2024 00:35:45 +0200 Subject: [PATCH 179/198] chore: typos --- iroh-willow/src/net.rs | 2 +- iroh/src/node/builder.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index c0e2bc9cef..4735ce77a3 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -295,7 +295,7 @@ async fn send_loop( } trace!(?channel, "send: close writer"); send_stream.finish()?; - // We don't await SendStream::stopped, because we rely on application level closing notifiations, + // We don't await SendStream::stopped, because we rely on application level closing notifications, // and make sure that the connection is closed gracefully in any case. 
trace!(?channel, "send: done"); Ok(()) diff --git a/iroh/src/node/builder.rs b/iroh/src/node/builder.rs index 837f879f5b..351d31db25 100644 --- a/iroh/src/node/builder.rs +++ b/iroh/src/node/builder.rs @@ -622,7 +622,7 @@ where Some(engine) } SpacesStorage::Persistent(_) => { - unimplemented!("peristent storage for willow is not yet implemented") + unimplemented!("persistent storage for willow is not yet implemented") } }; // Spawn the willow engine. From 0418fdf9c657646ac5781b076712eaba6295d2ed Mon Sep 17 00:00:00 2001 From: Frando Date: Fri, 30 Aug 2024 00:41:14 +0200 Subject: [PATCH 180/198] chore: clippy --- iroh/src/client/spaces.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/iroh/src/client/spaces.rs b/iroh/src/client/spaces.rs index 40bc1dcf1d..65be8ee852 100644 --- a/iroh/src/client/spaces.rs +++ b/iroh/src/client/spaces.rs @@ -488,7 +488,7 @@ impl SyncProgress { } fn is_ready(&self) -> bool { - self.complete == true || self.failed.is_some() + self.complete || self.failed.is_some() } fn into_completion(self) -> Result { @@ -524,17 +524,17 @@ impl SyncHandleSet { /// /// Returns an error if there is already a sync intent for this peer in the set. 
pub fn insert(&mut self, peer: NodeId, handle: SyncHandle) -> Result<(), IntentExistsError> { - if self.intents.contains_key(&peer) { - Err(IntentExistsError(peer)) - } else { + if let std::collections::hash_map::Entry::Vacant(e) = self.intents.entry(peer) { let SyncHandle { update_tx, event_rx, state, } = handle; self.event_rx.insert(peer, StreamNotifyClose::new(event_rx)); - self.intents.insert(peer, HandleState { update_tx, state }); + e.insert(HandleState { update_tx, state }); Ok(()) + } else { + Err(IntentExistsError(peer)) } } From 81c562fa2a7909a46e2852593105808dcd9d26b8 Mon Sep 17 00:00:00 2001 From: Frando Date: Fri, 30 Aug 2024 00:53:50 +0200 Subject: [PATCH 181/198] chore: fmt --- iroh-willow/src/store/auth.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/iroh-willow/src/store/auth.rs b/iroh-willow/src/store/auth.rs index 6e2142276a..7b435fd6d3 100644 --- a/iroh-willow/src/store/auth.rs +++ b/iroh-willow/src/store/auth.rs @@ -236,8 +236,7 @@ impl Auth { } else { None }; - let pack = - CapabilityPack::Read(ReadAuthorisation::new(new_read_cap, new_subspace_cap)); + let pack = CapabilityPack::Read(ReadAuthorisation::new(new_read_cap, new_subspace_cap)); Ok(pack) } From aeff2735bfd88bb5fbad832f6e0362e07136e76b Mon Sep 17 00:00:00 2001 From: Franz Heinzmann Date: Fri, 30 Aug 2024 01:25:40 +0200 Subject: [PATCH 182/198] fix(iroh-willow): actually create subspace capability (#2683) Subspace capabilities were not created when creating owned namespaces. Fixed that. Also adds a test written by @Voronar, thanks! 
--- iroh-willow/src/form.rs | 6 ++++ iroh-willow/src/store/auth.rs | 28 ++++++++++------- iroh-willow/tests/basic.rs | 59 ++++++++++++++++++++++++++++++++++- 3 files changed, 81 insertions(+), 12 deletions(-) diff --git a/iroh-willow/src/form.rs b/iroh-willow/src/form.rs index c93f28914d..113a416748 100644 --- a/iroh-willow/src/form.rs +++ b/iroh-willow/src/form.rs @@ -114,6 +114,12 @@ impl EntryForm { payload: PayloadForm::Bytes(payload.into()), } } + + /// Sets the subspace for the entry. + pub fn subspace(mut self, subspace: SubspaceId) -> Self { + self.subspace_id = SubspaceForm::Exact(subspace); + self + } } /// Select which capability to use for authenticating a new entry. diff --git a/iroh-willow/src/store/auth.rs b/iroh-willow/src/store/auth.rs index 7b435fd6d3..84b3c74d2a 100644 --- a/iroh-willow/src/store/auth.rs +++ b/iroh-willow/src/store/auth.rs @@ -18,7 +18,10 @@ use crate::{ data_model::WriteCapability, grouping::AreaOfInterest, keys::{NamespaceId, UserId}, - meadowcap::{AccessMode, FailedDelegationError, McCapability, ReadAuthorisation}, + meadowcap::{ + AccessMode, FailedDelegationError, McCapability, McSubspaceCapability, + ReadAuthorisation, + }, }, store::traits::{CapsStorage, SecretStorage, SecretStoreError, Storage}, }; @@ -126,12 +129,6 @@ impl Auth { namespace_id: NamespaceId, user_id: UserId, ) -> Result<[CapabilityPack; 2], AuthError> { - // let namespace_key = namespace_id - // .into_public_key() - // .map_err(|_| AuthError::InvalidNamespaceId(namespace_id))?; - // let user_key: UserPublicKey = user_id - // .into_public_key() - // .map_err(|_| AuthError::InvalidUserId(user_id))?; let read_cap = self.create_read_cap(namespace_id, user_id)?; let write_cap = self.create_write_cap(namespace_id, user_id)?; let pack = [read_cap, write_cap]; @@ -145,16 +142,25 @@ impl Auth { user_key: UserId, ) -> Result { let cap = if namespace_key.is_communal() { - McCapability::new_communal(namespace_key, user_key, AccessMode::Read)? 
+ let read_cap = McCapability::new_communal(namespace_key, user_key, AccessMode::Read)?; + ReadAuthorisation::new(read_cap, None) } else { let namespace_secret = self .secrets .get_namespace(&namespace_key) .ok_or(AuthError::MissingNamespaceSecret(namespace_key))?; - McCapability::new_owned(namespace_key, &namespace_secret, user_key, AccessMode::Read)? + let read_cap = McCapability::new_owned( + namespace_key, + &namespace_secret, + user_key, + AccessMode::Read, + )?; + let subspace_cap = + McSubspaceCapability::new(namespace_key, &namespace_secret, user_key) + .map_err(AuthError::SubspaceCapDelegationFailed)?; + ReadAuthorisation::new(read_cap, Some(subspace_cap)) }; - // TODO: Subspace capability. - let pack = CapabilityPack::Read(ReadAuthorisation::new(cap, None)); + let pack = CapabilityPack::Read(cap); Ok(pack) } diff --git a/iroh-willow/tests/basic.rs b/iroh-willow/tests/basic.rs index 7b4b679bfe..7603565789 100644 --- a/iroh-willow/tests/basic.rs +++ b/iroh-willow/tests/basic.rs @@ -8,16 +8,19 @@ use futures_lite::StreamExt; use iroh_blobs::store::{Map, MapEntry}; use iroh_io::AsyncSliceReaderExt; use iroh_willow::{ - interest::{Interests, IntoAreaOfInterest}, + form::EntryForm, + interest::{CapSelector, DelegateTo, Interests, IntoAreaOfInterest, RestrictArea}, proto::{ data_model::{Path, PathExt}, grouping::{Area, AreaExt, Range3d}, + keys::NamespaceKind, }, session::{ intents::{Completion, EventKind}, SessionInit, SessionMode, }, }; +use meadowcap::AccessMode; use self::util::{create_rng, insert, setup_and_delegate, spawn_two, Peer}; @@ -276,6 +279,60 @@ async fn peer_manager_twoway_loop() -> Result<()> { Ok(()) } +#[tokio::test(flavor = "multi_thread")] +async fn owned_namespace_subspace_write_sync() -> Result<()> { + iroh_test::logging::setup_multithreaded(); + let mut rng = create_rng("owned_namespace_subspace_write_sync"); + + let [alfie, betty] = spawn_two(&mut rng).await?; + + let user_alfie = alfie.create_user().await?; + let user_betty = 
betty.create_user().await?; + + let namespace_id = alfie + .create_namespace(NamespaceKind::Owned, user_alfie) + .await?; + + let restriction = RestrictArea::Restrict(Area::new_subspace(user_betty)); + + let cap_for_betty = alfie + .delegate_caps( + CapSelector::any(namespace_id), + AccessMode::Write, + DelegateTo::new(user_betty, restriction), + ) + .await?; + + betty.import_caps(cap_for_betty).await?; + + // Insert an entry into our subspace. + let path = Path::from_bytes(&[b"foo"])?; + let entry = EntryForm::new_bytes(namespace_id, path, "foo"); + betty.insert_entry(entry, user_betty).await?; + + // Make sure we cannot write into alfie's subspace. + let path = Path::from_bytes(&[b"foo"])?; + let entry = EntryForm::new_bytes(namespace_id, path, "foo").subspace(user_alfie); + assert!(betty.insert_entry(entry, user_betty).await.is_err()); + + // Make sure sync runs correctl.y + let init = SessionInit::new( + Interests::builder().add_full_cap(namespace_id), + SessionMode::ReconcileOnce, + ); + let mut intent = alfie.sync_with_peer(betty.node_id(), init).await.unwrap(); + let completion = intent.complete().await.expect("failed to complete intent"); + assert_eq!(completion, Completion::Partial); + let entries: Vec<_> = alfie + .get_entries(namespace_id, Range3d::new_full()) + .await? 
+ .try_collect() + .await?; + assert_eq!(entries.len(), 1); + + Ok(()) +} + mod util { use std::sync::{Arc, Mutex}; From ce9635822087a389e4d8c38b50aecaef07f7d2a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Kr=C3=BCger?= Date: Fri, 30 Aug 2024 09:46:02 +0200 Subject: [PATCH 183/198] depend on exact version of `derive_more` --- iroh-willow/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/iroh-willow/Cargo.toml b/iroh-willow/Cargo.toml index f2ccc03527..01f84eb910 100644 --- a/iroh-willow/Cargo.toml +++ b/iroh-willow/Cargo.toml @@ -18,7 +18,7 @@ workspace = true anyhow = "1" bytes = { version = "1.4", features = ["serde"] } curve25519-dalek = { version = "4.1.3", features = [ "digest", "rand_core", "serde", ] } -derive_more = { version = "1.0.0-beta.6", features = [ "debug", "deref", "display", "from", "try_into", "into", "as_ref", "try_from", ] } +derive_more = { version = "=1.0.0-beta.7", features = [ "debug", "deref", "display", "from", "try_into", "into", "as_ref", "try_from", ] } ed25519-dalek = { version = "2.0.0", features = ["serde", "rand_core"] } futures-buffered = "0.2.6" futures-concurrency = "7.6.0" From 9216fd64ec8f5d802bea25a9605f33af1c6cfc91 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Kr=C3=BCger?= Date: Fri, 30 Aug 2024 12:05:49 +0200 Subject: [PATCH 184/198] fix: Don't block current tokio task in `Drop` --- iroh-willow/src/engine/actor.rs | 31 ++++++++++++++++++++----------- iroh/src/client/docs.rs | 1 - 2 files changed, 20 insertions(+), 12 deletions(-) diff --git a/iroh-willow/src/engine/actor.rs b/iroh-willow/src/engine/actor.rs index c8a68307d2..701d4fc127 100644 --- a/iroh-willow/src/engine/actor.rs +++ b/iroh-willow/src/engine/actor.rs @@ -218,25 +218,34 @@ impl Drop for ActorHandle { fn drop(&mut self) { // this means we're dropping the last reference if let Some(handle) = Arc::get_mut(&mut self.join_handle) { + // gain ownership of handle let handle = handle.take().expect("can only drop once"); + // 
gain ownership of inbox_tx + let (dumb, _) = tokio::sync::mpsc::channel(1); + let inbox_tx = std::mem::replace(&mut self.inbox_tx, dumb); + + // shutdown + let shutdown = move || { + if let Err(err) = inbox_tx.blocking_send(Input::Shutdown { reply: None }) { + warn!(?err, "Failed to send shutdown"); + } else { + if let Err(err) = handle.join() { + warn!(?err, "Failed to join sync actor"); + } + } + }; + match tokio::runtime::Handle::try_current() { Ok(runtime) => { - let (dumb, _) = tokio::sync::mpsc::channel(1); - let inbox_tx = std::mem::replace(&mut self.inbox_tx, dumb); - runtime - .spawn(async move { inbox_tx.send(Input::Shutdown { reply: None }).await }); + // We shouldn't block the runtime + runtime.spawn_blocking(shutdown); } Err(_) => { - self.inbox_tx - .blocking_send(Input::Shutdown { reply: None }) - .ok(); + // We can do everything sync + shutdown(); } } - - if let Err(err) = handle.join() { - warn!(?err, "Failed to join sync actor"); - } } } } diff --git a/iroh/src/client/docs.rs b/iroh/src/client/docs.rs index 6e2c3afcfb..04fb0b50f5 100644 --- a/iroh/src/client/docs.rs +++ b/iroh/src/client/docs.rs @@ -757,7 +757,6 @@ mod tests { // Something in the addition of willow engine to the node makes this // test timeout - debugging outstanding. 
#[tokio::test] - #[ignore = "todo"] async fn test_drop_doc_client_sync() -> Result<()> { let _guard = iroh_test::logging::setup(); From b593a807428a8045652a94b194cc00a1d64d3da8 Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 29 Aug 2024 15:59:49 +0200 Subject: [PATCH 185/198] feat: event subscriptions --- iroh-willow/src/engine/actor.rs | 5 +- iroh-willow/src/proto/grouping.rs | 3 + iroh-willow/src/session/data.rs | 47 +++--- iroh-willow/src/session/reconciler.rs | 10 +- iroh-willow/src/session/run.rs | 3 - iroh-willow/src/store.rs | 27 ++-- iroh-willow/src/store/entry.rs | 179 --------------------- iroh-willow/src/store/memory.rs | 221 +++++++++++++++++++++++--- iroh-willow/src/store/traits.rs | 163 ++++++++++++++++++- 9 files changed, 405 insertions(+), 253 deletions(-) delete mode 100644 iroh-willow/src/store/entry.rs diff --git a/iroh-willow/src/engine/actor.rs b/iroh-willow/src/engine/actor.rs index 701d4fc127..3728a5734c 100644 --- a/iroh-willow/src/engine/actor.rs +++ b/iroh-willow/src/engine/actor.rs @@ -22,8 +22,7 @@ use crate::{ }, session::{intents::Intent, run_session, Error, EventSender, SessionHandle}, store::{ - entry::EntryOrigin, - traits::{EntryReader, SecretStorage, Storage}, + traits::{EntryOrigin, EntryReader, EntryStorage, SecretStorage, Storage}, Store, }, }; @@ -445,7 +444,7 @@ impl Actor { origin, reply, } => { - let res = self.store.entries().ingest(&authorised_entry, origin); + let res = self.store.entries().ingest_entry(&authorised_entry, origin); send_reply(reply, res) } Input::InsertEntry { entry, auth, reply } => { diff --git a/iroh-willow/src/proto/grouping.rs b/iroh-willow/src/proto/grouping.rs index a6e3510c6e..140acffa31 100644 --- a/iroh-willow/src/proto/grouping.rs +++ b/iroh-willow/src/proto/grouping.rs @@ -37,6 +37,7 @@ pub type AreaOfInterest = willow_data_model::grouping::AreaOfInterest< >; /// Extension methods for [`AreaOfInterest`]. +// TODO: Upstream to willow-rs as methods on [`AreaOfInterest]. 
pub trait AreaOfInterestExt { /// Creates a new area of interest with the specified area and no other limits. fn with_area(area: Area) -> AreaOfInterest; @@ -53,6 +54,7 @@ impl AreaOfInterestExt for AreaOfInterest { } /// Extension methods for [`Area`]. +// TODO: Upstream to willow-rs as methods on [`Area`]. pub trait AreaExt { /// Returns `true` if the area contains `point`. fn includes_point(&self, point: &Point) -> bool; @@ -93,6 +95,7 @@ impl AreaExt for Area { /// A single point in the 3D range space. /// /// I.e. an entry. +// TODO: Upstream to willow-rs. #[derive(Debug, Clone, Hash, Eq, PartialEq, Serialize, Deserialize)] pub struct Point { #[serde(with = "data_model::serde_encoding::path")] diff --git a/iroh-willow/src/session/data.rs b/iroh-willow/src/session/data.rs index dc52568895..b58b2dfece 100644 --- a/iroh-willow/src/session/data.rs +++ b/iroh-willow/src/session/data.rs @@ -1,5 +1,4 @@ use futures_lite::StreamExt; -use tokio::sync::broadcast; use crate::{ proto::{ @@ -8,8 +7,7 @@ use crate::{ }, session::{channels::ChannelSenders, static_tokens::StaticTokens, Error, SessionId}, store::{ - entry::{EntryChannel, EntryOrigin}, - traits::Storage, + traits::{EntryOrigin, EntryStorage, Storage, StoreEvent, SubscribeParams}, Store, }, util::stream::CancelableReceiver, @@ -51,7 +49,7 @@ impl DataSender { } } pub async fn run(mut self) -> Result<(), Error> { - let mut entry_stream = self.store.entries().subscribe(self.session_id); + let mut entry_stream = futures_concurrency::stream::StreamGroup::new(); loop { tokio::select! { input = self.inbox.next() => { @@ -59,21 +57,28 @@ impl DataSender { break; }; let Input::AoiIntersection(intersection) = input; - self.store.entries().watch_area( - self.session_id, - intersection.namespace, - intersection.intersection.area.clone(), - ); + let params = SubscribeParams::default().ingest_only().ignore_remote(self.session_id); + // TODO: We could start at the progress id at the beginning of the session. 
+ let stream = self + .store + .entries() + .subscribe_area( + intersection.namespace, + intersection.intersection.area.clone(), + params, + ) + .filter_map(|event| match event { + StoreEvent::Ingested(_id, entry, _origin) => Some(entry), + // We get only Ingested events because we set ingest_only() param above. + _ => unreachable!("expected only Ingested event but got another event"), + }); + entry_stream.insert(stream); }, - entry = entry_stream.recv() => { + entry = entry_stream.next(), if !entry_stream.is_empty() => { match entry { - Ok(entry) => self.send_entry(entry).await?, - Err(broadcast::error::RecvError::Closed) => break, - Err(broadcast::error::RecvError::Lagged(_count)) => { - // TODO: Queue another reconciliation - } + Some(entry) => self.send_entry(entry).await?, + None => break, } - } } } @@ -149,13 +154,9 @@ impl DataReceiver { message.dynamic_token, ) .await?; - self.store.entries().ingest( - &authorised_entry, - EntryOrigin::Remote { - session: self.session_id, - channel: EntryChannel::Data, - }, - )?; + self.store + .entries() + .ingest_entry(&authorised_entry, EntryOrigin::Remote(self.session_id))?; let (entry, _token) = authorised_entry.into_parts(); // TODO: handle offset self.current_payload.set( diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index edd4bf6379..c0ffc994f7 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -29,8 +29,7 @@ use crate::{ Error, Role, SessionId, }, store::{ - entry::{EntryChannel, EntryOrigin}, - traits::{EntryReader, EntryStorage, SplitAction, SplitOpts, Storage}, + traits::{EntryOrigin, EntryReader, EntryStorage, SplitAction, SplitOpts, Storage}, Store, }, util::{ @@ -164,12 +163,9 @@ impl Reconciler { authorised_entry.entry().payload_length(), message.entry.available, )?; - self.shared.store.entries().ingest( + self.shared.store.entries().ingest_entry( &authorised_entry, - EntryOrigin::Remote { - session: 
self.shared.session_id, - channel: EntryChannel::Reconciliation, - }, + EntryOrigin::Remote(self.shared.session_id), )?; } ReconciliationMessage::SendPayload(message) => { diff --git a/iroh-willow/src/session/run.rs b/iroh-willow/src/session/run.rs index e5324e3f57..ad45228582 100644 --- a/iroh-willow/src/session/run.rs +++ b/iroh-willow/src/session/run.rs @@ -386,9 +386,6 @@ pub(crate) async fn run_session( .try_join() .await; - // Unsubscribe from the store. - store.entries().unsubscribe(&session_id); - // Track if we closed the session by triggering the cancel token, or if the remote peer closed // the session by closing the control channel. let we_cancelled = close_session_token.is_cancelled(); diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index ee8efdda41..e729fd6e82 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -6,6 +6,7 @@ use anyhow::{anyhow, Context, Result}; use rand_core::CryptoRngCore; +use traits::EntryStorage; use crate::{ form::{AuthForm, EntryForm, EntryOrForm, SubspaceForm, TimestampForm}, @@ -22,42 +23,40 @@ use crate::{ use self::auth::{Auth, AuthError}; use self::traits::Storage; -pub(crate) use self::entry::{EntryOrigin, WatchableEntryStore}; +pub(crate) use self::traits::EntryOrigin; pub(crate) mod auth; -pub(crate) mod entry; pub mod memory; pub mod traits; /// Storage for the Willow engine. +/// +/// Wraps a `Storage` instance and adds the [`Auth`] struct that uses the secret and caps storage to provide +/// authentication when inserting entries. 
#[derive(Debug, Clone)] pub(crate) struct Store { - entries: WatchableEntryStore, - secrets: S::Secrets, - payloads: S::Payloads, + storage: S, auth: Auth, } impl Store { pub fn new(storage: S) -> Self { Self { - entries: WatchableEntryStore::new(storage.entries().clone()), - secrets: storage.secrets().clone(), - payloads: storage.payloads().clone(), auth: Auth::new(storage.secrets().clone(), storage.caps().clone()), + storage, } } - pub fn entries(&self) -> &WatchableEntryStore { - &self.entries + pub fn entries(&self) -> &S::Entries { + self.storage.entries() } pub fn secrets(&self) -> &S::Secrets { - &self.secrets + self.storage.secrets() } pub fn payloads(&self) -> &S::Payloads { - &self.payloads + self.storage.payloads() } pub fn auth(&self) -> &Auth { @@ -97,7 +96,7 @@ impl Store { let authorised_entry = AuthorisedEntry::new_unchecked(entry, token); let inserted = self .entries() - .ingest(&authorised_entry, EntryOrigin::Local)?; + .ingest_entry(&authorised_entry, EntryOrigin::Local)?; Ok((authorised_entry, inserted)) } @@ -118,7 +117,7 @@ impl Store { /// the provided [`Store`]. /// /// `user_id` must be set to the user who is authenticating the entry. - pub async fn form_to_entry( + async fn form_to_entry( &self, form: EntryForm, user_id: UserId, // auth: AuthForm, diff --git a/iroh-willow/src/store/entry.rs b/iroh-willow/src/store/entry.rs deleted file mode 100644 index 499066f1ee..0000000000 --- a/iroh-willow/src/store/entry.rs +++ /dev/null @@ -1,179 +0,0 @@ -use std::{ - collections::HashMap, - sync::{Arc, Mutex}, -}; -use tokio::sync::broadcast; - -use crate::proto::{ - data_model::{AuthorisedEntry, NamespaceId}, - grouping::Area, -}; - -pub type SessionId = u64; - -use super::traits::EntryStorage; - -const BROADCAST_CAP: usize = 1024; - -#[derive(Debug, Clone, Copy, Eq, PartialEq)] -pub enum EntryOrigin { - /// The entry is inserted locally. - Local, - /// The entry was received from a peer. 
- Remote { - session: SessionId, - channel: EntryChannel, - }, // TODO: Add details. - // Remote { - // peer: NodeId, - // channel: EntryChannel, - // }, -} - -impl EntryOrigin { - // pub fn peer(&self) -> Option { - // match self { - // EntryOrigin::Local => None, - // EntryOrigin::Remote { peer, .. } => Some(peer) - // } - // } -} - -#[derive(Debug, Clone, Copy, Eq, PartialEq)] -pub enum EntryChannel { - Reconciliation, - Data, -} - -#[derive(Debug, Clone)] -pub struct WatchableEntryStore { - storage: ES, - broadcast: Arc>, -} - -impl WatchableEntryStore { - pub(super) fn new(storage: ES) -> Self { - Self { - storage, - broadcast: Default::default(), - } - } - - // /// Returns a store reader. - // pub fn reader(&self) -> ES::Reader { - // self.storage.reader() - // } - - /// Returns a store snapshot. - pub fn snapshot(&self) -> anyhow::Result { - self.storage.snapshot() - } - - /// Returns a store reader. - pub fn reader(&self) -> ES::Reader { - self.storage.reader() - } - - /// Ingest a new entry. - /// - /// Returns `true` if the entry was stored, and `false` if the entry already exists or is - /// obsoleted by an existing entry. - pub fn ingest(&self, entry: &AuthorisedEntry, origin: EntryOrigin) -> anyhow::Result { - if self.storage.ingest_entry(entry)? { - self.broadcast.lock().unwrap().broadcast(entry, origin); - Ok(true) - } else { - Ok(false) - } - } - - /// Setup a new subscription, identified by `session_id`. - /// - /// The subscription will initially be empty. To actually receive newly ingested entries, - /// add areas to watch with [`Self::watch_area`]. - /// - /// Returns a [`broadcast::Receiver`]. - pub fn subscribe(&self, session_id: SessionId) -> broadcast::Receiver { - self.broadcast - .lock() - .unwrap() - .subscribe(session_id, BROADCAST_CAP) - } - - /// Remove a subscription. 
- pub fn unsubscribe(&self, session_id: &SessionId) { - self.broadcast.lock().unwrap().unsubscribe(session_id) - } - - /// Add an area to the list of watched areas for a subscription. - /// - /// The subscription has to be setup with [`Self::subscribe`] to actually receive new entries - /// that fall within the area. - pub fn watch_area(&self, session: SessionId, namespace: NamespaceId, area: Area) { - self.broadcast - .lock() - .unwrap() - .watch_area(session, namespace, area); - } -} - -#[derive(Debug, Default)] -struct Broadcaster { - senders: HashMap>, - watched_areas: HashMap>>, -} - -impl Broadcaster { - fn subscribe( - &mut self, - session: SessionId, - cap: usize, - ) -> broadcast::Receiver { - self.senders - .entry(session) - .or_insert_with(|| broadcast::Sender::new(cap)) - .subscribe() - } - - fn unsubscribe(&mut self, session: &SessionId) { - self.senders.remove(session); - self.watched_areas.retain(|_namespace, sessions| { - sessions.remove(session); - !sessions.is_empty() - }); - } - - fn watch_area(&mut self, session: SessionId, namespace: NamespaceId, area: Area) { - self.watched_areas - .entry(namespace) - .or_default() - .entry(session) - .or_default() - .push(area) - } - - fn broadcast(&mut self, entry: &AuthorisedEntry, origin: EntryOrigin) { - let Some(sessions) = self.watched_areas.get_mut(entry.entry().namespace_id()) else { - return; - }; - let mut dropped_receivers = vec![]; - for (session_id, areas) in sessions { - // Do not broadcast back into sessions where the entry came from. - if matches!(origin, EntryOrigin::Remote { session, ..} if session == *session_id) { - continue; - } - // Check if the session is watching an area where the entry falls into. - if areas.iter().any(|area| area.includes_entry(entry.entry())) { - if let Some(sender) = self.senders.get(session_id) { - // Send the entry and mark senders with dropped receivers for removal. 
- if let Err(_err) = sender.send(entry.clone()) { - dropped_receivers.push(*session_id); - } - } - } - } - for session_id in dropped_receivers { - self.unsubscribe(&session_id); - } - } -} diff --git a/iroh-willow/src/store/memory.rs b/iroh-willow/src/store/memory.rs index 4ccc46a8af..fd70a7e933 100644 --- a/iroh-willow/src/store/memory.rs +++ b/iroh-willow/src/store/memory.rs @@ -6,11 +6,15 @@ //! hopefully easily kept correct. use std::cell::RefCell; -use std::collections::HashMap; -use std::rc::Rc; +use std::collections::{HashMap, VecDeque}; +use std::pin::Pin; +use std::rc::{Rc, Weak}; +use std::task::{ready, Context, Poll, Waker}; use anyhow::Result; +use futures_util::Stream; +use crate::proto::grouping::Area; use crate::{ interest::{CapSelector, CapabilityPack}, proto::{ @@ -23,6 +27,9 @@ use crate::{ store::traits::{self, RangeSplit, SplitAction, SplitOpts}, }; +use super::traits::{StoreEvent, SubscribeParams}; +use super::EntryOrigin; + #[derive(Debug, Clone, Default)] pub struct Store { secrets: Rc>, @@ -97,6 +104,7 @@ impl traits::SecretStorage for Rc> { #[derive(Debug, Default)] pub struct EntryStore { entries: HashMap>, + events: EventQueue, } // impl + 'static> ReadonlyStore for T { @@ -221,27 +229,14 @@ impl traits::EntryReader for Rc> { } } -impl traits::EntryStorage for Rc> { - type Snapshot = Self; - type Reader = Self; - - fn reader(&self) -> Self::Reader { - self.clone() - } - - fn snapshot(&self) -> Result { - let entries = self.borrow().entries.clone(); - Ok(Rc::new(RefCell::new(EntryStore { entries }))) - } - - fn ingest_entry(&self, entry: &AuthorisedEntry) -> Result { - let mut slf = self.borrow_mut(); - let entries = slf +impl EntryStore { + fn ingest_entry(&mut self, entry: &AuthorisedEntry, origin: EntryOrigin) -> Result { + let entries = self .entries .entry(*entry.entry().namespace_id()) .or_default(); let new = entry.entry(); - let mut to_remove = vec![]; + let mut to_prune = vec![]; for (i, existing) in entries.iter().enumerate() { 
let existing = existing.entry(); if existing == new { @@ -258,17 +253,199 @@ impl traits::EntryStorage for Rc> { && new.path().is_prefix_of(existing.path()) && new.is_newer_than(existing) { - to_remove.push(i); + to_prune.push(i); } } - for i in to_remove { - entries.remove(i); + for i in to_prune { + let pruned = entries.remove(i).into_parts().0; + self.events.insert(|id| { + StoreEvent::Pruned( + id, + traits::PruneEvent { + pruned: ( + pruned.namespace_id().clone(), + pruned.subspace_id().clone(), + pruned.path().clone(), + ), + by: entry.clone(), + }, + ) + }); } entries.push(entry.clone()); + self.events + .insert(|id| StoreEvent::Ingested(id, entry.clone(), origin)); Ok(true) } } +impl traits::EntryStorage for Rc> { + type Snapshot = Self; + type Reader = Self; + + fn reader(&self) -> Self::Reader { + self.clone() + } + + fn snapshot(&self) -> Result { + let entries = self.borrow().entries.clone(); + Ok(Rc::new(RefCell::new(EntryStore { + entries, + events: EventQueue::default(), + }))) + } + + fn ingest_entry(&self, entry: &AuthorisedEntry, origin: EntryOrigin) -> Result { + let mut slf = self.borrow_mut(); + slf.ingest_entry(entry, origin) + } + + fn subscribe_area( + &self, + namespace: NamespaceId, + area: Area, + params: SubscribeParams, + ) -> impl Stream + Unpin + 'static { + EventStream { + area, + params, + namespace, + progress_id: self.borrow().events.next_progress_id(), + store: Rc::downgrade(&self), + } + } + + fn resume_subscription( + &self, + progress_id: u64, + namespace: NamespaceId, + area: Area, + params: SubscribeParams, + ) -> impl Stream + Unpin + 'static { + EventStream { + area, + params, + progress_id, + namespace, + store: Rc::downgrade(&self), + } + } +} + +/// Stream of events from a store subscription. +/// +/// We have weak pointer to the entry store and thus the EventQueue. 
+/// Once the store is dropped, the EventQueue wakes all streams a last time in its drop impl, +/// which then makes the stream return none because Weak::upgrade returns None. +#[derive(Debug)] +struct EventStream { + progress_id: u64, + store: Weak>, + namespace: NamespaceId, + area: Area, + params: SubscribeParams, +} + +impl Stream for EventStream { + type Item = StoreEvent; + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let Some(store) = self.store.upgrade() else { + return Poll::Ready(None); + }; + let mut store = store.borrow_mut(); + let res = ready!(store.events.poll_next( + self.progress_id, + |e| e.matches(self.namespace, &self.area, &self.params), + cx, + )); + drop(store); + Poll::Ready(match res { + None => None, + Some((next_id, event)) => { + self.progress_id = next_id; + Some(event) + } + }) + } +} + +/// A simple in-memory event queue. +/// +/// Events can be pushed, and get a unique monotonically-increasing *progress id*. +/// Events can be polled, with a progress id to start at, and an optional filter function. +/// +/// Current in-memory impl keeps all events, forever. +// TODO: Add max_len constructor, add a way to truncate old entries. +// TODO: This would be quite a bit more efficient if we filtered the waker with a closure +// that is set from the last poll, to not wake everyone for each new event. +#[derive(Debug)] +struct EventQueue { + events: VecDeque, + offset: u64, + wakers: VecDeque, +} + +impl Drop for EventQueue { + fn drop(&mut self) { + for waker in self.wakers.drain(..) { + waker.wake() + } + } +} + +impl Default for EventQueue { + fn default() -> Self { + Self { + events: Default::default(), + offset: 0, + wakers: Default::default(), + } + } +} + +impl EventQueue { + fn insert(&mut self, f: impl Fn(u64) -> T) { + let progress_id = self.next_progress_id(); + let event = f(progress_id); + self.events.push_back(event); + for waker in self.wakers.drain(..) 
{ + waker.wake() + } + } + + fn next_progress_id(&self) -> u64 { + self.offset + self.events.len() as u64 + } + + fn get(&self, progress_id: u64) -> Option<&T> { + let index = progress_id.checked_sub(self.offset)?; + self.events.get(index as usize) + } + + fn poll_next( + &mut self, + progress_id: u64, + filter: impl Fn(&T) -> bool, + cx: &mut Context<'_>, + ) -> Poll> { + if progress_id < self.offset { + return Poll::Ready(None); + } + let mut i = progress_id; + loop { + if let Some(event) = self.get(i) { + i += 1; + if filter(event) { + break Poll::Ready(Some((i, event.clone()))); + } + } else { + self.wakers.push_back(cx.waker().clone()); + break Poll::Pending; + } + } + } +} + #[derive(Debug, Default)] pub struct CapsStore { write_caps: HashMap>, diff --git a/iroh-willow/src/store/traits.rs b/iroh-willow/src/store/traits.rs index ceb13dd671..b7397551a7 100644 --- a/iroh-willow/src/store/traits.rs +++ b/iroh-willow/src/store/traits.rs @@ -3,12 +3,13 @@ use std::fmt::Debug; use anyhow::Result; +use futures_lite::Stream; use crate::{ interest::{CapSelector, CapabilityPack}, proto::{ data_model::{AuthorisedEntry, Entry, NamespaceId, Path, SubspaceId, WriteCapability}, - grouping::Range3d, + grouping::{Area, Range3d}, keys::{NamespaceSecretKey, NamespaceSignature, UserId, UserSecretKey, UserSignature}, meadowcap::{self, ReadAuthorisation}, wgps::Fingerprint, @@ -82,7 +83,33 @@ pub trait EntryStorage: EntryReader + Clone + Debug + 'static { fn reader(&self) -> Self::Reader; fn snapshot(&self) -> Result; - fn ingest_entry(&self, entry: &AuthorisedEntry) -> Result; + + /// Ingest a new entry. + /// + /// Returns `true` if the entry was ingested, and `false` if the entry was not ingested because a newer entry exists. 
+ fn ingest_entry(&self, entry: &AuthorisedEntry, origin: EntryOrigin) -> Result; + + /// Subscribe to events concerning entries [included](https://willowprotocol.org/specs/grouping-entries/index.html#area_include) + /// by an [`AreaOfInterest`], returning a producer of `StoreEvent`s which occurred since the moment of calling this function. + /// + /// If `ignore_incomplete_payloads` is `true`, the producer will not produce entries with incomplete corresponding payloads. + /// If `ignore_empty_payloads` is `true`, the producer will not produce entries with a `payload_length` of `0`. + fn subscribe_area( + &self, + namespace: NamespaceId, + area: Area, + params: SubscribeParams, + ) -> impl Stream + Unpin + 'static; + + /// Attempt to resume a subscription using a *progress ID* obtained from a previous subscription, or return an error + /// if this store implementation is unable to resume the subscription. + fn resume_subscription( + &self, + progress_id: u64, + namespace: NamespaceId, + area: Area, + params: SubscribeParams, + ) -> impl Stream + Unpin + 'static; } /// Read-only interface to [`EntryStorage`]. @@ -179,3 +206,135 @@ pub trait CapsStorage: Debug + Clone { fn get_read_cap(&self, selector: &CapSelector) -> Result>; } + +/// An event which took place within a [`EntryStorage`]. +/// Each event includes a *progress ID* which can be used to *resume* a subscription at any point in the future. +#[derive(Debug, Clone)] +pub enum StoreEvent { + /// A new entry was ingested. + Ingested(u64, AuthorisedEntry, EntryOrigin), + // PayloadForgotten(u64, PD), + /// An entry was pruned via prefix pruning. + Pruned(u64, PruneEvent), + // /// An existing entry received a portion of its corresponding payload. + // Appended(u64, LengthyAuthorisedEntry), + // /// An entry was forgotten. + // EntryForgotten(u64, (S, Path)), + // /// A payload was forgotten. 
+} + +impl StoreEvent { + pub fn progress_id(&self) -> u64 { + match self { + StoreEvent::Ingested(id, _, _) => *id, + StoreEvent::Pruned(id, _) => *id, + } + } +} + +impl StoreEvent { + /// Returns `true` if the event is included in the `area` and not skipped by `ignore_params`. + pub fn matches( + &self, + namespace_id: NamespaceId, + area: &Area, + params: &SubscribeParams, + ) -> bool { + match self { + StoreEvent::Ingested(_, entry, origin) => { + *entry.entry().namespace_id() == namespace_id + && area.includes_entry(entry.entry()) + && params.includes_entry(entry.entry()) + && params.includes_origin(origin) + } + StoreEvent::Pruned(_, PruneEvent { pruned, by: _ }) => { + if !params.ingest_only + && pruned.0 == namespace_id + && area.subspace().includes(&pruned.1) + && area.path().is_prefix_of(&pruned.2) + { + true + } else { + false + } + } + } + } +} + +/// Describes an [`AuthorisedEntry`] which was pruned and the [`AuthorisedEntry`] which triggered the pruning. +#[derive(Debug, Clone)] +pub struct PruneEvent { + /// The subspace ID and path of the entry which was pruned. + pub pruned: (NamespaceId, SubspaceId, Path), + /// The entry which triggered the pruning. + pub by: AuthorisedEntry, +} + +/// The origin of an entry ingestion event. +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub enum EntryOrigin { + /// The entry was probably created on this machine. + Local, + /// The entry was sourced from another device, e.g. a networked sync session. + Remote(u64), +} + +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub enum EntryChannel { + Reconciliation, + Data, +} + +/// Describes which entries to ignore during a query. +#[derive(Debug, Default)] +pub struct SubscribeParams { + /// Omit entries whose payload is the empty string. + pub ignore_empty_payloads: bool, + /// Omit entries whose origin is this remote. + pub ignore_remote: Option, + /// Only emit ingestion events. 
+ pub ingest_only: bool, + // TODO: ignore_incomplete_payloads is harder to support for us because we need to query the blob store each time currently. + // /// Omit entries with locally incomplete corresponding payloads. + // pub ignore_incomplete_payloads: bool, +} + +impl SubscribeParams { + // pub fn ignore_incomplete_payloads(&mut self) { + // self.ignore_incomplete_payloads = true; + // } + + pub fn ignore_empty_payloads(mut self) -> Self { + self.ignore_empty_payloads = true; + self + } + + pub fn ignore_remote(mut self, remote: u64) -> Self { + self.ignore_remote = Some(remote); + self + } + + pub fn ingest_only(mut self) -> Self { + self.ingest_only = true; + self + } + + pub fn includes_entry(&self, entry: &Entry) -> bool { + if self.ignore_empty_payloads && entry.payload_length() == 0 { + false + } else { + true + } + } + + pub fn includes_origin(&self, origin: &EntryOrigin) -> bool { + match &self.ignore_remote { + None => true, + Some(ignored_session) => match origin { + EntryOrigin::Local => true, + EntryOrigin::Remote(session) => session != ignored_session, + }, + } + } +} From cb207ad8ee42b05f57d1f007e5784a755a34b863 Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 29 Aug 2024 16:40:05 +0200 Subject: [PATCH 186/198] feat: RPC for subscriptions --- iroh-willow/src/engine/actor.rs | 96 +++++++++++++++++++++++++++++++-- iroh-willow/src/store/memory.rs | 12 ++--- iroh-willow/src/store/traits.rs | 29 ++++++---- iroh/src/client/spaces.rs | 38 ++++++++++--- iroh/src/node/rpc/spaces.rs | 26 +++++++++ iroh/src/rpc_protocol/spaces.rs | 22 +++++--- 6 files changed, 188 insertions(+), 35 deletions(-) diff --git a/iroh-willow/src/engine/actor.rs b/iroh-willow/src/engine/actor.rs index 3728a5734c..7d447187c1 100644 --- a/iroh-willow/src/engine/actor.rs +++ b/iroh-willow/src/engine/actor.rs @@ -1,7 +1,7 @@ use std::{sync::Arc, thread::JoinHandle}; use anyhow::Result; -use futures_lite::stream::Stream; +use futures_lite::{stream::Stream, StreamExt}; use 
iroh_base::key::NodeId; use tokio::{ sync::{mpsc, oneshot}, @@ -16,13 +16,16 @@ use crate::{ net::ConnHandle, proto::{ data_model::{AuthorisedEntry, Path, SubspaceId}, - grouping::Range3d, + grouping::{Area, Range3d}, keys::{NamespaceId, NamespaceKind, UserId, UserSecretKey}, meadowcap::{self, AccessMode}, }, session::{intents::Intent, run_session, Error, EventSender, SessionHandle}, store::{ - traits::{EntryOrigin, EntryReader, EntryStorage, SecretStorage, Storage}, + traits::{ + EntryOrigin, EntryReader, EntryStorage, SecretStorage, Storage, StoreEvent, + SubscribeParams, + }, Store, }, }; @@ -211,6 +214,42 @@ impl ActorHandle { reply_rx.await?; Ok(()) } + + pub async fn subscribe_area( + &self, + namespace: NamespaceId, + area: Area, + params: SubscribeParams, + sender: mpsc::Sender, + ) -> Result<()> { + self.send(Input::SubscribeArea { + namespace, + area, + params, + sender, + }) + .await?; + Ok(()) + } + + pub async fn resume_subscription( + &self, + progress_id: u64, + namespace: NamespaceId, + area: Area, + params: SubscribeParams, + sender: mpsc::Sender, + ) -> Result<()> { + self.send(Input::ResumeSubscription { + progress_id, + namespace, + area, + params, + sender, + }) + .await?; + Ok(()) + } } impl Drop for ActorHandle { @@ -308,6 +347,19 @@ pub enum Input { #[debug(skip)] reply: Option>, }, + SubscribeArea { + namespace: NamespaceId, + area: Area, + params: SubscribeParams, + sender: mpsc::Sender, + }, + ResumeSubscription { + progress_id: u64, + namespace: NamespaceId, + area: Area, + params: SubscribeParams, + sender: mpsc::Sender, + }, } #[derive(Debug)] @@ -488,6 +540,44 @@ impl Actor { let res = self.store.auth().resolve_interests(interests); send_reply(reply, res.map_err(anyhow::Error::from)) } + Input::SubscribeArea { + namespace, + area, + params, + sender, + } => { + let store = self.store.clone(); + self.tasks.spawn_local(async move { + let mut stream = store.entries().subscribe_area(namespace, area, params); + while let Some(event) = 
stream.next().await { + if let Err(_) = sender.send(event).await { + break; + } + } + }); + Ok(()) + } + Input::ResumeSubscription { + progress_id, + namespace, + area, + params, + sender, + } => { + let store = self.store.clone(); + self.tasks.spawn_local(async move { + let mut stream = + store + .entries() + .resume_subscription(progress_id, namespace, area, params); + while let Some(event) = stream.next().await { + if let Err(_) = sender.send(event).await { + break; + } + } + }); + Ok(()) + } } } } diff --git a/iroh-willow/src/store/memory.rs b/iroh-willow/src/store/memory.rs index fd70a7e933..1cac6acaa3 100644 --- a/iroh-willow/src/store/memory.rs +++ b/iroh-willow/src/store/memory.rs @@ -257,16 +257,12 @@ impl EntryStore { } } for i in to_prune { - let pruned = entries.remove(i).into_parts().0; - self.events.insert(|id| { + let pruned = entries.remove(i); + self.events.insert(move |id| { StoreEvent::Pruned( id, traits::PruneEvent { - pruned: ( - pruned.namespace_id().clone(), - pruned.subspace_id().clone(), - pruned.path().clone(), - ), + pruned, by: entry.clone(), }, ) @@ -404,7 +400,7 @@ impl Default for EventQueue { } impl EventQueue { - fn insert(&mut self, f: impl Fn(u64) -> T) { + fn insert(&mut self, f: impl FnOnce(u64) -> T) { let progress_id = self.next_progress_id(); let event = f(progress_id); self.events.push_back(event); diff --git a/iroh-willow/src/store/traits.rs b/iroh-willow/src/store/traits.rs index b7397551a7..65e19a15f1 100644 --- a/iroh-willow/src/store/traits.rs +++ b/iroh-willow/src/store/traits.rs @@ -4,11 +4,14 @@ use std::fmt::Debug; use anyhow::Result; use futures_lite::Stream; +use serde::{Deserialize, Serialize}; use crate::{ interest::{CapSelector, CapabilityPack}, proto::{ - data_model::{AuthorisedEntry, Entry, NamespaceId, Path, SubspaceId, WriteCapability}, + data_model::{ + self, AuthorisedEntry, Entry, NamespaceId, Path, SubspaceId, WriteCapability, + }, grouping::{Area, Range3d}, keys::{NamespaceSecretKey, 
NamespaceSignature, UserId, UserSecretKey, UserSignature}, meadowcap::{self, ReadAuthorisation}, @@ -209,10 +212,14 @@ pub trait CapsStorage: Debug + Clone { /// An event which took place within a [`EntryStorage`]. /// Each event includes a *progress ID* which can be used to *resume* a subscription at any point in the future. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub enum StoreEvent { /// A new entry was ingested. - Ingested(u64, AuthorisedEntry, EntryOrigin), + Ingested( + u64, + #[serde(with = "data_model::serde_encoding::authorised_entry")] AuthorisedEntry, + EntryOrigin, + ), // PayloadForgotten(u64, PD), /// An entry was pruned via prefix pruning. Pruned(u64, PruneEvent), @@ -249,9 +256,8 @@ impl StoreEvent { } StoreEvent::Pruned(_, PruneEvent { pruned, by: _ }) => { if !params.ingest_only - && pruned.0 == namespace_id - && area.subspace().includes(&pruned.1) - && area.path().is_prefix_of(&pruned.2) + && *pruned.entry().namespace_id() == namespace_id + && area.includes_entry(pruned.entry()) { true } else { @@ -263,16 +269,17 @@ impl StoreEvent { } /// Describes an [`AuthorisedEntry`] which was pruned and the [`AuthorisedEntry`] which triggered the pruning. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct PruneEvent { - /// The subspace ID and path of the entry which was pruned. - pub pruned: (NamespaceId, SubspaceId, Path), + #[serde(with = "data_model::serde_encoding::authorised_entry")] + pub pruned: AuthorisedEntry, /// The entry which triggered the pruning. + #[serde(with = "data_model::serde_encoding::authorised_entry")] pub by: AuthorisedEntry, } /// The origin of an entry ingestion event. -#[derive(Debug, Clone, Copy, Eq, PartialEq)] +#[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize)] pub enum EntryOrigin { /// The entry was probably created on this machine. Local, @@ -287,7 +294,7 @@ pub enum EntryChannel { } /// Describes which entries to ignore during a query. 
-#[derive(Debug, Default)] +#[derive(Debug, Default, Serialize, Deserialize)] pub struct SubscribeParams { /// Omit entries whose payload is the empty string. pub ignore_empty_payloads: bool, diff --git a/iroh/src/client/spaces.rs b/iroh/src/client/spaces.rs index 65be8ee852..581268d1ae 100644 --- a/iroh/src/client/spaces.rs +++ b/iroh/src/client/spaces.rs @@ -38,6 +38,7 @@ use iroh_willow::{ intents::{serde_encoding::Event, Completion, IntentUpdate}, SessionInit, }, + store::traits::{StoreEvent, SubscribeParams}, }; use ref_cast::RefCast; use serde::{Deserialize, Serialize}; @@ -353,14 +354,39 @@ impl Space { }) } - /// TODO - pub fn subscribe(&self, _area: Area) { - todo!() + /// Subscribe to events concerning entries included by an `Area`. + pub async fn subscribe_area( + &self, + area: Area, + params: SubscribeParams, + ) -> Result>> { + let req = SubscribeRequest { + namespace: self.namespace_id, + area, + params, + initial_progress_id: None, + }; + let stream = self.rpc.try_server_streaming(req).await?; + let stream = stream.map(|item| item.map_err(anyhow::Error::from)); + Ok(stream) } - /// TODO - pub fn subscribe_offset(&self, _area: Area, _offset: u64) { - todo!() + /// Resume a subscription using a progress ID obtained from a previous subscription. 
+ pub async fn resume_subscription( + &self, + progress_id: u64, + area: Area, + params: SubscribeParams, + ) -> Result>> { + let req = SubscribeRequest { + namespace: self.namespace_id, + area, + params, + initial_progress_id: Some(progress_id), + }; + let stream = self.rpc.try_server_streaming(req).await?; + let stream = stream.map(|item| item.map_err(anyhow::Error::from)); + Ok(stream) } } diff --git a/iroh/src/node/rpc/spaces.rs b/iroh/src/node/rpc/spaces.rs index eb164c358e..3f8de9d8ad 100644 --- a/iroh/src/node/rpc/spaces.rs +++ b/iroh/src/node/rpc/spaces.rs @@ -155,6 +155,32 @@ impl Handler { .await } SyncWithPeerUpdate(_) => Err(RpcServerError::UnexpectedStartMessage), + Subscribe(msg) => { + chan.try_server_streaming(msg, self, |handler, req| async move { + let (tx, rx) = mpsc::channel(1024); + if let Some(progress_id) = req.initial_progress_id { + handler + .spaces()? + .resume_subscription( + progress_id, + req.namespace, + req.area, + req.params, + tx, + ) + .await + .map_err(map_err)?; + } else { + handler + .spaces()? 
+ .subscribe_area(req.namespace, req.area, req.params, tx) + .await + .map_err(map_err)?; + } + Ok(ReceiverStream::new(rx).map(Ok)) + }) + .await + } } } } diff --git a/iroh/src/rpc_protocol/spaces.rs b/iroh/src/rpc_protocol/spaces.rs index 067c44399e..22aaffa5a2 100644 --- a/iroh/src/rpc_protocol/spaces.rs +++ b/iroh/src/rpc_protocol/spaces.rs @@ -9,7 +9,7 @@ use iroh_willow::{ self, serde_encoding::SerdeAuthorisedEntry, AuthorisedEntry, Entry, NamespaceId, Path, SubspaceId, }, - grouping::{self, Range3d}, + grouping::{self, Area, Range3d}, keys::{NamespaceKind, UserId}, meadowcap::{self, AccessMode, SecretKey}, }, @@ -17,6 +17,7 @@ use iroh_willow::{ intents::{serde_encoding::Event, IntentUpdate}, SessionInit, }, + store::traits::{StoreEvent, SubscribeParams}, }; use nested_enum_utils::enum_conversions; use quic_rpc_derive::rpc_requests; @@ -47,11 +48,11 @@ pub enum Request { DelegateCaps(DelegateCapsRequest), #[rpc(response = RpcResult)] ImportCaps(ImportCapsRequest), - // #[rpc(response = RpcResult)] - // ResolveInterests(ResolveInterestsRequest), #[bidi_streaming(update = SyncWithPeerUpdate, response = RpcResult)] SyncWithPeer(SyncWithPeerRequest), SyncWithPeerUpdate(SyncWithPeerUpdate), + #[try_server_streaming(create_error = RpcError, item_error = RpcError, item = StoreEvent)] + Subscribe(SubscribeRequest), } #[allow(missing_docs)] @@ -67,8 +68,8 @@ pub enum Response { CreateUser(RpcResult), DelegateCaps(RpcResult), ImportCaps(RpcResult), - // ResolveInterests(RpcResult), SyncWithPeer(RpcResult), + Subscribe(RpcResult), } #[derive(Debug, Serialize, Deserialize)] @@ -139,9 +140,7 @@ pub struct GetEntryRequest { } #[derive(Debug, Serialize, Deserialize)] -pub struct GetEntryResponse( - pub Option, // #[serde(with = "data_model::serde_encoding::authorised_entry")] pub AuthorisedEntry, -); +pub struct GetEntryResponse(pub Option); #[derive(Debug, Serialize, Deserialize)] pub struct CreateNamespaceRequest { @@ -192,6 +191,15 @@ pub enum SyncWithPeerResponse { 
Event(Event), } +#[derive(Debug, Serialize, Deserialize)] +pub struct SubscribeRequest { + pub namespace: NamespaceId, + #[serde(with = "grouping::serde_encoding::area")] + pub area: Area, + pub params: SubscribeParams, + pub initial_progress_id: Option, +} + /// Either a complete [`Entry`] or a [`FullEntryForm`]. #[derive(Debug, Serialize, Deserialize)] pub enum EntryOrForm { From 515465595c91a924c50dd99a6f2e38ca8d9ca8c2 Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 29 Aug 2024 17:01:18 +0200 Subject: [PATCH 187/198] fix: fixes to subscriptions and add test --- iroh-willow/src/store/memory.rs | 62 +++++++++++++++++------- iroh/src/client/spaces.rs | 10 ++-- iroh/tests/spaces.rs | 83 ++++++++++++++++++++++++++++++++- 3 files changed, 134 insertions(+), 21 deletions(-) diff --git a/iroh-willow/src/store/memory.rs b/iroh-willow/src/store/memory.rs index 1cac6acaa3..c2c30dfb7d 100644 --- a/iroh-willow/src/store/memory.rs +++ b/iroh-willow/src/store/memory.rs @@ -103,7 +103,12 @@ impl traits::SecretStorage for Rc> { #[derive(Debug, Default)] pub struct EntryStore { - entries: HashMap>, + stores: HashMap, +} + +#[derive(Debug, Default)] +pub struct NamespaceStore { + entries: Vec, events: EventQueue, } @@ -199,8 +204,9 @@ impl traits::EntryReader for Rc> { range: &Range3d, ) -> impl Iterator> + 'a { let slf = self.borrow(); - slf.entries + slf.stores .get(&namespace) + .map(|s| &s.entries) .into_iter() .flatten() .filter(|entry| range.includes_entry(entry.entry())) @@ -216,10 +222,11 @@ impl traits::EntryReader for Rc> { path: &Path, ) -> Result> { let inner = self.borrow(); - let Some(entries) = inner.entries.get(&namespace) else { + let Some(entries) = inner.stores.get(&namespace) else { return Ok(None); }; Ok(entries + .entries .iter() .find(|e| { let e = e.entry(); @@ -231,10 +238,11 @@ impl traits::EntryReader for Rc> { impl EntryStore { fn ingest_entry(&mut self, entry: &AuthorisedEntry, origin: EntryOrigin) -> Result { - let entries = self - .entries + let 
store = self + .stores .entry(*entry.entry().namespace_id()) .or_default(); + let entries = &mut store.entries; let new = entry.entry(); let mut to_prune = vec![]; for (i, existing) in entries.iter().enumerate() { @@ -258,7 +266,7 @@ impl EntryStore { } for i in to_prune { let pruned = entries.remove(i); - self.events.insert(move |id| { + store.events.insert(move |id| { StoreEvent::Pruned( id, traits::PruneEvent { @@ -269,7 +277,8 @@ impl EntryStore { }); } entries.push(entry.clone()); - self.events + store + .events .insert(|id| StoreEvent::Ingested(id, entry.clone(), origin)); Ok(true) } @@ -284,11 +293,23 @@ impl traits::EntryStorage for Rc> { } fn snapshot(&self) -> Result { - let entries = self.borrow().entries.clone(); - Ok(Rc::new(RefCell::new(EntryStore { - entries, - events: EventQueue::default(), - }))) + // This is quite ugly. But this is a quick memory impl only. + // But we should really maybe strive to not expose snapshots. + let stores = self + .borrow() + .stores + .iter() + .map(|(key, value)| { + ( + *key, + NamespaceStore { + entries: value.entries.clone(), + events: Default::default(), + }, + ) + }) + .collect(); + Ok(Rc::new(RefCell::new(EntryStore { stores }))) } fn ingest_entry(&self, entry: &AuthorisedEntry, origin: EntryOrigin) -> Result { @@ -302,11 +323,18 @@ impl traits::EntryStorage for Rc> { area: Area, params: SubscribeParams, ) -> impl Stream + Unpin + 'static { + let progress_id = self + .borrow_mut() + .stores + .entry(namespace) + .or_default() + .events + .next_progress_id(); EventStream { area, params, namespace, - progress_id: self.borrow().events.next_progress_id(), + progress_id, store: Rc::downgrade(&self), } } @@ -345,16 +373,18 @@ struct EventStream { impl Stream for EventStream { type Item = StoreEvent; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let Some(store) = self.store.upgrade() else { + let Some(inner) = self.store.upgrade() else { return Poll::Ready(None); }; - let mut store = 
store.borrow_mut(); + let mut inner_mut = inner.borrow_mut(); + let store = inner_mut.stores.entry(self.namespace).or_default(); let res = ready!(store.events.poll_next( self.progress_id, |e| e.matches(self.namespace, &self.area, &self.params), cx, )); - drop(store); + drop(inner_mut); + drop(inner); Poll::Ready(match res { None => None, Some((next_id, event)) => { diff --git a/iroh/src/client/spaces.rs b/iroh/src/client/spaces.rs index 581268d1ae..e585cfa7fe 100644 --- a/iroh/src/client/spaces.rs +++ b/iroh/src/client/spaces.rs @@ -36,7 +36,7 @@ use iroh_willow::{ }, session::{ intents::{serde_encoding::Event, Completion, IntentUpdate}, - SessionInit, + SessionInit, SessionMode, }, store::traits::{StoreEvent, SubscribeParams}, }; @@ -99,7 +99,11 @@ impl Client { } /// Import a ticket and start to synchronize. - pub async fn import_and_sync(&self, ticket: SpaceTicket) -> Result<(Space, SyncHandleSet)> { + pub async fn import_and_sync( + &self, + ticket: SpaceTicket, + mode: SessionMode, + ) -> Result<(Space, SyncHandleSet)> { if ticket.caps.is_empty() { anyhow::bail!("Invalid ticket: Does not include any capabilities"); } @@ -111,7 +115,7 @@ impl Client { self.import_caps(ticket.caps).await?; let interests = Interests::builder().add_full_cap(CapSelector::any(namespace)); - let init = SessionInit::reconcile_once(interests); + let init = SessionInit::new(interests, mode); let mut intents = SyncHandleSet::default(); for addr in ticket.nodes { let node_id = addr.node_id; diff --git a/iroh/tests/spaces.rs b/iroh/tests/spaces.rs index b927347a78..f781fbee64 100644 --- a/iroh/tests/spaces.rs +++ b/iroh/tests/spaces.rs @@ -10,7 +10,8 @@ use iroh_willow::{ keys::NamespaceKind, meadowcap::AccessMode, }, - session::intents::Completion, + session::{intents::Completion, SessionMode}, + store::traits::{EntryOrigin, StoreEvent}, }; use tracing::info; @@ -75,7 +76,10 @@ async fn spaces_smoke() -> Result<()> { .await?; println!("ticket {ticket:?}"); - let (betty_space, 
betty_sync_intent) = betty.spaces().import_and_sync(ticket).await?; + let (betty_space, betty_sync_intent) = betty + .spaces() + .import_and_sync(ticket, SessionMode::ReconcileOnce) + .await?; let mut completion = betty_sync_intent.complete_all().await; assert_eq!(completion.len(), 1); @@ -132,3 +136,78 @@ async fn spaces_smoke() -> Result<()> { Ok(()) } + +#[tokio::test] +async fn spaces_subscription() -> Result<()> { + iroh_test::logging::setup_multithreaded(); + let (alfie_addr, alfie) = spawn_node().await; + let (betty_addr, betty) = spawn_node().await; + info!("alfie is {}", alfie_addr.node_id.fmt_short()); + info!("betty is {}", betty_addr.node_id.fmt_short()); + + let betty_user = betty.spaces().create_user().await?; + let alfie_user = alfie.spaces().create_user().await?; + let alfie_space = alfie + .spaces() + .create(NamespaceKind::Owned, alfie_user) + .await?; + + let _namespace = alfie_space.namespace_id(); + + let mut alfie_sub = alfie_space + .subscribe_area(Area::new_full(), Default::default()) + .await?; + + let ticket = alfie_space + .share(betty_user, AccessMode::Write, RestrictArea::None) + .await?; + + let (betty_space, betty_sync_intent) = betty + .spaces() + .import_and_sync(ticket, SessionMode::ReconcileOnce) + .await?; + + let _sync_task = tokio::task::spawn(async move { + // TODO: We should add a "detach" method to a sync intent! 
+ // (leaves the sync running but stop consuming events) + let _ = betty_sync_intent.complete_all().await; + }); + + let mut betty_sub = betty_space + .resume_subscription(0, Area::new_full(), Default::default()) + .await?; + + alfie_space + .insert_bytes( + EntryForm::new(alfie_user, Path::from_bytes(&[b"foo"])?), + "hi", + ) + .await?; + + betty_space + .insert_bytes( + EntryForm::new(betty_user, Path::from_bytes(&[b"foo"])?), + "hi", + ) + .await?; + + alfie_space + .insert_bytes( + EntryForm::new(alfie_user, Path::from_bytes(&[b"foo"])?), + "hi!!", + ) + .await?; + + let ev = alfie_sub.next().await.unwrap(); + println!("ALFIE 2"); + assert!(matches!(ev, StoreEvent::Ingested(0, _, EntryOrigin::Local))); + + let ev = betty_sub.next().await.unwrap(); + println!("BETTY 2"); + assert!(matches!( + ev, + StoreEvent::Ingested(0, _, EntryOrigin::Remote(_)) + )); + + Ok(()) +} From dd1e885a868ff5f61421dcfd14a380a1f020057d Mon Sep 17 00:00:00 2001 From: Frando Date: Thu, 29 Aug 2024 17:10:15 +0200 Subject: [PATCH 188/198] fix test --- iroh/tests/spaces.rs | 39 ++++++++++++++++++++++++--------------- 1 file changed, 24 insertions(+), 15 deletions(-) diff --git a/iroh/tests/spaces.rs b/iroh/tests/spaces.rs index f781fbee64..b91295e0c8 100644 --- a/iroh/tests/spaces.rs +++ b/iroh/tests/spaces.rs @@ -164,7 +164,7 @@ async fn spaces_subscription() -> Result<()> { let (betty_space, betty_sync_intent) = betty .spaces() - .import_and_sync(ticket, SessionMode::ReconcileOnce) + .import_and_sync(ticket, SessionMode::Continuous) .await?; let _sync_task = tokio::task::spawn(async move { @@ -177,13 +177,6 @@ async fn spaces_subscription() -> Result<()> { .resume_subscription(0, Area::new_full(), Default::default()) .await?; - alfie_space - .insert_bytes( - EntryForm::new(alfie_user, Path::from_bytes(&[b"foo"])?), - "hi", - ) - .await?; - betty_space .insert_bytes( EntryForm::new(betty_user, Path::from_bytes(&[b"foo"])?), @@ -191,23 +184,39 @@ async fn spaces_subscription() -> 
Result<()> { ) .await?; + let ev = betty_sub.next().await.unwrap().unwrap(); + println!("BETTY 1 {ev:?}"); + assert!(matches!(ev, StoreEvent::Ingested(0, _, EntryOrigin::Local))); + + let ev = alfie_sub.next().await.unwrap().unwrap(); + println!("ALFIE 1 {ev:?}"); + assert!(matches!( + ev, + StoreEvent::Ingested(0, _, EntryOrigin::Remote(_)) + )); + alfie_space .insert_bytes( - EntryForm::new(alfie_user, Path::from_bytes(&[b"foo"])?), + EntryForm::new(alfie_user, Path::from_bytes(&[b"bar"])?), "hi!!", ) .await?; - let ev = alfie_sub.next().await.unwrap(); - println!("ALFIE 2"); - assert!(matches!(ev, StoreEvent::Ingested(0, _, EntryOrigin::Local))); + let ev = alfie_sub.next().await.unwrap().unwrap(); + println!("ALFIE 2 {ev:?}"); + assert!(matches!(ev, StoreEvent::Ingested(1, _, EntryOrigin::Local))); - let ev = betty_sub.next().await.unwrap(); - println!("BETTY 2"); + let ev = betty_sub.next().await.unwrap().unwrap(); + println!("BETTY 2 {ev:?}"); assert!(matches!( ev, - StoreEvent::Ingested(0, _, EntryOrigin::Remote(_)) + StoreEvent::Ingested(1, _, EntryOrigin::Remote(_)) )); + // let resume_sub = alfie_space + // .resume_subscription(0, Area::new_full(), Default::default()) + // .await?; + // assert_eq!(resume_sub.count().await, 2); + Ok(()) } From 46c386cdab1195384208c6c260a5379fdffe64a5 Mon Sep 17 00:00:00 2001 From: Frando Date: Fri, 30 Aug 2024 00:47:24 +0200 Subject: [PATCH 189/198] chore: clippy --- iroh-willow/src/engine/actor.rs | 4 ++-- iroh-willow/src/store/memory.rs | 4 ++-- iroh-willow/src/store/traits.rs | 13 ++----------- 3 files changed, 6 insertions(+), 15 deletions(-) diff --git a/iroh-willow/src/engine/actor.rs b/iroh-willow/src/engine/actor.rs index 7d447187c1..fde7773241 100644 --- a/iroh-willow/src/engine/actor.rs +++ b/iroh-willow/src/engine/actor.rs @@ -550,7 +550,7 @@ impl Actor { self.tasks.spawn_local(async move { let mut stream = store.entries().subscribe_area(namespace, area, params); while let Some(event) = stream.next().await { - 
if let Err(_) = sender.send(event).await { + if sender.send(event).await.is_err() { break; } } @@ -571,7 +571,7 @@ impl Actor { .entries() .resume_subscription(progress_id, namespace, area, params); while let Some(event) = stream.next().await { - if let Err(_) = sender.send(event).await { + if sender.send(event).await.is_err() { break; } } diff --git a/iroh-willow/src/store/memory.rs b/iroh-willow/src/store/memory.rs index c2c30dfb7d..3b17561050 100644 --- a/iroh-willow/src/store/memory.rs +++ b/iroh-willow/src/store/memory.rs @@ -335,7 +335,7 @@ impl traits::EntryStorage for Rc> { params, namespace, progress_id, - store: Rc::downgrade(&self), + store: Rc::downgrade(self), } } @@ -351,7 +351,7 @@ impl traits::EntryStorage for Rc> { params, progress_id, namespace, - store: Rc::downgrade(&self), + store: Rc::downgrade(self), } } } diff --git a/iroh-willow/src/store/traits.rs b/iroh-willow/src/store/traits.rs index 65e19a15f1..ebcea1a1b7 100644 --- a/iroh-willow/src/store/traits.rs +++ b/iroh-willow/src/store/traits.rs @@ -255,14 +255,9 @@ impl StoreEvent { && params.includes_origin(origin) } StoreEvent::Pruned(_, PruneEvent { pruned, by: _ }) => { - if !params.ingest_only + !params.ingest_only && *pruned.entry().namespace_id() == namespace_id && area.includes_entry(pruned.entry()) - { - true - } else { - false - } } } } @@ -328,11 +323,7 @@ impl SubscribeParams { } pub fn includes_entry(&self, entry: &Entry) -> bool { - if self.ignore_empty_payloads && entry.payload_length() == 0 { - false - } else { - true - } + !(self.ignore_empty_payloads && entry.payload_length() == 0) } pub fn includes_origin(&self, origin: &EntryOrigin) -> bool { From f3fa6fecca352d2f765dcb87e6b6c2f8dc3382ed Mon Sep 17 00:00:00 2001 From: Frando Date: Mon, 9 Sep 2024 12:08:41 +0200 Subject: [PATCH 190/198] add test from matheus23 --- iroh/tests/spaces.rs | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/iroh/tests/spaces.rs b/iroh/tests/spaces.rs index 
b91295e0c8..23596acc84 100644 --- a/iroh/tests/spaces.rs +++ b/iroh/tests/spaces.rs @@ -220,3 +220,37 @@ async fn spaces_subscription() -> Result<()> { Ok(()) } + +#[tokio::test] +async fn test_regression_restricted_area_sync() -> testresult::TestResult { + iroh_test::logging::setup_multithreaded(); + const TIMEOUT: std::time::Duration = std::time::Duration::from_secs(2); + let (alfie_addr, alfie) = spawn_node().await; + let (betty_addr, betty) = spawn_node().await; + info!("alfie is {}", alfie_addr.node_id.fmt_short()); + info!("betty is {}", betty_addr.node_id.fmt_short()); + let alfie_user = alfie.spaces().create_user().await?; + let betty_user = betty.spaces().create_user().await?; + let alfie_space = alfie + .spaces() + .create(NamespaceKind::Owned, alfie_user) + .await?; + let space_ticket = alfie_space + .share( + betty_user, + AccessMode::Write, + // RestrictArea::None, // succeeds with this + RestrictArea::Restrict(Area::new_subspace(betty_user)), + ) + .await?; + let (betty_space, syncs) = betty + .spaces() + .import_and_sync(space_ticket, SessionMode::ReconcileOnce) + .await?; + let completion = tokio::time::timeout(TIMEOUT, syncs.complete_all()).await?; + println!("Completed syncs: {completion:#?}"); + let stream = betty_space.get_many(Range3d::new_full()).await?; + let entries: Vec<_> = stream.try_collect().await?; + println!("{entries:#?}"); + Ok(()) +} From 3c092ccd5962a1fee0b25a6ba8cd7be046a50fce Mon Sep 17 00:00:00 2001 From: Frando Date: Mon, 9 Sep 2024 12:11:39 +0200 Subject: [PATCH 191/198] fix: test --- iroh/tests/spaces.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/iroh/tests/spaces.rs b/iroh/tests/spaces.rs index 23596acc84..c00ccac660 100644 --- a/iroh/tests/spaces.rs +++ b/iroh/tests/spaces.rs @@ -222,7 +222,7 @@ async fn spaces_subscription() -> Result<()> { } #[tokio::test] -async fn test_regression_restricted_area_sync() -> testresult::TestResult { +async fn test_restricted_area() -> testresult::TestResult { 
iroh_test::logging::setup_multithreaded(); const TIMEOUT: std::time::Duration = std::time::Duration::from_secs(2); let (alfie_addr, alfie) = spawn_node().await; @@ -239,7 +239,6 @@ async fn test_regression_restricted_area_sync() -> testresult::TestResult { .share( betty_user, AccessMode::Write, - // RestrictArea::None, // succeeds with this RestrictArea::Restrict(Area::new_subspace(betty_user)), ) .await?; From d0b02500d00861f03da44b5279fe39c0afc5dfb5 Mon Sep 17 00:00:00 2001 From: Frando Date: Mon, 9 Sep 2024 12:39:27 +0200 Subject: [PATCH 192/198] fix: clippy --- iroh-willow/src/engine/actor.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/iroh-willow/src/engine/actor.rs b/iroh-willow/src/engine/actor.rs index 701d4fc127..f2fa10dcd1 100644 --- a/iroh-willow/src/engine/actor.rs +++ b/iroh-willow/src/engine/actor.rs @@ -229,10 +229,8 @@ impl Drop for ActorHandle { let shutdown = move || { if let Err(err) = inbox_tx.blocking_send(Input::Shutdown { reply: None }) { warn!(?err, "Failed to send shutdown"); - } else { - if let Err(err) = handle.join() { - warn!(?err, "Failed to join sync actor"); - } + } else if let Err(err) = handle.join() { + warn!(?err, "Failed to join sync actor"); } }; From d51c3fcbe1fb3dadcdf67d1c26f340d72f0c8cb4 Mon Sep 17 00:00:00 2001 From: Frando Date: Mon, 9 Sep 2024 19:30:03 +0200 Subject: [PATCH 193/198] chore: add todo comments --- iroh-willow/src/engine/actor.rs | 2 ++ iroh-willow/src/proto/data_model.rs | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/iroh-willow/src/engine/actor.rs b/iroh-willow/src/engine/actor.rs index 68b27923ed..2df1df88c3 100644 --- a/iroh-willow/src/engine/actor.rs +++ b/iroh-willow/src/engine/actor.rs @@ -546,6 +546,8 @@ impl Actor { } => { let store = self.store.clone(); self.tasks.spawn_local(async move { + // TODO: We wouldn't need to manually forward in a loop here if subscribe_area took a sender + // instead of returning a stream. 
let mut stream = store.entries().subscribe_area(namespace, area, params); while let Some(event) = stream.next().await { if sender.send(event).await.is_err() { diff --git a/iroh-willow/src/proto/data_model.rs b/iroh-willow/src/proto/data_model.rs index 39895756d6..369e07eb1a 100644 --- a/iroh-willow/src/proto/data_model.rs +++ b/iroh-willow/src/proto/data_model.rs @@ -82,6 +82,7 @@ impl willow_data_model::PayloadDigest for PayloadDigest {} pub type Path = willow_data_model::Path; /// Extension methods for [`Path`]. +// TODO: Upstream the methods to willow-rs and remove the extension trait. pub trait PathExt { /// Creates a new path from a slice of bytes. fn from_bytes(slices: &[&[u8]]) -> Result; @@ -106,7 +107,7 @@ impl PathExt for Path { } #[derive(Debug, thiserror::Error)] -/// An error arising from trying to construct a invalid [`Path`] from valid components. +/// An error arising from trying to construct a invalid [`Path`] from potentially invalid components. pub enum InvalidPathError2 { /// One of the path's component is too large. #[error("One of the path's component is too large.")] @@ -143,6 +144,7 @@ pub type Entry = willow_data_model::Entry< >; /// Extension methods for [`Entry`]. +// TODO: Decide what to upstream to willow-rs. pub trait EntryExt { /// Encodes the entry into a bytestring. 
fn encode_to_vec(&self) -> Vec; From 3b9fd43644865833c8f6a1099534f460c327ae36 Mon Sep 17 00:00:00 2001 From: Frando Date: Mon, 9 Sep 2024 19:27:15 +0200 Subject: [PATCH 194/198] refactor: remove count field from ReconciliationAnnounceEntries --- iroh-willow/src/proto/wgps/messages.rs | 10 ++- iroh-willow/src/session/reconciler.rs | 91 ++++++++++++++------------ 2 files changed, 56 insertions(+), 45 deletions(-) diff --git a/iroh-willow/src/proto/wgps/messages.rs b/iroh-willow/src/proto/wgps/messages.rs index c1cdaa7593..b189f8c416 100644 --- a/iroh-willow/src/proto/wgps/messages.rs +++ b/iroh-willow/src/proto/wgps/messages.rs @@ -354,8 +354,8 @@ impl ReconciliationSendFingerprint { pub struct ReconciliationAnnounceEntries { /// The 3dRange whose LengthyEntries to transmit. pub range: SerdeRange3d, - /// The number of Entries the sender has in the range. - pub count: u64, + /// True if and only if the the sender has zero Entries in the range. + pub is_empty: bool, /// A boolean flag to indicate whether the sender wishes to receive a ReconciliationAnnounceEntries message for the same 3dRange in return. pub want_response: bool, /// Whether the sender promises to send the Entries in the range sorted from oldest to newest. @@ -397,7 +397,11 @@ pub struct ReconciliationSendPayload { /// Indicate that no more bytes will be transmitted for the currently transmitted Payload as part of set reconciliation. #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ReconciliationTerminatePayload; +pub struct ReconciliationTerminatePayload { + /// True if and only if no further ReconciliationSendEntry message will be sent as part of + /// reconciling the current 3dRange. + pub is_final: bool, +} /// Transmit an AuthorisedEntry to the other peer, and optionally prepare transmission of its Payload. 
#[derive(Debug, Clone, Serialize, Deserialize)] diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index c0ffc994f7..bf0f0ab594 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -1,7 +1,4 @@ -use std::{ - collections::{HashMap, HashSet}, - num::NonZeroU64, -}; +use std::collections::{HashMap, HashSet}; use bytes::Bytes; use futures_lite::StreamExt; @@ -136,7 +133,7 @@ impl Reconciler { ReconciliationMessage::AnnounceEntries(message) => { let target_id = message.handles(); self.entry_state - .received_announce_entries(target_id, message.count)?; + .received_announce_entries(target_id, message.is_empty)?; let target = self .targets .get_eventually(&self.shared, &target_id) @@ -173,9 +170,11 @@ impl Reconciler { .received_send_payload(self.shared.store.payloads(), message.bytes) .await?; } - ReconciliationMessage::TerminatePayload(_message) => { - if let Some(completed_target) = - self.entry_state.received_terminate_payload().await? + ReconciliationMessage::TerminatePayload(message) => { + if let Some(completed_target) = self + .entry_state + .received_terminate_payload(message.is_final) + .await? 
{ let target = self .targets @@ -288,14 +287,17 @@ impl EntryState { self.0.is_none() } - pub fn received_announce_entries(&mut self, target: TargetId, count: u64) -> Result<(), Error> { + pub fn received_announce_entries( + &mut self, + target: TargetId, + is_empty: bool, + ) -> Result<(), Error> { if self.0.is_some() { return Err(Error::InvalidMessageInCurrentState); } - if let Some(count) = NonZeroU64::new(count) { + if !is_empty { self.0 = Some(EntryStateInner { target, - remaining_entries: Some(count), current_payload: CurrentPayload::default(), }); } @@ -310,10 +312,6 @@ impl EntryState { ) -> Result<(), Error> { let state = self.get_mut()?; state.current_payload.ensure_none()?; - state.remaining_entries = match state.remaining_entries.take() { - None => return Err(Error::InvalidMessageInCurrentState), - Some(c) => NonZeroU64::new(c.get().saturating_sub(1)), - }; state.current_payload.set( payload_digest, total_payload_length, @@ -335,10 +333,13 @@ impl EntryState { Ok(()) } - pub async fn received_terminate_payload(&mut self) -> Result, Error> { + pub async fn received_terminate_payload( + &mut self, + is_final: bool, + ) -> Result, Error> { let state = self.get_mut()?; state.current_payload.finalize().await?; - if state.remaining_entries.is_none() { + if is_final { let target_id = state.target; self.0 = None; Ok(Some(target_id)) @@ -358,7 +359,6 @@ impl EntryState { #[derive(Debug)] struct EntryStateInner { target: TargetId, - remaining_entries: Option, current_payload: CurrentPayload, } @@ -445,7 +445,7 @@ impl Target { if our_fingerprint == message.fingerprint { let reply = ReconciliationAnnounceEntries { range: message.range.clone(), - count: 0, + is_empty: true, want_response: false, will_sort: false, sender_handle: message.receiver_handle, @@ -456,7 +456,7 @@ impl Target { } // case 2: fingerprint is empty else if message.fingerprint.is_empty() { - self.announce_and_send_entries(shared, &message.range, true, Some(range_count), None) + 
self.announce_and_send_entries(shared, &message.range, true, Some(range_count), false) .await?; } // case 3: fingerprint doesn't match and is non-empty @@ -474,14 +474,8 @@ impl Target { let covers = is_last.then_some(range_count); match action { SplitAction::SendEntries(count) => { - self.announce_and_send_entries( - shared, - &subrange, - true, - covers, - Some(count), - ) - .await?; + self.announce_and_send_entries(shared, &subrange, true, covers, count > 0) + .await?; } SplitAction::SendFingerprint(fingerprint) => { self.send_fingerprint(shared, subrange, fingerprint, covers) @@ -507,7 +501,7 @@ impl Target { if message.want_response { let range_count = self.next_range_count_theirs(); - self.announce_and_send_entries(shared, &message.range, false, Some(range_count), None) + self.announce_and_send_entries(shared, &message.range, false, Some(range_count), false) .await?; } trace!("received_announce_entries done"); @@ -539,30 +533,39 @@ impl Target { range: &Range3d, want_response: bool, covers: Option, - our_entry_count: Option, + is_empty: bool, ) -> Result<(), Error> { - let our_entry_count = match our_entry_count { - Some(count) => count, - None => self.snapshot.count(self.namespace(), range)?, + if want_response { + self.mark_our_next_range_pending(); + } + + let (iter, is_empty) = if is_empty { + (None, true) + } else { + let mut iter = self + .snapshot + .get_authorised_entries(self.namespace(), range) + .peekable(); + let is_empty = iter.peek().is_none(); + (Some(iter), is_empty) }; + let msg = ReconciliationAnnounceEntries { range: range.clone().into(), - count: our_entry_count, + is_empty, want_response, will_sort: false, // todo: sorted? 
sender_handle: self.intersection.our_handle, receiver_handle: self.intersection.their_handle, covers, }; - if want_response { - self.mark_our_next_range_pending(); - } shared.send.send(msg).await?; - for authorised_entry in self - .snapshot - .get_authorised_entries(self.namespace(), range) - { + let Some(mut iter) = iter else { + return Ok(()); + }; + + while let Some(authorised_entry) = iter.next() { let authorised_entry = authorised_entry?; let (entry, token) = authorised_entry.into_parts(); @@ -590,7 +593,11 @@ impl Target { }) .await?; } - shared.send.send(ReconciliationTerminatePayload).await?; + let is_final = iter.peek().is_none(); + shared + .send + .send(ReconciliationTerminatePayload { is_final }) + .await?; } Ok(()) } From 14047d36ddf7511975365a4c49e025d35795cd9a Mon Sep 17 00:00:00 2001 From: Frando Date: Tue, 10 Sep 2024 10:21:09 +0200 Subject: [PATCH 195/198] fix: do not use store snapshots anymore --- iroh-willow/src/session/reconciler.rs | 103 +++++++++++++------------- 1 file changed, 51 insertions(+), 52 deletions(-) diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index bf0f0ab594..8790f0181d 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -4,7 +4,7 @@ use bytes::Bytes; use futures_lite::StreamExt; use genawaiter::rc::Co; use iroh_blobs::store::Store as PayloadStore; -use tracing::{debug, trace}; +use tracing::debug; use crate::{ proto::{ @@ -53,7 +53,7 @@ pub enum Output { pub struct Reconciler { shared: Shared, recv: Cancelable>, - targets: TargetMap, + targets: TargetMap, entry_state: EntryState, } @@ -219,30 +219,30 @@ impl Reconciler { } #[derive(Debug)] -struct TargetMap { - map: HashMap>, +struct TargetMap { + map: HashMap, inbox: CancelableReceiver, } -impl TargetMap { +impl TargetMap { pub fn new(inbox: CancelableReceiver) -> Self { Self { map: Default::default(), inbox, } } - pub async fn get_eventually( + pub async fn get_eventually( &mut self, 
shared: &Shared, requested_id: &TargetId, - ) -> Result<&mut Target, Error> { + ) -> Result<&mut Target, Error> { if !self.map.contains_key(requested_id) { self.wait_for_target(shared, requested_id).await?; } return Ok(self.map.get_mut(requested_id).unwrap()); } - async fn wait_for_target( + async fn wait_for_target( &mut self, shared: &Shared, requested_id: &TargetId, @@ -261,13 +261,12 @@ impl TargetMap { Ok(()) } - async fn init_target( + async fn init_target( &mut self, shared: &Shared, intersection: AoiIntersection, ) -> Result { - let snapshot = shared.store.entries().snapshot()?; - let target = Target::init(snapshot, shared, intersection).await?; + let target = Target::init(shared, intersection).await?; let id = target.id(); debug!( our_handle = id.0.value(), @@ -375,29 +374,23 @@ struct Shared { } #[derive(Debug)] -struct Target { - snapshot: ::Snapshot, - +struct Target { intersection: AoiIntersection, - our_uncovered_ranges: HashSet, started: bool, - our_range_counter: u64, their_range_counter: u64, } -impl Target { +impl Target { fn id(&self) -> TargetId { self.intersection.id() } - async fn init( - snapshot: ::Snapshot, + async fn init( shared: &Shared, intersection: AoiIntersection, ) -> Result { let mut this = Target { - snapshot, intersection, our_uncovered_ranges: Default::default(), started: false, @@ -414,9 +407,12 @@ impl Target { self.intersection.namespace } - async fn initiate(&mut self, shared: &Shared) -> Result<(), Error> { + async fn initiate(&mut self, shared: &Shared) -> Result<(), Error> { let range = self.intersection.area().to_range(); - let fingerprint = self.snapshot.fingerprint(self.namespace(), &range)?; + let fingerprint = shared + .store + .entries() + .fingerprint(self.namespace(), &range)?; self.send_fingerprint(shared, range, fingerprint, None) .await?; Ok(()) @@ -426,7 +422,7 @@ impl Target { self.started && self.our_uncovered_ranges.is_empty() } - async fn received_send_fingerprint( + async fn received_send_fingerprint( 
&mut self, shared: &Shared, message: ReconciliationSendFingerprint, @@ -437,22 +433,15 @@ impl Target { } let range_count = self.next_range_count_theirs(); - let our_fingerprint = self - .snapshot + let our_fingerprint = shared + .store + .entries() .fingerprint(self.namespace(), &message.range)?; // case 1: fingerprint match. if our_fingerprint == message.fingerprint { - let reply = ReconciliationAnnounceEntries { - range: message.range.clone(), - is_empty: true, - want_response: false, - will_sort: false, - sender_handle: message.receiver_handle, - receiver_handle: message.sender_handle, - covers: Some(range_count), - }; - shared.send.send(reply).await?; + self.announce_and_send_entries(shared, &message.range, false, Some(range_count), true) + .await?; } // case 2: fingerprint is empty else if message.fingerprint.is_empty() { @@ -462,10 +451,10 @@ impl Target { // case 3: fingerprint doesn't match and is non-empty else { // reply by splitting the range into parts unless it is very short - // TODO: Expose + // TODO: Expose these options to a higher level. let split_opts = SplitOpts::default(); - let snapshot = self.snapshot.clone(); - let mut iter = snapshot + let store = shared.store.entries().clone(); + let mut iter = store .split_range(self.namespace(), &message.range, &split_opts)? 
.peekable(); while let Some(res) = iter.next() { @@ -474,7 +463,7 @@ impl Target { let covers = is_last.then_some(range_count); match action { SplitAction::SendEntries(count) => { - self.announce_and_send_entries(shared, &subrange, true, covers, count > 0) + self.announce_and_send_entries(shared, &subrange, true, covers, count == 0) .await?; } SplitAction::SendFingerprint(fingerprint) => { @@ -488,12 +477,11 @@ impl Target { Ok(()) } - async fn received_announce_entries( + async fn received_announce_entries( &mut self, shared: &Shared, message: ReconciliationAnnounceEntries, ) -> Result<(), Error> { - trace!(?message, "received_announce_entries start"); self.started = true; if let Some(range_count) = message.covers { self.mark_our_range_covered(range_count)?; @@ -504,11 +492,10 @@ impl Target { self.announce_and_send_entries(shared, &message.range, false, Some(range_count), false) .await?; } - trace!("received_announce_entries done"); Ok(()) } - async fn send_fingerprint( + async fn send_fingerprint( &mut self, shared: &Shared, range: Range3d, @@ -527,7 +514,9 @@ impl Target { Ok(()) } - async fn announce_and_send_entries( + /// Send a [`ReconciliationAnnounceEntries`] message for a range, and all entries in the range unless + /// `is_empty` is set to true. + async fn announce_and_send_entries( &mut self, shared: &Shared, range: &Range3d, @@ -539,17 +528,25 @@ impl Target { self.mark_our_next_range_pending(); } - let (iter, is_empty) = if is_empty { - (None, true) + // If we know for sure that our range is empty, we can skip creating the entry iterator alltogether. + let mut iter = if is_empty { + None } else { - let mut iter = self - .snapshot - .get_authorised_entries(self.namespace(), range) - .peekable(); - let is_empty = iter.peek().is_none(); - (Some(iter), is_empty) + Some( + shared + .store + .entries() + .get_authorised_entries(self.namespace(), range) + .peekable(), + ) }; + // Find out if we will send any entries at all. 
+ let is_empty = iter + .as_mut() + .map(|iter| iter.peek().is_none()) + .unwrap_or(true); + // Send the announce message let msg = ReconciliationAnnounceEntries { range: range.clone().into(), is_empty, @@ -561,10 +558,12 @@ impl Target { }; shared.send.send(msg).await?; + // If our range is empty, we're done! let Some(mut iter) = iter else { return Ok(()); }; + // Otherwise send all the entries in our iterator, and payloads if applicable. while let Some(authorised_entry) = iter.next() { let authorised_entry = authorised_entry?; let (entry, token) = authorised_entry.into_parts(); From 63c95573ba379d6bc019a1730303ac8727d5e583 Mon Sep 17 00:00:00 2001 From: Franz Heinzmann Date: Tue, 24 Sep 2024 15:29:56 +0200 Subject: [PATCH 196/198] refactor(iroh-willow): refactor peer manager so that proptest passes reliably (#2727) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Description Fixes #2695 * Refactor peer manager to really keep track of all connections, the previous logic of a single peer state was flawed for simultaneous accepts while closing previous connections. * Better debuggability of reconciler * Add proptest from #2695 and refactor to run in both directions simultaneously. ## Breaking Changes ## Notes & open questions ## Change checklist - [ ] Self-review. - [ ] Documentation updates following the [style guide](https://rust-lang.github.io/rfcs/1574-more-api-documentation-conventions.html#appendix-a-full-conventions-text), if relevant. - [ ] Tests if relevant. - [ ] All breaking changes documented. 
--------- Co-authored-by: Philipp Krüger --- Cargo.lock | 44 +- iroh-willow/src/engine/actor.rs | 5 +- iroh-willow/src/engine/peer_manager.rs | 539 +++++++++++++------------ iroh-willow/src/net.rs | 37 +- iroh-willow/src/proto/data_model.rs | 18 + iroh-willow/src/session.rs | 2 +- iroh-willow/src/session/reconciler.rs | 24 +- iroh-willow/src/store/memory.rs | 5 + iroh/Cargo.toml | 1 + iroh/tests/spaces.proptest-regressions | 30 ++ iroh/tests/spaces.rs | 236 ++++++++++- 11 files changed, 645 insertions(+), 296 deletions(-) create mode 100644 iroh/tests/spaces.proptest-regressions diff --git a/Cargo.lock b/Cargo.lock index af61b754f3..169259b7f6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2526,6 +2526,7 @@ dependencies = [ "serde_json", "strum 0.25.0", "tempfile", + "test-strategy 0.4.0", "testdir", "testresult", "thiserror", @@ -2773,7 +2774,7 @@ dependencies = [ "serde", "strum 0.25.0", "tempfile", - "test-strategy", + "test-strategy 0.3.1", "thiserror", "tokio", "tokio-stream", @@ -3052,7 +3053,7 @@ dependencies = [ "strum 0.26.3", "syncify", "tempfile", - "test-strategy", + "test-strategy 0.3.1", "thiserror", "tokio", "tokio-stream", @@ -5434,7 +5435,19 @@ checksum = "78ad9e09554f0456d67a69c1584c9798ba733a5b50349a6c0d0948710523922d" dependencies = [ "proc-macro2", "quote", - "structmeta-derive", + "structmeta-derive 0.2.0", + "syn 2.0.72", +] + +[[package]] +name = "structmeta" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e1575d8d40908d70f6fd05537266b90ae71b15dbbe7a8b7dffa2b759306d329" +dependencies = [ + "proc-macro2", + "quote", + "structmeta-derive 0.3.0", "syn 2.0.72", ] @@ -5449,6 +5462,17 @@ dependencies = [ "syn 2.0.72", ] +[[package]] +name = "structmeta-derive" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "152a0b65a590ff6c3da95cabe2353ee04e6167c896b28e3b14478c2636c922fc" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.72", +] + [[package]] 
name = "strum" version = "0.25.0" @@ -5682,7 +5706,19 @@ checksum = "b8361c808554228ad09bfed70f5c823caf8a3450b6881cc3a38eb57e8c08c1d9" dependencies = [ "proc-macro2", "quote", - "structmeta", + "structmeta 0.2.0", + "syn 2.0.72", +] + +[[package]] +name = "test-strategy" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bf41af45e3f54cc184831d629d41d5b2bda8297e29c81add7ae4f362ed5e01b" +dependencies = [ + "proc-macro2", + "quote", + "structmeta 0.3.0", "syn 2.0.72", ] diff --git a/iroh-willow/src/engine/actor.rs b/iroh-willow/src/engine/actor.rs index 2df1df88c3..f72862ca40 100644 --- a/iroh-willow/src/engine/actor.rs +++ b/iroh-willow/src/engine/actor.rs @@ -265,9 +265,8 @@ impl Drop for ActorHandle { // shutdown let shutdown = move || { - if let Err(err) = inbox_tx.blocking_send(Input::Shutdown { reply: None }) { - warn!(?err, "Failed to send shutdown"); - } else if let Err(err) = handle.join() { + inbox_tx.blocking_send(Input::Shutdown { reply: None }).ok(); + if let Err(err) = handle.join() { warn!(?err, "Failed to join sync actor"); } }; diff --git a/iroh-willow/src/engine/peer_manager.rs b/iroh-willow/src/engine/peer_manager.rs index f23efc0e1d..8b43bd45cd 100644 --- a/iroh-willow/src/engine/peer_manager.rs +++ b/iroh-willow/src/engine/peer_manager.rs @@ -1,6 +1,6 @@ use std::{collections::HashMap, future::Future, sync::Arc, time::Duration}; -use anyhow::{anyhow, Context, Result}; +use anyhow::{Context, Result}; use futures_buffered::join_all; use futures_lite::{future::Boxed, StreamExt}; @@ -22,7 +22,7 @@ use crate::{ interest::Interests, net::{ establish, prepare_channels, terminate_gracefully, ChannelStreams, ConnHandle, ALPN, - ERROR_CODE_DUPLICATE_CONN, ERROR_CODE_FAIL, ERROR_CODE_SHUTDOWN, + ERROR_CODE_DUPLICATE_CONN, ERROR_CODE_SHUTDOWN, }, proto::wgps::AccessChallenge, session::{ @@ -118,7 +118,7 @@ pub(super) struct PeerManager { session_events_rx: StreamMap>, peers: HashMap, accept_handlers: 
AcceptHandlers, - conn_tasks: JoinSet<(Role, NodeId, Result)>, + conn_tasks: JoinSet<(NodeId, ConnStep)>, shutting_down: bool, } @@ -176,7 +176,7 @@ impl PeerManager { match res { Err(err) if err.is_cancelled() => continue, Err(err) => Err(err).context("conn task panicked")?, - Ok((our_role, peer, out)) => self.handle_conn_output(our_role, peer, out).await?, + Ok((peer, out)) => self.handle_conn_output(peer, out).await?, } if self.shutting_down && self.conn_tasks.is_empty() { debug!("all connections gracefully terminated"); @@ -219,27 +219,26 @@ impl PeerManager { let peer_info = self .peers .entry(peer) - .or_insert_with(|| PeerInfo::new(Role::Betty, peer)); + .or_insert_with(|| PeerInfo::new(peer)); - debug!(peer = %peer.fmt_short(), our_state=%peer_info.state, "incoming connection"); + debug!(peer = %peer.fmt_short(), our_state=%peer_info.conn_state, "incoming connection"); - let accept_conn = match peer_info.state { - PeerState::None => true, - PeerState::Pending { - ref mut cancel_dial, - .. - } => match peer_info.our_role { - Role::Betty => { + let accept_conn = match peer_info.conn_state { + ConnState::None => true, + ConnState::Establishing { + ref mut our_dial, .. + } => match our_dial { + // No dial but already establishing a previous incoming connection + None => { debug!("ignore incoming connection (already accepting)"); conn.close(ERROR_CODE_DUPLICATE_CONN, b"duplicate-already-accepting"); false } - Role::Alfie => { + // We are dialing also: abort one of the conns + Some(cancel_dial) => { if peer > self.endpoint.node_id() { debug!("incoming connection for a peer we are dialing and their connection wins, abort dial"); - if let Some(cancel_dial) = cancel_dial.take() { - cancel_dial.cancel(); - } + cancel_dial.cancel(); true } else { debug!("ignore incoming connection (already dialing and ours wins)"); @@ -248,45 +247,34 @@ impl PeerManager { } } }, - PeerState::Active { .. } => { + ConnState::Active { .. 
} => { debug!("ignore incoming connection (already active)"); conn.close(ERROR_CODE_DUPLICATE_CONN, b"duplicate-already-active"); false } - PeerState::Closing { .. } => true, + ConnState::Terminating { .. } => true, }; if accept_conn { debug!(peer=%peer.fmt_short(), "accept connection"); - // Take any pending intents from the previous state and merge with the new betty intent. - let mut intents = match peer_info.state { - PeerState::Pending { - ref mut intents, .. - } - | PeerState::Closing { - ref mut intents, .. - } => std::mem::take(intents), - _ => vec![], - }; - intents.push(intent); - peer_info.state = PeerState::Pending { - intents, - cancel_dial: None, - }; - peer_info.our_role = Role::Betty; + peer_info.push_intent(intent).await; // Start connection establish task. let our_nonce = AccessChallenge::generate(); let fut = async move { - let (initial_transmission, channel_streams) = - establish(&conn, Role::Betty, our_nonce).await?; - Ok(ConnStep::Ready { - conn, - initial_transmission, + let res = establish(&conn, Role::Betty, our_nonce).await; + let res = res.map(|(initial_transmission, channel_streams)| Established { channel_streams, - }) + initial_transmission, + conn, + our_role: Role::Betty, + }); + ConnStep::Established(res) }; let abort_handle = spawn_conn_task(&mut self.conn_tasks, peer_info, fut); - peer_info.abort_handle = Some(abort_handle); + peer_info.conn_state = ConnState::Establishing { + our_dial: None, + abort_handle, + }; } } @@ -294,72 +282,55 @@ impl PeerManager { let peer_info = self .peers .entry(peer) - .or_insert_with(|| PeerInfo::new(Role::Alfie, peer)); - - debug!(peer=%peer.fmt_short(), state=%peer_info.state, "submit intent"); - - match peer_info.state { - PeerState::None => { - let our_nonce = AccessChallenge::generate(); - let endpoint = self.endpoint.clone(); - let cancel_dial = CancellationToken::new(); - let cancel_dial2 = cancel_dial.clone(); - // Future that dials and establishes the connection. 
Can be cancelled for simultaneous connection. - let fut = async move { - debug!("connecting"); - let conn = tokio::select! { - res = endpoint.connect_by_node_id(peer, ALPN) => res, - _ = cancel_dial.cancelled() => { - debug!("dial cancelled during dial"); - return Err(ConnectionError::LocallyClosed.into()); - } - }?; - let (initial_transmission, channel_streams) = tokio::select! { - res = establish(&conn, Role::Alfie, our_nonce) => res?, - _ = cancel_dial.cancelled() => { - debug!("dial cancelled during establish"); - conn.close(ERROR_CODE_DUPLICATE_CONN, b"duplicate-your-dial-wins"); - return Err(ConnectionError::LocallyClosed.into()); - }, - }; - Ok(ConnStep::Ready { - conn, - initial_transmission, - channel_streams, - }) - }; - let abort_handle = spawn_conn_task(&mut self.conn_tasks, peer_info, fut); - peer_info.abort_handle = Some(abort_handle); - peer_info.state = PeerState::Pending { - intents: vec![intent], - cancel_dial: Some(cancel_dial2), - }; - } - PeerState::Pending { - ref mut intents, .. - } => { - intents.push(intent); - } - PeerState::Active { - ref update_tx, - ref mut intents_after_close, - .. 
- } => { - if let Err(err) = update_tx.send(SessionUpdate::SubmitIntent(intent)).await { - debug!("failed to submit intent into active session, queue in peer state"); - if let SessionUpdate::SubmitIntent(intent) = err.0 { - intents_after_close.push(intent); + .or_insert_with(|| PeerInfo::new(peer)); + + debug!(peer=%peer.fmt_short(), state=%peer_info.conn_state, "submit intent"); + if !peer_info.push_intent(intent).await { + self.connect_if_inactive(peer); + } + } + + fn connect_if_inactive(&mut self, peer: NodeId) { + let peer_info = self + .peers + .entry(peer) + .or_insert_with(|| PeerInfo::new(peer)); + if matches!(peer_info.conn_state, ConnState::None) { + let our_nonce = AccessChallenge::generate(); + let endpoint = self.endpoint.clone(); + let cancel_dial = CancellationToken::new(); + let cancel_dial2 = cancel_dial.clone(); + // Future that dials and establishes the connection. Can be cancelled for simultaneous connection. + let fut = async move { + debug!("connecting"); + let conn = tokio::select! { + res = endpoint.connect_by_node_id(peer, ALPN) => res, + _ = cancel_dial.cancelled() => { + debug!("dial cancelled during dial"); + return Err(ConnectionError::LocallyClosed.into()); } - } else { - trace!("intent sent to session"); - } - } - PeerState::Closing { - intents: ref mut new_intents, - .. - } => { - new_intents.push(intent); + }?; + let (initial_transmission, channel_streams) = tokio::select! 
{ + res = establish(&conn, Role::Alfie, our_nonce) => res?, + _ = cancel_dial.cancelled() => { + debug!("dial cancelled during establish"); + conn.close(ERROR_CODE_DUPLICATE_CONN, b"duplicate-your-dial-wins"); + return Err(ConnectionError::LocallyClosed.into()); + }, + }; + Ok(Established { + conn, + initial_transmission, + channel_streams, + our_role: Role::Alfie, + }) } + .map(ConnStep::Established); + let abort_handle = spawn_conn_task(&mut self.conn_tasks, peer_info, fut); + peer_info.conn_state = ConnState::Establishing { + our_dial: Some(cancel_dial2), + abort_handle, + }; } } @@ -384,38 +355,28 @@ impl PeerManager { return; }; - let PeerState::Active { - ref mut intents_after_close, - .. - } = peer_info.state - else { - warn!("got session complete event for peer not in active state"); - return; - }; - remaining_intents.append(intents_after_close); + peer_info.pending_intents.append(&mut remaining_intents); + peer_info.session_state = SessionState::None; - peer_info.state = PeerState::Closing { - intents: remaining_intents, - }; + if peer_info.conn_state.is_none() && peer_info.pending_intents.is_empty() { + self.peers.remove(&peer); + } else if peer_info.conn_state.is_none() { + self.connect_if_inactive(peer); + } trace!("entering closing state"); } } } #[instrument("conn", skip_all, fields(peer=%peer.fmt_short()))] - async fn handle_conn_output( - &mut self, - our_role: Role, - peer: NodeId, - out: Result, - ) -> Result<()> { + async fn handle_conn_output(&mut self, peer: NodeId, out: ConnStep) -> Result<()> { let peer_info = self .peers .get_mut(&peer) .context("got conn task output for unknown peer")?; match out { - Err(err) => { - trace!(?our_role, current_state=%peer_info.state, "conn task failed: {err:#?}"); + ConnStep::Established(Err(err)) => { + debug!(current_state=%peer_info.conn_state, "conn task failed while establishing: {err:#?}"); match err.downcast_ref() { Some(ConnectionError::LocallyClosed) => { // We cancelled the connection, nothing to 
do. @@ -425,76 +386,51 @@ impl PeerManager { if reason.error_code == ERROR_CODE_DUPLICATE_CONN => { debug!( - "connection was cancelled by the remote: simultaneous connection and their's wins" - ); - if our_role != peer_info.our_role { - // TODO: setup a timeout to kill intents if the other conn doesn't make it. - debug!("we are still waiting for their connection to arrive"); + "connection was cancelled by the remote: simultaneous connection and their's wins" + ); + if matches!( + &peer_info.conn_state, + ConnState::Establishing { + our_dial: Some(_), + .. + }, + ) { + peer_info.conn_state = ConnState::None; } + // if our_role != peer_info.our_role { + // // TODO: setup a timeout to kill intents if the other conn doesn't make it. + // debug!("we are still waiting for their connection to arrive"); + // } } _ => { - let peer = self.peers.remove(&peer).expect("just checked"); - match peer.state { - PeerState::Pending { intents, .. } => { - warn!(?err, "connection failed while pending"); - // If we were still in pending state, terminate all pending intents. - let err = Arc::new(Error::Net(err)); - join_all( - intents - .into_iter() - .map(|intent| intent.send_abort(err.clone())), - ) - .await; - } - PeerState::Closing { intents } => { - debug!(?err, "connection failed to close gracefully"); - // If we were are in closing state, we still forward the connection error to the intents. - // This would be the place where we'd implement retries: instead of aborting the intents, resubmit them. - // Right now, we only resubmit intents that were submitted while terminating a session, and only if the session closed gracefully. 
- let err = Arc::new(Error::Net(err)); - join_all( - intents - .into_iter() - .map(|intent| intent.send_abort(err.clone())), - ) - .await; + peer_info.conn_state = ConnState::None; + match &peer_info.session_state { + SessionState::None => { + peer_info + .abort_pending_intents(err.context("failed while establishing")) + .await; + self.peers.remove(&peer); } - PeerState::Active { update_tx, .. } => { - warn!(?err, "connection failed while active"); - update_tx - .send(SessionUpdate::Abort(Error::ConnectionClosed(err))) - .await - .ok(); - } - PeerState::None => { - warn!(?err, "connection failed while peer is in None state"); + SessionState::Active { .. } => { + // An establishing connection failed while an old session was still not terminated. + // We log the error and keep waiting for the session to terminate. This does not happen usually but can due to timings. + warn!("establish failed while session still not closed"); } } } } } - Ok(ConnStep::Ready { + ConnStep::Established(Ok(Established { + our_role, conn, initial_transmission, channel_streams, - }) => { - let PeerState::Pending { - ref mut intents, .. - } = &mut peer_info.state - else { - debug!( - ?our_role, - "got connection ready for peer in non-pending state" - ); - conn.close(ERROR_CODE_FAIL, b"invalid-state"); - drop(conn); - // TODO: unreachable? - return Err(anyhow!( - "got connection ready for peer in non-pending state" - )); + })) => { + let SessionState::None = peer_info.session_state else { + unreachable!("session must be inactive when connection establishes"); }; - let intents = std::mem::take(intents); + let intents = std::mem::take(&mut peer_info.pending_intents); if self.shutting_down { debug!("connection became ready while shutting down, abort"); @@ -511,7 +447,6 @@ impl PeerManager { return Ok(()); } - // TODO: Here we should check again that we are not establishing a duplicate connection. 
debug!(?our_role, "connection ready: init session"); let (channels, fut) = prepare_channels(channel_streams)?; let conn_handle = ConnHandle { @@ -520,10 +455,9 @@ impl PeerManager { our_role, peer, }; - peer_info.our_role = our_role; let session_handle = self.actor.init_session(conn_handle, intents).await?; - let fut = fut.map_ok(move |()| ConnStep::Done { conn }); + let fut = fut.map_ok(|()| conn).map(ConnStep::Done); let abort_handle = spawn_conn_task(&mut self.conn_tasks, peer_info, fut); let SessionHandle { @@ -533,42 +467,74 @@ impl PeerManager { self.session_events_rx .insert(peer, ReceiverStream::new(event_rx)); - peer_info.state = PeerState::Active { - update_tx, - intents_after_close: vec![], - }; - peer_info.abort_handle = Some(abort_handle); + peer_info.conn_state = ConnState::Active { abort_handle }; + peer_info.session_state = SessionState::Active { update_tx }; } - Ok(ConnStep::Done { conn }) => { + ConnStep::Done(Ok(conn)) => { trace!("connection loop finished"); - let fut = async move { - terminate_gracefully(&conn).await?; - // The connection is fully closed. - drop(conn); - Ok(ConnStep::Closed) + let ConnState::Active { .. } = &peer_info.conn_state else { + unreachable!("connection state mismatch: Done comes after Active only"); + }; + if let SessionState::Active { .. } = &peer_info.session_state { + // TODO: Can this happen? + unreachable!( + "connection may not terminate gracefully while session is still active" + ); }; + let fut = async move { ConnStep::Closed(terminate_gracefully(conn).await) }; let abort_handle = spawn_conn_task(&mut self.conn_tasks, peer_info, fut); - if let PeerState::Closing { .. } = &peer_info.state { - peer_info.abort_handle = Some(abort_handle); + peer_info.conn_state = ConnState::Terminating { abort_handle }; + } + ConnStep::Done(Err(err)) => { + let ConnState::Active { .. 
} = &peer_info.conn_state else { + unreachable!("connection state mismatch: Done comes after Active only"); + }; + if let SessionState::Active { update_tx } = &peer_info.session_state { + warn!(?err, "connection failed while active"); + update_tx + .send(SessionUpdate::Abort(Error::ConnectionClosed(err))) + .await + .ok(); + peer_info.conn_state = ConnState::None; } else { - // TODO: What do we do with the closing abort handle in case we have a new connection already? + debug!(?err, "connection failed while on session is active"); + peer_info + .abort_pending_intents(err.context("failed while active")) + .await; + self.peers.remove(&peer); } } - Ok(ConnStep::Closed) => { - debug!("connection closed gracefully"); - let peer_info = self.peers.remove(&peer).expect("just checked"); - if let PeerState::Closing { intents } = peer_info.state { - if !intents.is_empty() { - debug!( - "resubmitting {} intents that were not yet processed", - intents.len() - ); - for intent in intents { - self.submit_intent(peer, intent).await; + ConnStep::Closed(res) => { + debug!(?res, "connection closed"); + match &peer_info.conn_state { + ConnState::Terminating { .. } => { + peer_info.conn_state = ConnState::None; + if !peer_info.pending_intents.is_empty() { + debug!("peer has pending intents, reconnect"); + match res { + Ok(()) => self.connect_if_inactive(peer), + Err(err) => { + peer_info + .abort_pending_intents( + err.context("failed while closing connection"), + ) + .await + } + } + } else if peer_info.session_state.is_none() { + debug!("removed peer"); + self.peers.remove(&peer).expect("just checked"); + } else { + debug!("keeping peer because session still closing"); } } - } else { - warn!(state=%peer_info.state, "reached closed step for peer in wrong state"); + ConnState::Establishing { .. } => { + debug!("conn is already establishing again"); + } + ConnState::Active { .. 
} => { + debug!("conn is already active again"); + } + ConnState::None => unreachable!("ConnState::Closed may not happen while None"), } } } @@ -578,37 +544,30 @@ impl PeerManager { async fn init_shutdown(&mut self) { self.shutting_down = true; for peer in self.peers.values() { - match &peer.state { - PeerState::None => {} - PeerState::Pending { .. } => { - // We are in pending state, which means the session has not yet been started. - // Hard-abort the task and let the other peer handle the error. - if let Some(abort_handle) = &peer.abort_handle { - abort_handle.abort(); - } - } - PeerState::Closing { .. } => {} - PeerState::Active { update_tx, .. } => { - // We are in active state. We cancel our session, which leads to graceful connection termination. - update_tx - .send(SessionUpdate::Abort(Error::ShuttingDown)) - .await - .ok(); - } + if let ConnState::Establishing { abort_handle, .. } = &peer.conn_state { + // We are in pending state, which means the session has not yet been started. + // Hard-abort the task and let the other peer handle the error. + abort_handle.abort(); + } + if let SessionState::Active { update_tx } = &peer.session_state { + // We are in active state. We cancel our session, which leads to graceful connection termination. 
+ update_tx + .send(SessionUpdate::Abort(Error::ShuttingDown)) + .await + .ok(); } } } } fn spawn_conn_task( - conn_tasks: &mut JoinSet<(Role, NodeId, Result)>, + conn_tasks: &mut JoinSet<(NodeId, ConnStep)>, peer_info: &PeerInfo, - fut: impl Future> + Send + 'static, + fut: impl Future + Send + 'static, ) -> AbortHandle { let node_id = peer_info.node_id; - let our_role = peer_info.our_role; let fut = fut - .map(move |res| (our_role, node_id, res)) + .map(move |res| (node_id, res)) .instrument(peer_info.span.clone()); conn_tasks.spawn(fut) } @@ -616,52 +575,108 @@ fn spawn_conn_task( #[derive(Debug)] struct PeerInfo { node_id: NodeId, - our_role: Role, - abort_handle: Option, - state: PeerState, span: Span, + pending_intents: Vec, + conn_state: ConnState, + session_state: SessionState, } impl PeerInfo { - fn new(our_role: Role, peer: NodeId) -> Self { - Self { - node_id: peer, - our_role, - abort_handle: None, - state: PeerState::None, - span: error_span!("conn", peer=%peer.fmt_short()), + /// Returns `true` if the intent was pushed into the session channel and `false` if it was added to the pending intent list. + async fn push_intent(&mut self, intent: Intent) -> bool { + match &self.session_state { + SessionState::None => { + self.pending_intents.push(intent); + false + } + SessionState::Active { update_tx } => { + if let Err(err) = update_tx.send(SessionUpdate::SubmitIntent(intent)).await { + debug!("failed to submit intent into active session, queue in peer state"); + if let SessionUpdate::SubmitIntent(intent) = err.0 { + self.pending_intents.push(intent); + } + false + } else { + trace!("intent sent to session"); + true + } + } } } + + async fn abort_pending_intents(&mut self, err: anyhow::Error) { + let err = Arc::new(Error::Net(err)); + join_all( + self.pending_intents + .drain(..) 
+ .map(|intent| intent.send_abort(err.clone())), + ) + .await; + } } -#[derive(Debug, strum::Display)] -enum PeerState { +#[derive(Debug, Default, strum::Display)] +enum SessionState { + #[default] None, - Pending { - intents: Vec, - cancel_dial: Option, - }, Active { update_tx: mpsc::Sender, - /// List of intents that we failed to submit into the session because it is closing. - intents_after_close: Vec, }, - Closing { - intents: Vec, +} + +impl SessionState { + pub fn is_none(&self) -> bool { + matches!(self, Self::None) + } +} + +#[derive(Debug, Default, strum::Display)] +enum ConnState { + #[default] + None, + Establishing { + our_dial: Option, + abort_handle: AbortHandle, }, + Active { + abort_handle: AbortHandle, + }, + Terminating { + abort_handle: AbortHandle, + }, +} + +impl ConnState { + pub fn is_none(&self) -> bool { + matches!(self, Self::None) + } +} + +impl PeerInfo { + fn new(peer: NodeId) -> Self { + Self { + node_id: peer, + span: error_span!("conn", peer=%peer.fmt_short()), + session_state: Default::default(), + conn_state: Default::default(), + pending_intents: Default::default(), + } + } +} + +#[derive(Debug)] +struct Established { + our_role: Role, + conn: Connection, + initial_transmission: InitialTransmission, + channel_streams: ChannelStreams, } #[derive(derive_more::Debug, strum::Display)] enum ConnStep { - Ready { - conn: Connection, - initial_transmission: InitialTransmission, - channel_streams: ChannelStreams, - }, - Done { - conn: Connection, - }, - Closed, + Established(anyhow::Result), + Done(anyhow::Result), + Closed(anyhow::Result<()>), } /// The internal handlers for the [`AcceptOpts]. diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 9006c867b5..45603d6e09 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -1,6 +1,6 @@ //! Networking implementation for iroh-willow. 
-use std::{future::Future, time::Duration}; +use std::{future::Future, io, time::Duration}; use anyhow::{anyhow, ensure, Context as _, Result}; use futures_concurrency::future::TryJoin; @@ -10,7 +10,7 @@ use iroh_net::endpoint::{ Connection, ConnectionError, ReadError, ReadExactError, RecvStream, SendStream, VarInt, }; use tokio::io::{AsyncReadExt, AsyncWriteExt}; -use tracing::{debug, trace}; +use tracing::{debug, trace, warn}; use crate::{ proto::wgps::{ @@ -257,6 +257,11 @@ fn prepare_channel( (sender, receiver, fut) } +/// Error code when stopping receive streams because we closed our session. +/// +/// This is currently for debugging purposes only - the other end will still see this as a connection error. +const ERROR_CODE_SESSION_CLOSED: VarInt = VarInt::from_u32(1); + async fn recv_loop( channel: Channel, mut recv_stream: RecvStream, @@ -269,9 +274,18 @@ async fn recv_loop( .await .context("failed to read from quic stream")? { - // trace!(len = buf.bytes.len(), "read"); - channel_writer.write_all(&buf.bytes[..]).await?; - // trace!(len = buf.bytes.len(), "sent"); + trace!(len = buf.bytes.len(), "read"); + match channel_writer.write_all(&buf.bytes[..]).await { + Ok(()) => { + trace!(len = buf.bytes.len(), "sent"); + } + Err(err) if err.kind() == io::ErrorKind::BrokenPipe => { + debug!("closing recv channel: session closed"); + recv_stream.stop(ERROR_CODE_SESSION_CLOSED)?; + break; + } + Err(err) => return Err(err.into()), + } } trace!(?channel, "recv: stream close"); channel_writer.close(); @@ -295,6 +309,7 @@ async fn send_loop( } trace!(?channel, "send: close writer"); send_stream.finish()?; + send_stream.stopped().await?; // We don't await SendStream::stopped, because we rely on application level closing notifications, // and make sure that the connection is closed gracefully in any case. 
trace!(?channel, "send: done"); @@ -321,7 +336,7 @@ async fn send_loop( /// /// Returns an error if the termination flow was aborted prematurely or if the connection was not /// closed with the expected error code. -pub(crate) async fn terminate_gracefully(conn: &Connection) -> Result<()> { +pub(crate) async fn terminate_gracefully(conn: Connection) -> Result<()> { trace!("terminating connection"); // Send a single byte on a newly opened uni stream. let mut send_stream = conn.open_uni().await?; @@ -329,7 +344,7 @@ pub(crate) async fn terminate_gracefully(conn: &Connection) -> Result<()> { send_stream.finish()?; // Wait until we either receive the goodbye byte from the other peer, or for the other peer // to close the connection with the expected error code. - match tokio::time::timeout(SHUTDOWN_TIMEOUT, wait_for_goodbye_or_graceful_close(conn)).await { + match tokio::time::timeout(SHUTDOWN_TIMEOUT, wait_for_goodbye_or_graceful_close(&conn)).await { Ok(Ok(())) => { conn.close(ERROR_CODE_OK, b"bye"); trace!("connection terminated gracefully"); @@ -574,8 +589,8 @@ mod tests { r2.unwrap(); tokio::try_join!( - terminate_gracefully(&conn_alfie), - terminate_gracefully(&conn_betty), + terminate_gracefully(conn_alfie), + terminate_gracefully(conn_betty), ) .expect("failed to close both connections gracefully"); @@ -747,8 +762,8 @@ mod tests { ); tokio::try_join!( - terminate_gracefully(&conn_alfie), - terminate_gracefully(&conn_betty), + terminate_gracefully(conn_alfie), + terminate_gracefully(conn_betty), ) .expect("failed to close both connections gracefully"); diff --git a/iroh-willow/src/proto/data_model.rs b/iroh-willow/src/proto/data_model.rs index 369e07eb1a..1f6ceddec7 100644 --- a/iroh-willow/src/proto/data_model.rs +++ b/iroh-willow/src/proto/data_model.rs @@ -86,6 +86,8 @@ pub type Path = willow_data_model::Path Result; + /// Debug-format the path as a lossy UTF-8 string. 
+ fn fmt_utf8(&self) -> String; } impl PathExt for Path { @@ -104,6 +106,22 @@ impl PathExt for Path { Ok(path) } } + + fn fmt_utf8(&self) -> String { + let mut s = String::new(); + let mut iter = self.components().peekable(); + while let Some(c) = iter.next() { + if let Ok(c) = std::str::from_utf8(c.as_ref()) { + s.push_str(c); + } else { + s.push_str(&format!("<{}>", hex::encode(c.as_ref()))); + } + if iter.peek().is_some() { + s.push('/'); + } + } + s + } } #[derive(Debug, thiserror::Error)] diff --git a/iroh-willow/src/session.rs b/iroh-willow/src/session.rs index 2cf059e6c1..d12eae4f9a 100644 --- a/iroh-willow/src/session.rs +++ b/iroh-willow/src/session.rs @@ -42,7 +42,7 @@ pub(crate) type SessionId = u64; /// To break symmetry, we refer to the peer that initiated the synchronisation session as Alfie, /// and the other peer as Betty. -#[derive(Debug, Clone, Copy, Eq, PartialEq)] +#[derive(Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd)] pub enum Role { /// The peer that initiated the synchronisation session. Alfie, diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index 8790f0181d..695dec1c73 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -4,11 +4,11 @@ use bytes::Bytes; use futures_lite::StreamExt; use genawaiter::rc::Co; use iroh_blobs::store::Store as PayloadStore; -use tracing::debug; +use tracing::{debug, trace}; use crate::{ proto::{ - data_model::PayloadDigest, + data_model::{PathExt, PayloadDigest}, grouping::{AreaExt, AreaOfInterest, Range3d}, keys::NamespaceId, wgps::{ @@ -98,11 +98,10 @@ impl Reconciler { loop { tokio::select! 
{ Some(message) = self.recv.next() => { - tracing::trace!(?message, "tick: recv"); self.received_message(message?).await?; } Some(input) = self.targets.inbox.next() => { - tracing::trace!(?input, "tick: input"); + trace!(?input, "tick: input"); match input { Input::AoiIntersection(intersection) => { self.targets.init_target(&self.shared, intersection).await?; @@ -118,6 +117,7 @@ impl Reconciler { async fn received_message(&mut self, message: ReconciliationMessage) -> Result<(), Error> { match message { ReconciliationMessage::SendFingerprint(message) => { + trace!(range=?message.range, "recv SendFingerprint"); let target_id = message.handles(); let target = self .targets @@ -131,6 +131,7 @@ impl Reconciler { } } ReconciliationMessage::AnnounceEntries(message) => { + trace!(is_empty=?message.is_empty, range=?message.range, "recv AnnounceEntries"); let target_id = message.handles(); self.entry_state .received_announce_entries(target_id, message.is_empty)?; @@ -146,6 +147,11 @@ impl Reconciler { } } ReconciliationMessage::SendEntry(message) => { + trace!( + subspace = %message.entry.entry.subspace_id().fmt_short(), + path = %message.entry.entry.path().fmt_utf8(), + "recv SendEntry" + ); let authorised_entry = self .shared .static_tokens @@ -166,14 +172,18 @@ impl Reconciler { )?; } ReconciliationMessage::SendPayload(message) => { + trace!("recv SendPayload"); self.entry_state .received_send_payload(self.shared.store.payloads(), message.bytes) .await?; } - ReconciliationMessage::TerminatePayload(message) => { + ReconciliationMessage::TerminatePayload(ReconciliationTerminatePayload { + is_final, + }) => { + trace!(?is_final, "recv TerminatePayloade"); if let Some(completed_target) = self .entry_state - .received_terminate_payload(message.is_final) + .received_terminate_payload(is_final) .await? 
{ let target = self @@ -528,7 +538,7 @@ impl Target { self.mark_our_next_range_pending(); } - // If we know for sure that our range is empty, we can skip creating the entry iterator alltogether. + // If we know for sure that our range is empty, we can skip creating the entry iterator. let mut iter = if is_empty { None } else { diff --git a/iroh-willow/src/store/memory.rs b/iroh-willow/src/store/memory.rs index 3b17561050..ce36738973 100644 --- a/iroh-willow/src/store/memory.rs +++ b/iroh-willow/src/store/memory.rs @@ -13,7 +13,9 @@ use std::task::{ready, Context, Poll, Waker}; use anyhow::Result; use futures_util::Stream; +use tracing::debug; +use crate::proto::data_model::PathExt; use crate::proto::grouping::Area; use crate::{ interest::{CapSelector, CapabilityPack}, @@ -255,6 +257,7 @@ impl EntryStore { && existing.is_newer_than(new) { // we cannot insert the entry, a newer entry exists + debug!(subspace=%entry.entry().subspace_id().fmt_short(), path=%entry.entry().path().fmt_utf8(), "skip ingest, already pruned"); return Ok(false); } if new.subspace_id() == existing.subspace_id() @@ -264,6 +267,7 @@ impl EntryStore { to_prune.push(i); } } + let pruned_count = to_prune.len(); for i in to_prune { let pruned = entries.remove(i); store.events.insert(move |id| { @@ -277,6 +281,7 @@ impl EntryStore { }); } entries.push(entry.clone()); + debug!(subspace=%entry.entry().subspace_id().fmt_short(), path=%entry.entry().path().fmt_utf8(), pruned=pruned_count, total=entries.len(), "ingest entry"); store .events .insert(|id| StoreEvent::Ingested(id, entry.clone(), origin)); diff --git a/iroh/Cargo.toml b/iroh/Cargo.toml index a98f664cac..fce3f66a0b 100644 --- a/iroh/Cargo.toml +++ b/iroh/Cargo.toml @@ -81,6 +81,7 @@ proptest = "1.2.0" rand_chacha = "0.3.1" regex = { version = "1.7.1", features = ["std"] } serde_json = "1.0.107" +test-strategy = "0.4.0" testdir = "0.9.1" testresult = "0.4.0" tokio = { version = "1", features = ["macros", "io-util", "rt"] } diff --git 
a/iroh/tests/spaces.proptest-regressions b/iroh/tests/spaces.proptest-regressions new file mode 100644 index 0000000000..156d17ee07 --- /dev/null +++ b/iroh/tests/spaces.proptest-regressions @@ -0,0 +1,30 @@ +# Seeds for failure cases proptest has generated in the past. It is +# automatically read and these particular cases re-run before any +# novel cases are generated. +# +# It is recommended to check this file in to source control so that +# everyone who runs the test benefits from these saved cases. +cc b247c5db7888ec8f993852033ea7d612f5a7cc5e51d6dc80cbbf0b370f1bf9df # shrinks to input = _TestGetManyWeirdResultArgs { rounds: [(Alfie, [])] } +cc 10758efcbd4145b23bb48a35a5a93b13f42cc71457b18e8b2f521fb66537e94e # shrinks to input = _TestGetManyWeirdResultArgs { rounds: [(Betty, [Write("alpha", "gamma"), Write("gamma", "beta"), Write("beta", "alpha"), Write("gamma", "alpha"), Write("beta", "gamma"), Write("alpha", "beta"), Write("beta", "alpha"), Write("alpha", "gamma"), Write("gamma", "beta"), Write("beta", "beta"), Write("beta", "beta"), Write("beta", "gamma"), Write("alpha", "gamma"), Write("beta", "beta")]), (Betty, [Write("gamma", "gamma")]), (Alfie, [Write("gamma", "gamma"), Write("alpha", "gamma"), Write("beta", "beta"), Write("alpha", "gamma"), Write("beta", "beta"), Write("beta", "alpha")]), (Alfie, [Write("beta", "gamma")]), (Betty, [Write("beta", "alpha"), Write("alpha", "alpha")]), (Alfie, [Write("alpha", "alpha"), Write("beta", "beta"), Write("gamma", "alpha"), Write("alpha", "alpha"), Write("gamma", "beta"), Write("alpha", "beta"), Write("gamma", "beta"), Write("alpha", "alpha"), Write("beta", "beta"), Write("gamma", "beta"), Write("gamma", "alpha"), Write("beta", "beta")]), (Betty, [Write("gamma", "beta"), Write("gamma", "beta"), Write("gamma", "alpha"), Write("gamma", "gamma"), Write("gamma", "beta"), Write("alpha", "beta")]), (Alfie, [Write("beta", "alpha"), Write("beta", "gamma"), Write("alpha", "beta"), Write("alpha", "alpha"), Write("gamma", 
"alpha"), Write("beta", "beta")]), (Alfie, [Write("gamma", "beta"), Write("beta", "gamma")]), (Betty, [Write("alpha", "alpha"), Write("gamma", "gamma"), Write("alpha", "beta"), Write("gamma", "beta")]), (Betty, [Write("beta", "alpha"), Write("alpha", "gamma"), Write("gamma", "alpha"), Write("beta", "alpha"), Write("gamma", "gamma"), Write("beta", "beta"), Write("gamma", "beta"), Write("alpha", "beta"), Write("gamma", "gamma")]), (Alfie, [Write("gamma", "beta")]), (Betty, [Write("alpha", "alpha"), Write("beta", "alpha"), Write("beta", "alpha"), Write("alpha", "alpha"), Write("beta", "beta"), Write("alpha", "beta"), Write("alpha", "gamma"), Write("beta", "beta"), Write("beta", "alpha"), Write("gamma", "alpha"), Write("beta", "beta"), Write("beta", "gamma"), Write("gamma", "alpha"), Write("gamma", "gamma"), Write("gamma", "beta"), Write("alpha", "alpha")]), (Betty, [Write("gamma", "beta"), Write("gamma", "gamma"), Write("alpha", "beta"), Write("alpha", "gamma"), Write("alpha", "gamma"), Write("alpha", "beta"), Write("alpha", "gamma"), Write("beta", "beta"), Write("alpha", "gamma"), Write("alpha", "beta"), Write("alpha", "gamma"), Write("alpha", "gamma"), Write("alpha", "beta"), Write("gamma", "beta"), Write("gamma", "gamma"), Write("alpha", "beta")])] } +cc bad55ca9718ab95bc85e0ee4581fcf9ca019f10ae8cd8b1c30acd2ab7fd03a7f # shrinks to input = _TestGetManyWeirdResultArgs { rounds: [(Betty, [Write("alpha", "gamma"), Write("gamma", "beta"), Write("alpha", "gamma"), Write("beta", "alpha"), Write("alpha", "alpha"), Write("gamma", "gamma"), Write("gamma", "beta")]), (Betty, [Write("gamma", "gamma"), Write("beta", "gamma"), Write("gamma", "beta"), Write("gamma", "beta"), Write("gamma", "beta"), Write("alpha", "gamma"), Write("alpha", "gamma"), Write("alpha", "alpha"), Write("gamma", "gamma")]), (Betty, [Write("beta", "alpha"), Write("gamma", "alpha"), Write("beta", "alpha"), Write("alpha", "gamma"), Write("gamma", "beta"), Write("gamma", "alpha"), Write("gamma", "beta"), 
Write("alpha", "beta")]), (Alfie, [Write("gamma", "beta"), Write("alpha", "beta"), Write("gamma", "beta"), Write("alpha", "beta"), Write("alpha", "gamma"), Write("alpha", "alpha"), Write("beta", "alpha"), Write("gamma", "alpha"), Write("beta", "beta"), Write("alpha", "alpha"), Write("gamma", "gamma"), Write("gamma", "beta"), Write("alpha", "gamma"), Write("gamma", "beta"), Write("alpha", "gamma"), Write("beta", "beta")]), (Alfie, [Write("beta", "beta"), Write("gamma", "gamma"), Write("beta", "gamma"), Write("alpha", "gamma"), Write("alpha", "beta")]), (Alfie, [Write("beta", "alpha"), Write("alpha", "gamma"), Write("alpha", "beta"), Write("gamma", "beta"), Write("alpha", "beta"), Write("gamma", "alpha"), Write("gamma", "beta"), Write("beta", "beta"), Write("beta", "gamma"), Write("beta", "alpha"), Write("gamma", "beta"), Write("gamma", "alpha"), Write("beta", "alpha"), Write("gamma", "alpha"), Write("alpha", "beta"), Write("gamma", "gamma"), Write("gamma", "alpha"), Write("alpha", "gamma")]), (Betty, [Write("alpha", "beta")]), (Betty, [Write("alpha", "gamma"), Write("alpha", "alpha")]), (Alfie, [Write("alpha", "gamma"), Write("beta", "gamma"), Write("alpha", "alpha"), Write("gamma", "beta"), Write("beta", "alpha"), Write("gamma", "alpha"), Write("beta", "gamma"), Write("alpha", "beta"), Write("beta", "gamma"), Write("alpha", "alpha")]), (Betty, [Write("alpha", "gamma"), Write("gamma", "beta"), Write("beta", "gamma"), Write("beta", "alpha"), Write("gamma", "beta"), Write("gamma", "beta"), Write("beta", "gamma")]), (Betty, [Write("gamma", "gamma"), Write("gamma", "alpha"), Write("beta", "gamma"), Write("gamma", "beta"), Write("beta", "alpha"), Write("gamma", "gamma"), Write("gamma", "gamma"), Write("alpha", "alpha"), Write("gamma", "gamma"), Write("alpha", "alpha"), Write("beta", "alpha"), Write("gamma", "alpha"), Write("alpha", "alpha"), Write("beta", "alpha"), Write("beta", "alpha"), Write("alpha", "gamma")]), (Alfie, [Write("beta", "alpha"), Write("beta", "gamma"), 
Write("alpha", "gamma")])] } +cc 9c1851f6773562a9d437743f7033d15df566ef3ee865533a4d197120af731891 # shrinks to input = _TestGetManyWeirdResultArgs { rounds: [(Alfie, [Write("gamma", "beta"), Write("alpha", "gamma"), Write("gamma", "gamma"), Write("gamma", "alpha"), Write("beta", "beta"), Write("gamma", "alpha"), Write("alpha", "beta"), Write("beta", "gamma"), Write("alpha", "beta"), Write("beta", "beta"), Write("gamma", "beta"), Write("alpha", "alpha"), Write("beta", "gamma"), Write("gamma", "gamma"), Write("gamma", "gamma"), Write("gamma", "beta"), Write("alpha", "beta"), Write("beta", "beta"), Write("beta", "alpha")]), (Alfie, [Write("beta", "gamma"), Write("alpha", "alpha"), Write("alpha", "alpha"), Write("beta", "alpha")]), (Betty, [Write("gamma", "alpha"), Write("beta", "gamma"), Write("beta", "gamma"), Write("gamma", "gamma"), Write("alpha", "beta"), Write("alpha", "gamma"), Write("alpha", "beta"), Write("alpha", "alpha"), Write("alpha", "beta")]), (Alfie, [Write("beta", "alpha"), Write("alpha", "beta"), Write("alpha", "beta"), Write("beta", "gamma")]), (Betty, [Write("alpha", "beta"), Write("alpha", "alpha"), Write("beta", "gamma")]), (Alfie, [Write("beta", "alpha"), Write("gamma", "gamma")]), (Betty, [Write("beta", "gamma"), Write("alpha", "gamma"), Write("alpha", "gamma"), Write("alpha", "beta"), Write("alpha", "gamma"), Write("alpha", "gamma"), Write("beta", "beta"), Write("gamma", "gamma"), Write("gamma", "alpha"), Write("beta", "beta"), Write("alpha", "alpha")]), (Alfie, [Write("beta", "beta"), Write("alpha", "alpha"), Write("beta", "beta"), Write("alpha", "alpha"), Write("beta", "gamma"), Write("alpha", "alpha"), Write("beta", "beta"), Write("beta", "alpha"), Write("alpha", "alpha"), Write("beta", "beta"), Write("gamma", "alpha"), Write("beta", "gamma"), Write("gamma", "gamma"), Write("gamma", "alpha")]), (Alfie, [])] } +cc 2bd80650f13377a3e39bbbf73c0fcf1f17b056880651abfc68e75f66a7f3c130 # shrinks to input = _TestGetManyWeirdResultArgs { rounds: 
[(Alfie, [Write("beta", "beta"), Write("beta", "beta"), Write("alpha", "alpha"), Write("alpha", "alpha"), Write("beta", "gamma"), Write("beta", "beta"), Write("gamma", "gamma"), Write("beta", "beta"), Write("beta", "beta"), Write("gamma", "beta"), Write("gamma", "beta"), Write("gamma", "gamma"), Write("gamma", "beta"), Write("beta", "beta")]), (Alfie, [Write("alpha", "beta"), Write("alpha", "beta"), Write("gamma", "alpha"), Write("gamma", "alpha"), Write("alpha", "beta"), Write("alpha", "beta"), Write("alpha", "alpha"), Write("beta", "gamma"), Write("alpha", "gamma"), Write("beta", "gamma"), Write("alpha", "alpha"), Write("gamma", "alpha"), Write("gamma", "alpha"), Write("beta", "beta"), Write("beta", "alpha"), Write("gamma", "gamma"), Write("alpha", "alpha"), Write("alpha", "alpha"), Write("alpha", "gamma")]), (Betty, [Write("gamma", "beta"), Write("gamma", "beta"), Write("alpha", "gamma"), Write("alpha", "gamma"), Write("beta", "gamma"), Write("alpha", "beta"), Write("gamma", "gamma"), Write("beta", "beta"), Write("beta", "alpha"), Write("beta", "alpha"), Write("gamma", "beta"), Write("alpha", "beta"), Write("gamma", "alpha"), Write("gamma", "beta"), Write("gamma", "alpha"), Write("gamma", "beta")]), (Alfie, [Write("alpha", "beta")]), (Alfie, [Write("gamma", "alpha"), Write("gamma", "alpha"), Write("beta", "alpha"), Write("gamma", "alpha"), Write("alpha", "gamma"), Write("beta", "alpha"), Write("gamma", "beta"), Write("alpha", "alpha"), Write("beta", "alpha"), Write("gamma", "gamma")]), (Betty, [Write("alpha", "beta"), Write("alpha", "gamma"), Write("alpha", "beta"), Write("alpha", "beta"), Write("alpha", "alpha"), Write("alpha", "alpha"), Write("beta", "beta"), Write("alpha", "gamma"), Write("alpha", "alpha"), Write("beta", "alpha"), Write("alpha", "beta")]), (Betty, []), (Alfie, [Write("gamma", "alpha")]), (Betty, [Write("alpha", "alpha"), Write("alpha", "beta"), Write("beta", "alpha"), Write("beta", "alpha"), Write("gamma", "beta"), Write("gamma", "gamma"), 
Write("gamma", "gamma"), Write("alpha", "beta"), Write("gamma", "gamma"), Write("beta", "beta")]), (Betty, [Write("alpha", "alpha"), Write("gamma", "beta"), Write("gamma", "alpha")]), (Alfie, [Write("alpha", "gamma")])] } +cc 48f0a951e77785086a8625b0e5afeb4a25e49fd1923e707eebc42d30c430a144 # shrinks to input = _TestGetManyWeirdResultArgs { rounds: [(Betty, [Write("beta", "beta")]), (Alfie, [Write("gamma", "beta"), Write("gamma", "gamma"), Write("beta", "gamma"), Write("alpha", "alpha"), Write("alpha", "beta"), Write("beta", "beta")]), (Betty, [Write("alpha", "beta"), Write("gamma", "alpha"), Write("alpha", "gamma"), Write("beta", "gamma"), Write("gamma", "alpha"), Write("gamma", "beta"), Write("alpha", "beta"), Write("gamma", "gamma"), Write("alpha", "beta"), Write("beta", "beta"), Write("beta", "gamma"), Write("gamma", "beta"), Write("gamma", "gamma"), Write("gamma", "gamma"), Write("beta", "gamma")]), (Alfie, [Write("beta", "gamma"), Write("alpha", "beta"), Write("gamma", "beta"), Write("alpha", "alpha"), Write("gamma", "alpha"), Write("gamma", "beta"), Write("gamma", "beta"), Write("gamma", "gamma")]), (Betty, [Write("alpha", "beta"), Write("beta", "beta")]), (Alfie, [Write("beta", "beta"), Write("gamma", "gamma"), Write("alpha", "alpha"), Write("alpha", "beta"), Write("alpha", "alpha"), Write("alpha", "gamma"), Write("alpha", "beta"), Write("gamma", "gamma"), Write("beta", "alpha"), Write("gamma", "gamma")]), (Alfie, [Write("alpha", "gamma"), Write("beta", "alpha"), Write("beta", "beta"), Write("beta", "gamma"), Write("alpha", "gamma"), Write("alpha", "gamma")]), (Betty, [Write("beta", "gamma"), Write("beta", "beta"), Write("alpha", "gamma"), Write("alpha", "beta"), Write("alpha", "gamma"), Write("beta", "beta"), Write("beta", "alpha"), Write("alpha", "gamma"), Write("gamma", "gamma"), Write("alpha", "gamma"), Write("beta", "beta"), Write("beta", "alpha")]), (Betty, [Write("alpha", "beta"), Write("gamma", "gamma"), Write("beta", "gamma"), Write("gamma", 
"beta"), Write("beta", "beta"), Write("gamma", "gamma"), Write("alpha", "gamma"), Write("alpha", "alpha"), Write("beta", "beta"), Write("beta", "alpha"), Write("gamma", "gamma")]), (Alfie, [Write("alpha", "alpha"), Write("gamma", "alpha"), Write("gamma", "alpha"), Write("alpha", "alpha"), Write("alpha", "gamma"), Write("gamma", "gamma"), Write("gamma", "gamma"), Write("alpha", "alpha")]), (Alfie, [Write("gamma", "gamma"), Write("alpha", "alpha"), Write("gamma", "gamma"), Write("beta", "beta"), Write("gamma", "beta")]), (Alfie, [Write("beta", "beta"), Write("beta", "alpha"), Write("beta", "gamma"), Write("gamma", "gamma"), Write("gamma", "alpha"), Write("alpha", "beta"), Write("beta", "alpha"), Write("alpha", "gamma"), Write("alpha", "alpha"), Write("beta", "alpha"), Write("alpha", "beta"), Write("beta", "alpha"), Write("gamma", "alpha"), Write("beta", "gamma"), Write("alpha", "alpha"), Write("alpha", "gamma"), Write("alpha", "gamma")]), (Alfie, [Write("beta", "beta"), Write("gamma", "alpha"), Write("gamma", "beta"), Write("alpha", "alpha"), Write("alpha", "alpha"), Write("alpha", "beta"), Write("beta", "alpha"), Write("gamma", "gamma"), Write("beta", "gamma"), Write("beta", "beta"), Write("gamma", "gamma"), Write("beta", "beta"), Write("gamma", "beta"), Write("gamma", "gamma"), Write("beta", "gamma"), Write("gamma", "gamma"), Write("beta", "gamma"), Write("beta", "alpha"), Write("alpha", "beta")]), (Betty, [Write("alpha", "gamma"), Write("beta", "alpha"), Write("gamma", "beta"), Write("gamma", "gamma"), Write("gamma", "beta"), Write("gamma", "alpha"), Write("gamma", "gamma"), Write("beta", "gamma"), Write("gamma", "gamma"), Write("gamma", "alpha"), Write("beta", "alpha"), Write("beta", "alpha"), Write("alpha", "gamma"), Write("beta", "beta"), Write("alpha", "gamma")]), (Betty, [Write("beta", "beta"), Write("gamma", "gamma"), Write("beta", "alpha"), Write("alpha", "gamma"), Write("gamma", "beta"), Write("gamma", "gamma"), Write("gamma", "gamma"), Write("beta", 
"alpha")]), (Betty, [Write("beta", "gamma"), Write("alpha", "alpha"), Write("gamma", "alpha"), Write("beta", "alpha"), Write("alpha", "alpha"), Write("alpha", "alpha"), Write("beta", "gamma"), Write("beta", "alpha"), Write("gamma", "beta"), Write("alpha", "gamma")]), (Alfie, [Write("gamma", "beta"), Write("beta", "gamma"), Write("beta", "alpha"), Write("beta", "gamma"), Write("alpha", "gamma"), Write("gamma", "gamma"), Write("beta", "beta"), Write("gamma", "alpha"), Write("gamma", "beta"), Write("beta", "gamma"), Write("gamma", "beta")])] } +cc fd7a666da43de4a6647fd7a5b7c543c4f11abde19a3d8592d3845226bc964f1c # shrinks to input = _TestGetManyWeirdResultArgs { rounds: [] } +cc 42fc5284840d3b6e58d5650c131a3ab6e7528fc98fb4c4fb77097e29715326f5 # shrinks to input = _TestGetManyWeirdResultArgs { rounds: [] } +cc cef4354611d13f5c0cdecbc409eb54eea1ef59512c272875d01040b187db84ea # shrinks to input = _TestGetManyWeirdResultArgs { rounds: [(Alfie, [Write("beta", "gamma"), Write("beta", "alpha"), Write("gamma", "beta"), Write("alpha", "alpha"), Write("gamma", "beta"), Write("alpha", "beta")]), (Alfie, [Write("beta", "alpha")]), (Alfie, [Write("alpha", "alpha"), Write("beta", "gamma"), Write("alpha", "gamma"), Write("alpha", "beta"), Write("gamma", "gamma"), Write("beta", "beta"), Write("beta", "gamma"), Write("gamma", "gamma"), Write("alpha", "gamma"), Write("beta", "beta"), Write("beta", "gamma"), Write("alpha", "alpha")]), (Betty, [Write("alpha", "gamma"), Write("beta", "gamma"), Write("alpha", "beta"), Write("beta", "beta"), Write("gamma", "alpha"), Write("beta", "gamma"), Write("alpha", "beta"), Write("beta", "beta"), Write("gamma", "gamma"), Write("beta", "alpha"), Write("gamma", "beta"), Write("alpha", "alpha"), Write("alpha", "beta"), Write("gamma", "gamma"), Write("alpha", "beta")]), (Betty, []), (Alfie, [Write("alpha", "alpha"), Write("alpha", "gamma"), Write("gamma", "gamma"), Write("alpha", "alpha"), Write("alpha", "alpha"), Write("beta", "beta"), Write("beta", 
"beta"), Write("beta", "gamma"), Write("beta", "alpha")]), (Betty, [Write("alpha", "alpha"), Write("beta", "gamma"), Write("gamma", "gamma"), Write("gamma", "gamma"), Write("beta", "beta"), Write("gamma", "alpha"), Write("alpha", "alpha"), Write("gamma", "alpha"), Write("beta", "beta"), Write("gamma", "beta"), Write("beta", "gamma"), Write("alpha", "gamma"), Write("gamma", "beta"), Write("gamma", "beta"), Write("beta", "beta"), Write("beta", "alpha"), Write("gamma", "beta")]), (Alfie, [Write("beta", "beta")]), (Betty, [Write("alpha", "beta"), Write("alpha", "gamma"), Write("alpha", "beta"), Write("alpha", "alpha"), Write("alpha", "gamma"), Write("gamma", "gamma"), Write("gamma", "alpha"), Write("alpha", "alpha"), Write("alpha", "alpha"), Write("alpha", "alpha"), Write("alpha", "alpha"), Write("alpha", "gamma")]), (Alfie, [Write("beta", "alpha"), Write("beta", "alpha"), Write("alpha", "alpha"), Write("alpha", "gamma"), Write("alpha", "beta"), Write("gamma", "gamma"), Write("gamma", "gamma"), Write("gamma", "gamma"), Write("alpha", "alpha"), Write("beta", "gamma"), Write("beta", "alpha"), Write("alpha", "alpha"), Write("beta", "alpha"), Write("gamma", "gamma")]), (Betty, [Write("gamma", "gamma"), Write("alpha", "beta"), Write("beta", "beta"), Write("beta", "gamma"), Write("alpha", "beta"), Write("beta", "beta"), Write("alpha", "beta"), Write("beta", "alpha"), Write("beta", "beta"), Write("alpha", "alpha"), Write("gamma", "gamma"), Write("gamma", "alpha"), Write("beta", "alpha"), Write("gamma", "alpha"), Write("beta", "alpha")]), (Alfie, [Write("beta", "alpha"), Write("alpha", "alpha"), Write("beta", "beta"), Write("alpha", "beta"), Write("beta", "beta"), Write("alpha", "alpha"), Write("gamma", "beta"), Write("alpha", "beta"), Write("gamma", "beta"), Write("gamma", "alpha"), Write("beta", "beta"), Write("alpha", "gamma"), Write("alpha", "alpha"), Write("gamma", "gamma"), Write("gamma", "alpha"), Write("alpha", "alpha")]), (Betty, [Write("beta", "beta"), Write("gamma", 
"gamma"), Write("alpha", "beta"), Write("alpha", "beta"), Write("beta", "beta"), Write("beta", "gamma"), Write("gamma", "beta")]), (Alfie, [Write("alpha", "beta"), Write("gamma", "beta"), Write("alpha", "beta"), Write("gamma", "gamma"), Write("gamma", "beta"), Write("alpha", "beta")])] } +cc cfd6874efc9b42a5cad679512edfb09332852f4919920b2dde117e7039edff5a # shrinks to input = _TestGetManyWeirdResultArgs { rounds: [(Alfie, [Write("alpha", "alpha"), Write("gamma", "gamma"), Write("beta", "gamma"), Write("beta", "alpha"), Write("beta", "gamma"), Write("alpha", "alpha"), Write("beta", "alpha"), Write("alpha", "beta"), Write("beta", "beta")]), (Alfie, [Write("alpha", "alpha"), Write("beta", "gamma"), Write("alpha", "gamma"), Write("alpha", "alpha"), Write("beta", "alpha"), Write("beta", "gamma"), Write("alpha", "gamma"), Write("alpha", "alpha"), Write("alpha", "beta"), Write("beta", "beta"), Write("gamma", "beta"), Write("beta", "alpha"), Write("gamma", "beta"), Write("alpha", "gamma"), Write("alpha", "alpha")]), (Betty, [Write("alpha", "beta"), Write("alpha", "alpha"), Write("beta", "beta"), Write("beta", "gamma"), Write("beta", "alpha"), Write("beta", "gamma"), Write("gamma", "beta"), Write("alpha", "beta"), Write("beta", "alpha")]), (Betty, [Write("alpha", "gamma"), Write("alpha", "beta"), Write("gamma", "alpha"), Write("alpha", "beta"), Write("alpha", "gamma"), Write("alpha", "gamma"), Write("alpha", "gamma")]), (Alfie, [Write("alpha", "alpha"), Write("alpha", "alpha"), Write("alpha", "gamma"), Write("beta", "beta"), Write("beta", "beta"), Write("beta", "alpha"), Write("gamma", "alpha")]), (Alfie, [Write("alpha", "alpha"), Write("gamma", "beta"), Write("gamma", "alpha"), Write("gamma", "alpha"), Write("beta", "alpha"), Write("beta", "beta"), Write("beta", "gamma"), Write("beta", "beta"), Write("beta", "alpha"), Write("alpha", "alpha"), Write("beta", "gamma"), Write("beta", "beta"), Write("gamma", "beta")]), (Alfie, [Write("gamma", "beta"), Write("alpha", "gamma"), 
Write("gamma", "beta"), Write("gamma", "beta"), Write("beta", "gamma"), Write("gamma", "gamma"), Write("beta", "alpha"), Write("beta", "alpha")]), (Betty, [Write("gamma", "gamma"), Write("alpha", "alpha"), Write("gamma", "beta"), Write("gamma", "alpha"), Write("beta", "beta"), Write("gamma", "gamma"), Write("gamma", "beta"), Write("gamma", "alpha"), Write("beta", "beta"), Write("alpha", "beta"), Write("alpha", "gamma"), Write("beta", "gamma"), Write("gamma", "beta"), Write("gamma", "alpha"), Write("beta", "beta")]), (Betty, [Write("beta", "alpha")]), (Betty, [Write("gamma", "alpha"), Write("beta", "alpha"), Write("alpha", "gamma"), Write("beta", "beta"), Write("gamma", "alpha"), Write("beta", "gamma"), Write("alpha", "alpha"), Write("beta", "gamma"), Write("beta", "beta"), Write("alpha", "alpha"), Write("gamma", "alpha"), Write("gamma", "beta"), Write("gamma", "alpha"), Write("alpha", "beta"), Write("beta", "alpha"), Write("alpha", "gamma"), Write("alpha", "beta"), Write("beta", "beta"), Write("gamma", "gamma")]), (Alfie, [Write("alpha", "alpha"), Write("alpha", "gamma"), Write("beta", "gamma"), Write("beta", "gamma"), Write("gamma", "alpha"), Write("gamma", "beta"), Write("gamma", "beta"), Write("beta", "alpha"), Write("gamma", "gamma"), Write("beta", "beta"), Write("alpha", "alpha"), Write("alpha", "beta")]), (Alfie, [Write("gamma", "beta"), Write("gamma", "beta")]), (Alfie, [Write("gamma", "beta"), Write("alpha", "gamma"), Write("beta", "gamma"), Write("gamma", "beta"), Write("beta", "beta"), Write("gamma", "alpha"), Write("alpha", "beta"), Write("gamma", "alpha"), Write("beta", "gamma"), Write("beta", "alpha"), Write("gamma", "alpha"), Write("gamma", "beta"), Write("alpha", "alpha")]), (Betty, [Write("gamma", "beta"), Write("beta", "alpha")]), (Alfie, [Write("beta", "gamma"), Write("beta", "alpha"), Write("alpha", "gamma"), Write("beta", "gamma"), Write("beta", "gamma"), Write("gamma", "gamma")]), (Betty, [Write("gamma", "alpha")])] } +cc 
61e5a9d3c5dc02a1fe0ebb0a357d33c0c7eb5340a6b0982a9e6d187103366062 # shrinks to input = _TestGetManyWeirdResultArgs { rounds: [(Alfie, [Write("gamma", "gamma"), Write("alpha", "alpha")]), (Alfie, [Write("alpha", "beta"), Write("beta", "alpha"), Write("beta", "beta"), Write("beta", "beta"), Write("gamma", "alpha"), Write("gamma", "beta"), Write("beta", "alpha"), Write("beta", "beta"), Write("beta", "alpha"), Write("alpha", "beta"), Write("gamma", "alpha"), Write("beta", "alpha"), Write("beta", "alpha"), Write("beta", "alpha"), Write("gamma", "gamma"), Write("gamma", "alpha"), Write("alpha", "alpha"), Write("gamma", "gamma"), Write("alpha", "gamma")]), (Betty, [Write("alpha", "beta"), Write("beta", "beta"), Write("beta", "gamma"), Write("gamma", "alpha"), Write("alpha", "beta"), Write("alpha", "gamma"), Write("alpha", "beta"), Write("gamma", "gamma"), Write("alpha", "gamma")]), (Betty, [Write("beta", "alpha"), Write("beta", "gamma"), Write("alpha", "beta"), Write("gamma", "alpha"), Write("alpha", "beta"), Write("alpha", "beta"), Write("gamma", "gamma"), Write("gamma", "beta"), Write("beta", "alpha"), Write("gamma", "gamma"), Write("alpha", "beta"), Write("beta", "gamma"), Write("beta", "gamma"), Write("beta", "alpha"), Write("beta", "gamma"), Write("gamma", "gamma"), Write("alpha", "alpha"), Write("alpha", "alpha"), Write("beta", "gamma")]), (Betty, [Write("beta", "gamma"), Write("gamma", "alpha"), Write("gamma", "gamma"), Write("alpha", "gamma"), Write("alpha", "gamma"), Write("beta", "beta")]), (Betty, [Write("gamma", "alpha"), Write("beta", "beta"), Write("alpha", "gamma"), Write("beta", "gamma"), Write("beta", "gamma"), Write("alpha", "beta"), Write("beta", "gamma"), Write("gamma", "gamma"), Write("beta", "beta"), Write("beta", "beta"), Write("alpha", "beta"), Write("alpha", "beta"), Write("alpha", "beta")]), (Betty, [Write("gamma", "alpha"), Write("gamma", "gamma"), Write("alpha", "beta"), Write("gamma", "gamma"), Write("beta", "alpha"), Write("alpha", "beta"), 
Write("alpha", "alpha"), Write("beta", "alpha"), Write("beta", "gamma"), Write("gamma", "gamma")]), (Betty, [Write("alpha", "gamma"), Write("beta", "gamma"), Write("gamma", "gamma"), Write("beta", "gamma"), Write("gamma", "beta"), Write("beta", "gamma"), Write("beta", "alpha"), Write("beta", "gamma"), Write("gamma", "alpha"), Write("alpha", "alpha"), Write("beta", "beta"), Write("gamma", "gamma"), Write("alpha", "alpha"), Write("alpha", "alpha"), Write("gamma", "gamma"), Write("gamma", "gamma"), Write("beta", "gamma")]), (Alfie, [Write("alpha", "beta"), Write("beta", "beta"), Write("gamma", "gamma"), Write("alpha", "alpha"), Write("gamma", "alpha"), Write("beta", "beta"), Write("gamma", "alpha"), Write("beta", "beta"), Write("beta", "alpha"), Write("alpha", "alpha"), Write("gamma", "alpha"), Write("beta", "alpha"), Write("beta", "gamma"), Write("beta", "beta"), Write("beta", "beta"), Write("beta", "alpha"), Write("beta", "beta"), Write("gamma", "alpha"), Write("beta", "alpha")]), (Betty, [Write("gamma", "beta"), Write("gamma", "alpha"), Write("gamma", "gamma"), Write("beta", "beta"), Write("alpha", "alpha"), Write("beta", "beta"), Write("alpha", "gamma"), Write("beta", "beta"), Write("beta", "gamma"), Write("gamma", "beta"), Write("beta", "beta")]), (Alfie, [Write("alpha", "gamma"), Write("beta", "gamma"), Write("alpha", "gamma"), Write("alpha", "alpha"), Write("gamma", "beta"), Write("beta", "beta")]), (Betty, [Write("gamma", "gamma"), Write("beta", "alpha"), Write("alpha", "gamma"), Write("alpha", "gamma"), Write("gamma", "gamma"), Write("gamma", "alpha"), Write("gamma", "alpha"), Write("beta", "alpha"), Write("beta", "beta"), Write("gamma", "gamma"), Write("beta", "gamma"), Write("alpha", "beta"), Write("beta", "gamma"), Write("gamma", "beta"), Write("gamma", "beta"), Write("beta", "gamma"), Write("gamma", "gamma")]), (Alfie, [Write("gamma", "beta"), Write("beta", "beta"), Write("gamma", "alpha"), Write("alpha", "gamma"), Write("gamma", "alpha"), Write("beta", 
"alpha"), Write("beta", "gamma"), Write("beta", "alpha"), Write("beta", "beta"), Write("alpha", "alpha"), Write("beta", "alpha")]), (Betty, [Write("alpha", "alpha"), Write("alpha", "gamma"), Write("gamma", "gamma"), Write("beta", "gamma"), Write("gamma", "beta"), Write("alpha", "alpha"), Write("gamma", "beta"), Write("gamma", "alpha"), Write("beta", "beta"), Write("beta", "alpha"), Write("alpha", "beta")])] } +cc 7ad26b3a87698eedb995dfcb549ddf6e730b900d794e3645e9193a3d0ba942af # shrinks to input = _TestGetManyWeirdResultArgs { rounds: [(Betty, [Write("gamma", "beta"), Write("beta", "gamma"), Write("gamma", "beta")]), (Betty, [Write("beta", "beta"), Write("alpha", "alpha"), Write("gamma", "gamma"), Write("beta", "alpha"), Write("beta", "alpha"), Write("beta", "alpha"), Write("gamma", "beta"), Write("gamma", "alpha"), Write("gamma", "gamma"), Write("alpha", "gamma"), Write("alpha", "gamma"), Write("gamma", "alpha"), Write("alpha", "alpha"), Write("beta", "alpha"), Write("alpha", "beta"), Write("gamma", "gamma"), Write("gamma", "alpha"), Write("alpha", "alpha")]), (Alfie, [Write("beta", "gamma"), Write("beta", "beta"), Write("gamma", "alpha"), Write("beta", "alpha"), Write("gamma", "gamma"), Write("beta", "beta"), Write("alpha", "gamma"), Write("gamma", "alpha"), Write("gamma", "alpha")]), (Alfie, [Write("beta", "alpha"), Write("gamma", "alpha"), Write("gamma", "gamma"), Write("gamma", "alpha"), Write("gamma", "gamma"), Write("beta", "alpha"), Write("beta", "gamma"), Write("beta", "alpha"), Write("alpha", "beta"), Write("gamma", "alpha"), Write("alpha", "alpha"), Write("alpha", "beta"), Write("alpha", "gamma"), Write("beta", "gamma"), Write("gamma", "gamma")]), (Betty, [Write("alpha", "gamma"), Write("alpha", "beta"), Write("alpha", "alpha"), Write("alpha", "gamma"), Write("beta", "gamma"), Write("beta", "gamma"), Write("gamma", "gamma"), Write("beta", "alpha"), Write("gamma", "beta"), Write("alpha", "alpha"), Write("beta", "alpha"), Write("alpha", "beta"), 
Write("beta", "alpha"), Write("gamma", "beta"), Write("alpha", "beta")]), (Alfie, [Write("gamma", "alpha"), Write("beta", "alpha"), Write("alpha", "gamma"), Write("gamma", "alpha"), Write("beta", "beta"), Write("beta", "beta"), Write("alpha", "beta"), Write("alpha", "beta"), Write("gamma", "alpha"), Write("gamma", "beta"), Write("beta", "gamma"), Write("beta", "gamma"), Write("gamma", "alpha"), Write("alpha", "gamma"), Write("beta", "alpha"), Write("alpha", "alpha"), Write("beta", "beta")]), (Betty, [Write("beta", "gamma"), Write("alpha", "gamma"), Write("beta", "alpha"), Write("alpha", "alpha"), Write("alpha", "gamma")]), (Betty, [Write("alpha", "gamma"), Write("beta", "gamma"), Write("gamma", "gamma"), Write("beta", "alpha"), Write("beta", "beta"), Write("gamma", "gamma"), Write("beta", "alpha"), Write("gamma", "alpha"), Write("gamma", "gamma"), Write("gamma", "alpha"), Write("beta", "alpha"), Write("alpha", "alpha"), Write("gamma", "alpha"), Write("gamma", "gamma"), Write("alpha", "beta"), Write("beta", "beta"), Write("beta", "gamma")])] } +cc 66d537ccb5b41cbc39884aef2c35e3c6242a37973888a30f87d9918f3e626fca # shrinks to input = _TestGetManyWeirdResultArgs { rounds: [(Alfie, [Write("gamma", "beta"), Write("beta", "gamma"), Write("gamma", "gamma"), Write("alpha", "gamma"), Write("beta", "beta"), Write("gamma", "alpha"), Write("alpha", "alpha"), Write("alpha", "alpha")]), (Alfie, [Write("alpha", "beta"), Write("alpha", "alpha"), Write("gamma", "beta"), Write("beta", "gamma"), Write("gamma", "beta"), Write("gamma", "alpha"), Write("alpha", "beta"), Write("beta", "gamma"), Write("gamma", "alpha"), Write("beta", "beta"), Write("beta", "beta"), Write("beta", "alpha"), Write("beta", "alpha"), Write("gamma", "alpha"), Write("gamma", "alpha"), Write("gamma", "gamma")]), (Betty, [Write("gamma", "alpha"), Write("gamma", "gamma"), Write("beta", "alpha"), Write("alpha", "alpha"), Write("beta", "gamma"), Write("gamma", "gamma"), Write("alpha", "alpha"), Write("beta", "gamma"), 
Write("gamma", "alpha"), Write("gamma", "gamma"), Write("alpha", "gamma")]), (Alfie, [Write("beta", "gamma"), Write("gamma", "alpha"), Write("alpha", "gamma"), Write("beta", "beta")]), (Betty, [Write("alpha", "beta"), Write("alpha", "gamma"), Write("gamma", "alpha"), Write("beta", "alpha"), Write("beta", "beta"), Write("alpha", "gamma"), Write("gamma", "alpha"), Write("gamma", "beta"), Write("beta", "gamma"), Write("beta", "gamma"), Write("alpha", "alpha"), Write("gamma", "beta"), Write("beta", "alpha"), Write("beta", "gamma"), Write("beta", "alpha"), Write("alpha", "alpha")]), (Alfie, [Write("alpha", "gamma"), Write("beta", "alpha"), Write("alpha", "gamma"), Write("beta", "alpha"), Write("gamma", "beta"), Write("gamma", "alpha")]), (Betty, [Write("gamma", "beta"), Write("alpha", "alpha")]), (Alfie, [Write("gamma", "alpha")]), (Betty, [Write("alpha", "gamma"), Write("gamma", "beta"), Write("gamma", "alpha"), Write("beta", "alpha"), Write("gamma", "gamma"), Write("beta", "gamma"), Write("alpha", "gamma"), Write("alpha", "gamma"), Write("beta", "gamma"), Write("alpha", "beta"), Write("gamma", "alpha"), Write("beta", "gamma"), Write("alpha", "beta")]), (Alfie, []), (Alfie, [Write("alpha", "beta"), Write("gamma", "beta"), Write("beta", "gamma"), Write("alpha", "gamma"), Write("alpha", "beta"), Write("gamma", "gamma"), Write("gamma", "gamma"), Write("beta", "beta"), Write("alpha", "alpha"), Write("alpha", "beta"), Write("beta", "gamma"), Write("beta", "beta"), Write("gamma", "alpha")]), (Betty, [Write("gamma", "alpha"), Write("alpha", "alpha"), Write("gamma", "beta"), Write("alpha", "gamma"), Write("gamma", "alpha"), Write("gamma", "gamma"), Write("alpha", "alpha"), Write("alpha", "gamma"), Write("gamma", "beta"), Write("beta", "alpha"), Write("alpha", "beta"), Write("alpha", "beta"), Write("beta", "alpha"), Write("beta", "beta"), Write("beta", "gamma"), Write("gamma", "gamma"), Write("alpha", "alpha"), Write("beta", "alpha"), Write("beta", "gamma")]), (Alfie, 
[Write("beta", "beta"), Write("beta", "beta"), Write("gamma", "gamma"), Write("beta", "alpha"), Write("alpha", "alpha"), Write("alpha", "alpha"), Write("gamma", "gamma"), Write("beta", "beta"), Write("beta", "beta"), Write("gamma", "beta"), Write("gamma", "beta"), Write("beta", "alpha"), Write("alpha", "beta"), Write("alpha", "alpha"), Write("gamma", "beta"), Write("beta", "beta"), Write("beta", "alpha"), Write("alpha", "beta")]), (Alfie, [Write("beta", "gamma"), Write("beta", "gamma"), Write("beta", "beta")]), (Betty, [Write("alpha", "beta"), Write("beta", "gamma"), Write("beta", "alpha"), Write("gamma", "gamma"), Write("gamma", "beta"), Write("gamma", "alpha"), Write("beta", "beta"), Write("alpha", "beta"), Write("alpha", "gamma"), Write("gamma", "gamma"), Write("alpha", "beta"), Write("gamma", "beta"), Write("gamma", "alpha"), Write("alpha", "alpha"), Write("alpha", "gamma")]), (Alfie, [Write("gamma", "gamma"), Write("beta", "alpha"), Write("alpha", "beta"), Write("beta", "gamma"), Write("beta", "gamma"), Write("gamma", "gamma"), Write("alpha", "gamma"), Write("beta", "beta"), Write("beta", "beta"), Write("gamma", "gamma"), Write("alpha", "alpha"), Write("alpha", "gamma")]), (Betty, [Write("alpha", "alpha"), Write("beta", "gamma"), Write("alpha", "gamma"), Write("gamma", "beta"), Write("alpha", "alpha"), Write("beta", "beta"), Write("beta", "gamma"), Write("beta", "beta"), Write("gamma", "beta"), Write("beta", "gamma")])] } +cc a7a7d234a4fbe2d760f1cc6000ba506da4e1a36e06ec8303a1877ff006242c84 # shrinks to input = _TestGetManyWeirdResultArgs { rounds: [(Betty, [Write("gamma", "alpha"), Write("beta", "beta"), Write("beta", "gamma"), Write("alpha", "gamma"), Write("gamma", "alpha"), Write("alpha", "alpha"), Write("alpha", "beta"), Write("beta", "alpha"), Write("gamma", "beta"), Write("alpha", "beta"), Write("gamma", "alpha"), Write("alpha", "gamma"), Write("beta", "beta")]), (Alfie, [Write("gamma", "alpha"), Write("alpha", "alpha"), Write("beta", "beta"), 
Write("alpha", "gamma"), Write("gamma", "gamma"), Write("alpha", "alpha"), Write("alpha", "beta"), Write("gamma", "beta"), Write("gamma", "alpha"), Write("gamma", "gamma"), Write("alpha", "alpha"), Write("gamma", "beta"), Write("beta", "beta")]), (Alfie, [Write("gamma", "beta"), Write("alpha", "beta"), Write("gamma", "alpha"), Write("gamma", "gamma"), Write("beta", "beta"), Write("beta", "gamma"), Write("gamma", "alpha"), Write("beta", "gamma"), Write("beta", "beta")]), (Betty, [Write("alpha", "beta"), Write("alpha", "alpha"), Write("gamma", "gamma"), Write("gamma", "gamma")]), (Alfie, [Write("gamma", "gamma"), Write("gamma", "alpha"), Write("beta", "alpha")]), (Betty, [Write("beta", "gamma"), Write("beta", "beta"), Write("alpha", "beta"), Write("beta", "beta"), Write("alpha", "alpha"), Write("gamma", "alpha")]), (Betty, [Write("gamma", "beta"), Write("gamma", "beta"), Write("beta", "gamma"), Write("beta", "alpha"), Write("beta", "alpha"), Write("alpha", "beta"), Write("beta", "alpha"), Write("alpha", "beta"), Write("gamma", "gamma"), Write("alpha", "alpha"), Write("beta", "gamma"), Write("alpha", "alpha"), Write("beta", "alpha"), Write("beta", "beta"), Write("alpha", "alpha"), Write("beta", "beta"), Write("beta", "gamma"), Write("alpha", "beta"), Write("alpha", "gamma")]), (Betty, [Write("alpha", "alpha"), Write("alpha", "alpha"), Write("beta", "alpha"), Write("alpha", "beta"), Write("gamma", "gamma"), Write("beta", "beta"), Write("alpha", "alpha")]), (Alfie, [Write("beta", "gamma"), Write("alpha", "beta"), Write("alpha", "beta")]), (Alfie, [Write("beta", "gamma"), Write("alpha", "gamma"), Write("gamma", "beta"), Write("alpha", "gamma"), Write("alpha", "alpha"), Write("gamma", "beta"), Write("gamma", "gamma"), Write("alpha", "beta"), Write("beta", "alpha"), Write("alpha", "beta"), Write("gamma", "beta"), Write("beta", "alpha"), Write("beta", "alpha")])] } +cc 8345ae470aaefbc75795860875a4a379be7a30a96c8108c87d0f7473278f55b8 # shrinks to input = 
_TestGetManyWeirdResultArgs { rounds: [(Betty, [Write("gamma", "gamma"), Write("alpha", "gamma"), Write("alpha", "gamma"), Write("beta", "alpha"), Write("gamma", "gamma"), Write("alpha", "alpha"), Write("gamma", "gamma"), Write("beta", "alpha"), Write("beta", "gamma"), Write("gamma", "beta"), Write("alpha", "beta"), Write("beta", "alpha"), Write("alpha", "beta"), Write("gamma", "gamma"), Write("beta", "alpha")]), (Betty, [Write("alpha", "gamma"), Write("alpha", "beta"), Write("alpha", "alpha"), Write("gamma", "beta"), Write("alpha", "alpha"), Write("alpha", "gamma"), Write("gamma", "alpha"), Write("gamma", "alpha"), Write("alpha", "beta"), Write("alpha", "alpha"), Write("gamma", "beta"), Write("alpha", "beta"), Write("alpha", "beta"), Write("beta", "gamma")]), (Alfie, [Write("gamma", "beta"), Write("beta", "alpha"), Write("gamma", "beta"), Write("beta", "gamma"), Write("gamma", "gamma"), Write("gamma", "alpha"), Write("gamma", "beta"), Write("alpha", "beta"), Write("alpha", "alpha"), Write("beta", "gamma"), Write("beta", "beta"), Write("beta", "alpha"), Write("gamma", "alpha"), Write("beta", "alpha"), Write("beta", "beta"), Write("beta", "beta"), Write("alpha", "gamma")]), (Betty, [Write("alpha", "gamma"), Write("gamma", "gamma"), Write("beta", "alpha"), Write("gamma", "gamma"), Write("alpha", "beta"), Write("alpha", "beta"), Write("gamma", "gamma"), Write("beta", "beta"), Write("alpha", "gamma"), Write("alpha", "beta"), Write("beta", "alpha"), Write("alpha", "alpha"), Write("alpha", "alpha"), Write("gamma", "beta"), Write("alpha", "gamma"), Write("alpha", "beta")]), (Alfie, [Write("beta", "gamma"), Write("beta", "alpha"), Write("beta", "beta")]), (Alfie, [Write("beta", "gamma"), Write("alpha", "alpha"), Write("alpha", "beta"), Write("alpha", "alpha"), Write("beta", "gamma"), Write("beta", "alpha"), Write("gamma", "alpha"), Write("beta", "alpha"), Write("beta", "alpha"), Write("beta", "beta"), Write("beta", "gamma"), Write("gamma", "gamma"), Write("beta", "alpha"), 
Write("beta", "alpha")]), (Alfie, [Write("gamma", "alpha"), Write("gamma", "beta"), Write("alpha", "alpha"), Write("gamma", "alpha"), Write("beta", "gamma"), Write("alpha", "alpha"), Write("alpha", "alpha"), Write("gamma", "gamma"), Write("alpha", "gamma"), Write("alpha", "gamma"), Write("alpha", "alpha"), Write("beta", "alpha"), Write("beta", "alpha"), Write("alpha", "beta"), Write("gamma", "alpha"), Write("beta", "beta"), Write("beta", "gamma")]), (Betty, [Write("gamma", "beta"), Write("alpha", "gamma"), Write("gamma", "beta"), Write("beta", "alpha"), Write("beta", "gamma"), Write("beta", "alpha"), Write("gamma", "gamma"), Write("beta", "alpha"), Write("gamma", "beta"), Write("beta", "beta"), Write("alpha", "beta"), Write("gamma", "beta"), Write("gamma", "alpha"), Write("gamma", "alpha"), Write("gamma", "alpha"), Write("alpha", "alpha"), Write("alpha", "gamma"), Write("gamma", "alpha")]), (Betty, [Write("alpha", "gamma"), Write("beta", "beta"), Write("alpha", "alpha"), Write("gamma", "alpha")]), (Alfie, [Write("beta", "gamma"), Write("alpha", "beta")]), (Betty, [Write("beta", "gamma"), Write("alpha", "beta")]), (Alfie, [Write("gamma", "gamma"), Write("alpha", "gamma"), Write("gamma", "beta"), Write("alpha", "gamma"), Write("beta", "alpha"), Write("beta", "beta")])] } +cc f4f91399efad219908f9467397d047a6319e1111571cf08574f93fa8da8d1f06 # shrinks to input = _TestGetManyWeirdResultArgs { rounds: [(Betty, [Write("beta", "alpha"), Write("alpha", "alpha"), Write("alpha", "gamma"), Write("beta", "alpha"), Write("beta", "alpha"), Write("gamma", "gamma"), Write("beta", "gamma"), Write("beta", "alpha"), Write("beta", "alpha"), Write("gamma", "beta"), Write("alpha", "gamma"), Write("gamma", "beta")]), (Betty, [Write("alpha", "beta"), Write("gamma", "alpha"), Write("gamma", "alpha"), Write("beta", "beta"), Write("beta", "beta"), Write("gamma", "gamma"), Write("alpha", "alpha"), Write("gamma", "beta"), Write("gamma", "beta"), Write("alpha", "beta"), Write("gamma", "alpha"), 
Write("alpha", "beta"), Write("gamma", "beta"), Write("alpha", "gamma"), Write("gamma", "alpha"), Write("gamma", "beta")]), (Alfie, [Write("alpha", "alpha"), Write("beta", "alpha"), Write("beta", "gamma"), Write("beta", "alpha")]), (Alfie, [Write("beta", "beta"), Write("gamma", "alpha"), Write("beta", "alpha"), Write("gamma", "gamma"), Write("alpha", "beta"), Write("alpha", "gamma"), Write("gamma", "beta"), Write("alpha", "alpha"), Write("beta", "alpha")]), (Alfie, [Write("beta", "beta"), Write("beta", "beta")]), (Alfie, [Write("gamma", "gamma"), Write("beta", "beta"), Write("alpha", "alpha"), Write("beta", "beta"), Write("beta", "alpha")]), (Alfie, [Write("beta", "alpha"), Write("gamma", "alpha"), Write("alpha", "alpha"), Write("alpha", "alpha"), Write("alpha", "beta"), Write("gamma", "gamma"), Write("alpha", "beta"), Write("gamma", "beta"), Write("gamma", "beta"), Write("beta", "beta"), Write("gamma", "beta"), Write("gamma", "alpha"), Write("beta", "gamma"), Write("beta", "gamma")]), (Betty, [Write("gamma", "alpha"), Write("gamma", "gamma"), Write("gamma", "alpha"), Write("alpha", "gamma"), Write("alpha", "gamma"), Write("beta", "gamma"), Write("beta", "beta"), Write("gamma", "beta"), Write("beta", "alpha")]), (Alfie, [Write("gamma", "alpha"), Write("gamma", "beta")])] } +cc d08210148ec737525059d04cef06d7c8911c32d67ecc9be9c2d9036493a6b0f8 # shrinks to input = _TestGetManyWeirdResultArgs { rounds: [(X, [Write("alpha", "alpha"), Write("gamma", "alpha")]), (X, [Write("beta", "gamma"), Write("beta", "alpha"), Write("beta", "beta"), Write("alpha", "beta"), Write("beta", "beta"), Write("gamma", "gamma"), Write("alpha", "gamma"), Write("alpha", "beta"), Write("gamma", "alpha"), Write("beta", "alpha"), Write("beta", "gamma"), Write("gamma", "alpha"), Write("alpha", "beta"), Write("beta", "gamma")]), (Y, [Write("alpha", "alpha"), Write("alpha", "gamma"), Write("beta", "alpha"), Write("beta", "gamma"), Write("alpha", "beta"), Write("beta", "gamma"), Write("alpha", "beta"), 
Write("beta", "alpha")])] } +cc d1712b6b1cbada8a7fb7793bf1f53d562a2b2abef0842c2f533ec140da37f763 # shrinks to input = _TestGetManyWeirdResultArgs { rounds: [(X, [Write("alpha", "green"), Write("beta", "green"), Write("gamma", "green")]), (Y, []), (Y, [Write("beta", "green"), Write("gamma", "red"), Write("beta", "red"), Write("gamma", "green"), Write("gamma", "green"), Write("beta", "blue"), Write("beta", "green"), Write("alpha", "green"), Write("gamma", "blue"), Write("gamma", "green"), Write("alpha", "green"), Write("beta", "red"), Write("gamma", "green"), Write("gamma", "green"), Write("beta", "green"), Write("gamma", "blue"), Write("alpha", "red")])] } +cc 52d30497f1066cca6e7f81e2e643ed6db7f7308708401f88e238bb0df583f2c1 # shrinks to input = _TestGetManyWeirdResultArgs { rounds: [(Y, [Write("gamma", "blue"), Write("gamma", "green"), Write("beta", "blue"), Write("gamma", "blue"), Write("gamma", "green"), Write("beta", "green"), Write("alpha", "green"), Write("alpha", "red"), Write("gamma", "red"), Write("beta", "blue"), Write("alpha", "blue"), Write("beta", "red"), Write("gamma", "blue"), Write("beta", "green"), Write("gamma", "green"), Write("alpha", "green"), Write("gamma", "red"), Write("alpha", "green")])] } +cc 81d02a44e9205146e30df256f51c3e306dbcc71ed54b8c5f6d5b9c6b011d73b6 # shrinks to input = _TestGetManyWeirdResultArgs { rounds: [(Y, [Write("beta", "red"), Write("gamma", "red")]), (X, [Write("beta", "green"), Write("gamma", "red")]), (X, [Write("beta", "green"), Write("gamma", "green"), Write("alpha", "blue"), Write("alpha", "blue")]), (Y, [Write("gamma", "blue"), Write("beta", "green"), Write("gamma", "blue"), Write("beta", "blue"), Write("gamma", "green"), Write("beta", "red"), Write("alpha", "red"), Write("alpha", "blue"), Write("beta", "blue"), Write("gamma", "blue"), Write("alpha", "blue"), Write("beta", "green"), Write("gamma", "red"), Write("beta", "red"), Write("beta", "red")]), (X, [Write("beta", "blue"), Write("alpha", "blue"), Write("gamma", 
"blue"), Write("alpha", "red"), Write("alpha", "green"), Write("alpha", "red"), Write("alpha", "red"), Write("gamma", "red"), Write("alpha", "blue"), Write("alpha", "red")]), (X, [Write("beta", "red"), Write("alpha", "blue"), Write("beta", "red"), Write("gamma", "green"), Write("beta", "green"), Write("beta", "blue"), Write("gamma", "blue"), Write("beta", "red"), Write("alpha", "blue"), Write("gamma", "green"), Write("gamma", "green"), Write("alpha", "red")]), (Y, [Write("alpha", "green"), Write("beta", "green"), Write("alpha", "red"), Write("beta", "blue"), Write("alpha", "green"), Write("beta", "red"), Write("beta", "blue"), Write("alpha", "green"), Write("beta", "red"), Write("beta", "blue"), Write("beta", "green"), Write("alpha", "green"), Write("gamma", "green")]), (Y, [Write("beta", "blue"), Write("alpha", "blue"), Write("alpha", "red"), Write("alpha", "blue"), Write("alpha", "green"), Write("beta", "blue"), Write("gamma", "red"), Write("beta", "red"), Write("alpha", "blue"), Write("gamma", "blue"), Write("gamma", "green"), Write("alpha", "red"), Write("gamma", "red"), Write("alpha", "green"), Write("beta", "blue"), Write("beta", "red"), Write("gamma", "green")]), (X, [Write("gamma", "red"), Write("alpha", "red"), Write("gamma", "blue"), Write("alpha", "red"), Write("beta", "green"), Write("beta", "blue"), Write("gamma", "blue"), Write("beta", "blue")]), (Y, [Write("beta", "red"), Write("alpha", "red"), Write("beta", "red"), Write("beta", "red"), Write("beta", "green"), Write("alpha", "red"), Write("gamma", "red"), Write("alpha", "red"), Write("alpha", "red"), Write("beta", "green"), Write("beta", "red"), Write("beta", "green"), Write("beta", "red"), Write("beta", "red"), Write("alpha", "green"), Write("gamma", "red"), Write("beta", "blue")]), (X, [Write("beta", "red"), Write("beta", "red"), Write("beta", "red"), Write("beta", "red"), Write("alpha", "green"), Write("beta", "red"), Write("alpha", "green")]), (X, [Write("alpha", "green"), Write("gamma", "red"), 
Write("beta", "blue"), Write("beta", "green"), Write("alpha", "red"), Write("beta", "red"), Write("beta", "green"), Write("alpha", "green"), Write("alpha", "green"), Write("gamma", "green"), Write("beta", "red"), Write("alpha", "green")]), (Y, [Write("alpha", "red"), Write("beta", "red"), Write("alpha", "green"), Write("gamma", "red"), Write("beta", "blue"), Write("alpha", "red"), Write("alpha", "green"), Write("alpha", "red")]), (X, [Write("gamma", "blue"), Write("gamma", "green"), Write("gamma", "blue"), Write("alpha", "red"), Write("alpha", "green"), Write("alpha", "blue"), Write("alpha", "blue"), Write("alpha", "red"), Write("gamma", "red"), Write("gamma", "red"), Write("gamma", "green"), Write("beta", "green"), Write("beta", "red"), Write("gamma", "blue")]), (X, [Write("beta", "blue"), Write("gamma", "green"), Write("gamma", "red")]), (Y, [Write("gamma", "green"), Write("gamma", "green"), Write("alpha", "blue"), Write("gamma", "green"), Write("gamma", "blue"), Write("beta", "blue"), Write("alpha", "green")]), (X, [Write("gamma", "blue")])] } +cc 1bf6826f5eed0f39830227d43f7dfad1e043474fe580ffd44ddbb396631860eb # shrinks to input = _TestGetManyWeirdResultArgs { rounds: [(Y, [Write("beta", "blue"), Write("alpha", "red"), Write("beta", "blue"), Write("alpha", "green"), Write("alpha", "blue"), Write("beta", "red"), Write("alpha", "green"), Write("alpha", "blue"), Write("alpha", "red"), Write("gamma", "red"), Write("gamma", "red"), Write("alpha", "red"), Write("alpha", "blue"), Write("gamma", "blue"), Write("gamma", "red"), Write("alpha", "blue")]), (Y, [Write("alpha", "red"), Write("gamma", "green"), Write("alpha", "blue"), Write("alpha", "blue"), Write("gamma", "red"), Write("beta", "green"), Write("alpha", "red"), Write("beta", "red"), Write("alpha", "red"), Write("alpha", "green"), Write("gamma", "green"), Write("alpha", "blue")]), (Y, [Write("gamma", "green"), Write("gamma", "green"), Write("alpha", "blue"), Write("alpha", "blue"), Write("gamma", "red"), 
Write("beta", "red"), Write("alpha", "red"), Write("gamma", "red"), Write("gamma", "blue"), Write("beta", "green")]), (Y, [Write("alpha", "red"), Write("gamma", "blue"), Write("beta", "green"), Write("gamma", "red"), Write("beta", "green"), Write("gamma", "green"), Write("gamma", "green"), Write("alpha", "red"), Write("beta", "green"), Write("gamma", "green"), Write("alpha", "blue"), Write("beta", "green"), Write("gamma", "blue"), Write("gamma", "green"), Write("gamma", "blue")]), (Y, [Write("alpha", "green"), Write("alpha", "red"), Write("alpha", "red"), Write("gamma", "red")]), (Y, [Write("beta", "blue"), Write("beta", "green"), Write("gamma", "blue"), Write("gamma", "blue"), Write("beta", "green"), Write("beta", "green"), Write("beta", "red"), Write("beta", "blue"), Write("alpha", "blue"), Write("beta", "blue"), Write("gamma", "green"), Write("gamma", "blue"), Write("gamma", "blue"), Write("gamma", "green")]), (Y, [Write("beta", "red"), Write("beta", "green"), Write("gamma", "red"), Write("gamma", "blue"), Write("beta", "red"), Write("beta", "red"), Write("beta", "blue"), Write("beta", "blue"), Write("beta", "blue"), Write("alpha", "green"), Write("gamma", "blue"), Write("gamma", "green"), Write("alpha", "red"), Write("alpha", "red"), Write("gamma", "blue"), Write("beta", "red"), Write("alpha", "green"), Write("gamma", "green"), Write("alpha", "green")]), (Y, [Write("gamma", "green"), Write("gamma", "green"), Write("beta", "blue"), Write("beta", "blue"), Write("alpha", "blue"), Write("gamma", "blue"), Write("gamma", "green"), Write("gamma", "blue"), Write("beta", "blue"), Write("beta", "red"), Write("beta", "red"), Write("alpha", "red")]), (X, [Write("gamma", "blue"), Write("alpha", "red"), Write("beta", "green"), Write("gamma", "red"), Write("beta", "red"), Write("alpha", "green"), Write("alpha", "blue"), Write("gamma", "blue"), Write("alpha", "red"), Write("alpha", "red"), Write("beta", "green"), Write("beta", "green")]), (X, [Write("beta", "red"), 
Write("alpha", "green"), Write("alpha", "green")]), (X, [Write("beta", "green"), Write("gamma", "red"), Write("beta", "red"), Write("gamma", "green"), Write("gamma", "blue"), Write("beta", "blue"), Write("gamma", "red"), Write("alpha", "red"), Write("alpha", "blue"), Write("beta", "red"), Write("gamma", "blue"), Write("gamma", "blue"), Write("alpha", "red"), Write("alpha", "green"), Write("alpha", "blue"), Write("alpha", "red"), Write("beta", "blue"), Write("beta", "blue")]), (X, [Write("beta", "blue"), Write("alpha", "red"), Write("beta", "green"), Write("gamma", "green"), Write("gamma", "red"), Write("gamma", "blue"), Write("beta", "green"), Write("gamma", "green"), Write("gamma", "blue"), Write("gamma", "blue"), Write("gamma", "green"), Write("alpha", "blue"), Write("alpha", "blue"), Write("beta", "red"), Write("alpha", "blue"), Write("alpha", "red")]), (X, [Write("gamma", "green"), Write("beta", "red"), Write("beta", "blue"), Write("alpha", "blue"), Write("alpha", "green"), Write("alpha", "red")]), (X, [Write("alpha", "green"), Write("beta", "red"), Write("gamma", "green"), Write("beta", "red"), Write("gamma", "green"), Write("gamma", "red"), Write("gamma", "red"), Write("alpha", "red"), Write("beta", "blue")]), (Y, [Write("alpha", "blue"), Write("alpha", "green"), Write("gamma", "blue"), Write("gamma", "red"), Write("gamma", "blue"), Write("beta", "red"), Write("alpha", "green"), Write("beta", "blue")]), (Y, [Write("beta", "blue"), Write("gamma", "green"), Write("beta", "green"), Write("beta", "green"), Write("gamma", "green"), Write("gamma", "blue"), Write("gamma", "green"), Write("gamma", "green"), Write("gamma", "red")]), (X, [Write("beta", "green"), Write("alpha", "blue"), Write("alpha", "green"), Write("beta", "red"), Write("beta", "green")]), (X, [Write("alpha", "green"), Write("beta", "blue"), Write("beta", "green"), Write("gamma", "blue"), Write("alpha", "red"), Write("alpha", "red"), Write("alpha", "red"), Write("beta", "green"), Write("beta", 
"green"), Write("beta", "red"), Write("gamma", "red"), Write("gamma", "red")]), (X, [Write("alpha", "green"), Write("gamma", "red"), Write("gamma", "green"), Write("beta", "green"), Write("beta", "blue"), Write("alpha", "blue"), Write("alpha", "green"), Write("beta", "green"), Write("beta", "red"), Write("beta", "red"), Write("gamma", "red"), Write("alpha", "blue"), Write("gamma", "green"), Write("beta", "blue"), Write("gamma", "red"), Write("alpha", "green")])] } +cc b3451ba74bf6d3578cd59d523609b25e6d26e72d542bfd2928106263ab0c0317 # shrinks to input = _TestGetManyWeirdResultArgs { rounds: [(Y, [Write("alpha", "blue"), Write("gamma", "red")]), (Y, [Write("beta", "green"), Write("beta", "blue"), Write("gamma", "green"), Write("gamma", "blue"), Write("gamma", "blue"), Write("alpha", "red"), Write("alpha", "red"), Write("beta", "green"), Write("gamma", "green"), Write("beta", "red"), Write("alpha", "red"), Write("gamma", "blue")]), (X, []), (X, [Write("beta", "red"), Write("beta", "red"), Write("beta", "blue"), Write("beta", "green"), Write("beta", "blue"), Write("gamma", "red"), Write("gamma", "green"), Write("beta", "red"), Write("gamma", "blue"), Write("beta", "blue")]), (X, [Write("alpha", "green"), Write("gamma", "green")]), (X, [Write("alpha", "green"), Write("beta", "blue"), Write("gamma", "red"), Write("gamma", "red"), Write("gamma", "green"), Write("alpha", "red"), Write("gamma", "blue"), Write("gamma", "red"), Write("beta", "red"), Write("gamma", "red"), Write("alpha", "blue"), Write("gamma", "blue"), Write("beta", "green"), Write("alpha", "blue"), Write("beta", "red"), Write("beta", "blue")]), (Y, [Write("gamma", "blue"), Write("alpha", "blue"), Write("beta", "green"), Write("alpha", "green"), Write("gamma", "green"), Write("gamma", "red"), Write("alpha", "red"), Write("gamma", "red"), Write("gamma", "green"), Write("gamma", "red"), Write("beta", "red"), Write("gamma", "green"), Write("beta", "red"), Write("alpha", "green"), Write("beta", "green"), 
Write("beta", "blue")]), (X, [Write("gamma", "green"), Write("alpha", "green"), Write("beta", "green"), Write("gamma", "red"), Write("beta", "red"), Write("alpha", "red")]), (Y, [Write("alpha", "red"), Write("beta", "red")]), (Y, [Write("alpha", "green"), Write("alpha", "red"), Write("gamma", "red"), Write("gamma", "red"), Write("alpha", "blue"), Write("beta", "red"), Write("alpha", "red"), Write("alpha", "blue"), Write("alpha", "green"), Write("gamma", "green"), Write("alpha", "blue")]), (Y, [Write("gamma", "blue"), Write("beta", "blue"), Write("alpha", "red"), Write("gamma", "red"), Write("beta", "blue"), Write("beta", "red"), Write("alpha", "green"), Write("beta", "green"), Write("alpha", "red"), Write("gamma", "red"), Write("gamma", "green"), Write("alpha", "red"), Write("alpha", "blue"), Write("beta", "red"), Write("beta", "green"), Write("gamma", "red"), Write("beta", "red")]), (Y, [Write("beta", "red"), Write("beta", "green"), Write("beta", "green")]), (X, [Write("gamma", "blue"), Write("alpha", "green"), Write("gamma", "green"), Write("gamma", "blue"), Write("gamma", "green"), Write("gamma", "green"), Write("alpha", "red"), Write("gamma", "red"), Write("gamma", "blue"), Write("alpha", "red"), Write("alpha", "blue"), Write("gamma", "blue"), Write("beta", "blue"), Write("beta", "red"), Write("gamma", "green"), Write("gamma", "red")]), (Y, [Write("beta", "blue"), Write("beta", "blue"), Write("alpha", "red"), Write("alpha", "red"), Write("alpha", "blue"), Write("gamma", "red"), Write("gamma", "green"), Write("alpha", "green"), Write("beta", "blue"), Write("beta", "green"), Write("gamma", "green"), Write("beta", "green"), Write("beta", "green"), Write("alpha", "red"), Write("beta", "green"), Write("alpha", "red"), Write("gamma", "green")])] } +cc ba16827de785794376a4a534790de30db562c8ae30d79ce9e9c113215aa7ec4b # shrinks to input = _TestGetManyWeirdResultArgs { rounds: [(Y, [Write("beta", "red"), Write("beta", "blue"), Write("beta", "red"), Write("beta", 
"green"), Write("beta", "blue"), Write("beta", "red"), Write("beta", "green")]), (Y, [Write("gamma", "red"), Write("alpha", "blue"), Write("gamma", "green"), Write("gamma", "green"), Write("beta", "green"), Write("alpha", "blue"), Write("beta", "blue"), Write("gamma", "green"), Write("beta", "green")]), (X, [Write("alpha", "green"), Write("beta", "blue"), Write("alpha", "red"), Write("alpha", "green"), Write("gamma", "green"), Write("alpha", "red"), Write("alpha", "green"), Write("beta", "red"), Write("beta", "blue"), Write("alpha", "red"), Write("gamma", "red"), Write("beta", "green"), Write("beta", "red"), Write("alpha", "red"), Write("gamma", "green"), Write("gamma", "red"), Write("gamma", "blue"), Write("beta", "blue"), Write("gamma", "red")]), (X, [Write("alpha", "red"), Write("gamma", "blue"), Write("alpha", "blue"), Write("beta", "red"), Write("beta", "blue"), Write("gamma", "red"), Write("alpha", "blue"), Write("alpha", "red"), Write("beta", "red"), Write("beta", "blue"), Write("alpha", "red"), Write("beta", "red"), Write("gamma", "blue"), Write("gamma", "green")]), (Y, [Write("gamma", "blue"), Write("beta", "blue")]), (X, [Write("beta", "blue"), Write("alpha", "red"), Write("gamma", "blue"), Write("alpha", "green")]), (Y, [Write("beta", "blue"), Write("gamma", "green"), Write("beta", "red"), Write("beta", "blue"), Write("gamma", "blue"), Write("beta", "blue"), Write("beta", "green")]), (Y, [Write("alpha", "blue"), Write("alpha", "red"), Write("alpha", "blue"), Write("beta", "green"), Write("gamma", "red"), Write("gamma", "green"), Write("gamma", "red"), Write("alpha", "blue"), Write("beta", "red"), Write("gamma", "blue")]), (Y, [Write("alpha", "green"), Write("beta", "green"), Write("gamma", "blue"), Write("beta", "green"), Write("gamma", "green"), Write("gamma", "green"), Write("beta", "blue"), Write("alpha", "red"), Write("gamma", "red"), Write("gamma", "red"), Write("alpha", "green"), Write("gamma", "red")]), (Y, [Write("alpha", "blue"), Write("beta", 
"red"), Write("alpha", "green"), Write("gamma", "green"), Write("gamma", "green"), Write("alpha", "red"), Write("gamma", "green"), Write("alpha", "red")]), (Y, [Write("gamma", "green"), Write("beta", "green"), Write("beta", "blue"), Write("alpha", "green"), Write("beta", "green"), Write("gamma", "blue"), Write("gamma", "green"), Write("alpha", "green"), Write("gamma", "red"), Write("beta", "green"), Write("beta", "red"), Write("beta", "green"), Write("gamma", "red"), Write("gamma", "red")]), (Y, [Write("gamma", "red"), Write("gamma", "green"), Write("gamma", "blue"), Write("beta", "green"), Write("beta", "blue"), Write("alpha", "green"), Write("gamma", "red"), Write("beta", "red"), Write("beta", "green"), Write("gamma", "blue"), Write("beta", "green"), Write("alpha", "red"), Write("gamma", "blue"), Write("gamma", "blue"), Write("beta", "green")]), (Y, [Write("beta", "green"), Write("beta", "blue"), Write("gamma", "red"), Write("alpha", "green"), Write("beta", "blue"), Write("alpha", "blue"), Write("beta", "green"), Write("alpha", "blue"), Write("alpha", "blue"), Write("gamma", "blue"), Write("beta", "red"), Write("beta", "red"), Write("alpha", "blue"), Write("beta", "blue"), Write("gamma", "red")]), (Y, [Write("gamma", "green"), Write("beta", "blue"), Write("alpha", "green"), Write("beta", "blue"), Write("beta", "red"), Write("beta", "blue"), Write("gamma", "red"), Write("alpha", "green"), Write("alpha", "green"), Write("gamma", "red"), Write("gamma", "blue")]), (Y, [Write("alpha", "green"), Write("beta", "red"), Write("gamma", "green"), Write("alpha", "green"), Write("alpha", "green"), Write("alpha", "red"), Write("beta", "blue"), Write("beta", "blue"), Write("beta", "blue"), Write("gamma", "red"), Write("alpha", "red"), Write("alpha", "red"), Write("alpha", "green")]), (X, [Write("alpha", "blue"), Write("beta", "red"), Write("gamma", "blue"), Write("beta", "blue"), Write("gamma", "blue"), Write("alpha", "red"), Write("gamma", "green"), Write("gamma", "green"), 
Write("beta", "red"), Write("beta", "blue"), Write("gamma", "red"), Write("alpha", "red"), Write("beta", "red"), Write("beta", "red"), Write("alpha", "green")]), (Y, [Write("gamma", "red"), Write("alpha", "red"), Write("beta", "green"), Write("gamma", "red"), Write("gamma", "blue"), Write("gamma", "red"), Write("beta", "blue"), Write("alpha", "blue"), Write("alpha", "red"), Write("beta", "green"), Write("beta", "red"), Write("beta", "red"), Write("beta", "red"), Write("alpha", "green")]), (Y, [Write("gamma", "green")])] } +cc f7035816a33aa12aab5db8a46f69789d339c4610dc800ced7d359806511a4b7a # shrinks to input = _TestGetManyWeirdResultArgs { rounds: [(X, [Write("alpha", "red"), Write("alpha", "blue"), Write("alpha", "green"), Write("gamma", "blue"), Write("beta", "red"), Write("gamma", "blue"), Write("beta", "green"), Write("beta", "red"), Write("alpha", "green"), Write("alpha", "green"), Write("alpha", "green"), Write("beta", "green"), Write("alpha", "green"), Write("beta", "red")]), (X, [Write("gamma", "red"), Write("gamma", "green"), Write("gamma", "blue"), Write("alpha", "blue"), Write("beta", "blue"), Write("beta", "blue"), Write("beta", "green"), Write("beta", "red"), Write("alpha", "blue"), Write("alpha", "red"), Write("alpha", "blue"), Write("gamma", "red"), Write("beta", "blue"), Write("alpha", "green")]), (Y, [Write("gamma", "blue"), Write("gamma", "red"), Write("gamma", "blue"), Write("beta", "red"), Write("alpha", "red"), Write("beta", "green"), Write("beta", "green"), Write("gamma", "green"), Write("alpha", "red"), Write("alpha", "red"), Write("alpha", "red"), Write("gamma", "red"), Write("beta", "blue"), Write("beta", "green"), Write("gamma", "green"), Write("alpha", "red")]), (X, [Write("beta", "red"), Write("alpha", "red"), Write("gamma", "blue"), Write("beta", "red"), Write("beta", "red"), Write("beta", "blue"), Write("beta", "blue"), Write("beta", "red"), Write("alpha", "green"), Write("gamma", "red"), Write("gamma", "green"), Write("gamma", 
"blue"), Write("alpha", "red"), Write("beta", "red"), Write("beta", "red")]), (X, [Write("beta", "red"), Write("gamma", "red"), Write("beta", "red"), Write("beta", "blue"), Write("alpha", "red"), Write("alpha", "blue"), Write("alpha", "green")]), (X, [Write("beta", "green"), Write("alpha", "blue"), Write("beta", "green"), Write("alpha", "red"), Write("gamma", "red"), Write("alpha", "red"), Write("gamma", "green"), Write("beta", "red"), Write("alpha", "red"), Write("beta", "blue"), Write("alpha", "red"), Write("beta", "red")]), (X, [Write("beta", "green"), Write("alpha", "blue"), Write("beta", "blue"), Write("beta", "red"), Write("alpha", "red"), Write("beta", "green"), Write("alpha", "red"), Write("alpha", "blue"), Write("beta", "green"), Write("beta", "blue"), Write("beta", "blue"), Write("alpha", "blue"), Write("gamma", "green"), Write("gamma", "red"), Write("beta", "green"), Write("gamma", "green"), Write("beta", "green"), Write("alpha", "green")]), (X, [Write("gamma", "green"), Write("alpha", "blue"), Write("beta", "red"), Write("alpha", "green"), Write("beta", "blue"), Write("beta", "red"), Write("beta", "blue"), Write("beta", "red"), Write("alpha", "blue"), Write("beta", "blue"), Write("alpha", "blue"), Write("gamma", "blue"), Write("alpha", "blue"), Write("gamma", "red"), Write("alpha", "blue"), Write("gamma", "red"), Write("alpha", "blue"), Write("gamma", "red")]), (Y, [Write("alpha", "green"), Write("beta", "blue"), Write("alpha", "red"), Write("gamma", "blue"), Write("alpha", "red"), Write("alpha", "green"), Write("alpha", "green"), Write("beta", "red"), Write("alpha", "red"), Write("gamma", "green"), Write("alpha", "red"), Write("alpha", "green")]), (X, [Write("alpha", "red"), Write("alpha", "blue"), Write("alpha", "blue")]), (X, [Write("beta", "green"), Write("gamma", "green"), Write("alpha", "blue"), Write("alpha", "blue"), Write("alpha", "blue"), Write("gamma", "red"), Write("alpha", "blue"), Write("beta", "red"), Write("beta", "red"), Write("beta", 
"green"), Write("alpha", "green"), Write("beta", "red"), Write("beta", "green")]), (X, [Write("gamma", "blue"), Write("beta", "green"), Write("gamma", "green"), Write("alpha", "blue"), Write("alpha", "green"), Write("beta", "green"), Write("alpha", "red"), Write("gamma", "green")]), (X, [Write("alpha", "blue"), Write("alpha", "blue"), Write("alpha", "green"), Write("gamma", "red"), Write("alpha", "blue"), Write("gamma", "green"), Write("gamma", "green")]), (X, [Write("gamma", "green"), Write("alpha", "blue"), Write("beta", "blue")]), (X, [Write("gamma", "green"), Write("gamma", "green")]), (Y, [Write("alpha", "green"), Write("alpha", "green"), Write("beta", "blue"), Write("gamma", "blue"), Write("gamma", "blue"), Write("gamma", "red"), Write("beta", "blue")]), (Y, [Write("beta", "red"), Write("gamma", "blue"), Write("beta", "blue"), Write("alpha", "blue"), Write("beta", "green"), Write("alpha", "green"), Write("alpha", "green"), Write("alpha", "red"), Write("gamma", "red"), Write("gamma", "red"), Write("gamma", "green"), Write("gamma", "green"), Write("alpha", "green"), Write("beta", "blue"), Write("gamma", "blue"), Write("beta", "red"), Write("gamma", "blue")]), (Y, [Write("beta", "green"), Write("alpha", "red"), Write("alpha", "green"), Write("gamma", "green"), Write("alpha", "green"), Write("beta", "green"), Write("gamma", "blue"), Write("beta", "blue"), Write("gamma", "blue"), Write("alpha", "blue"), Write("beta", "red"), Write("gamma", "blue"), Write("gamma", "blue"), Write("gamma", "green"), Write("gamma", "red"), Write("alpha", "red"), Write("gamma", "red")])] } diff --git a/iroh/tests/spaces.rs b/iroh/tests/spaces.rs index c00ccac660..d7becda3f5 100644 --- a/iroh/tests/spaces.rs +++ b/iroh/tests/spaces.rs @@ -1,19 +1,27 @@ -use anyhow::Result; +use std::{collections::BTreeMap, time::Duration}; + +use anyhow::ensure; use futures_lite::StreamExt; -use iroh::client::{spaces::EntryForm, Iroh}; +use iroh::client::{ + spaces::{EntryForm, Space}, + Iroh, +}; use 
iroh_net::{key::SecretKey, NodeAddr}; use iroh_willow::{ - interest::{CapSelector, DelegateTo, RestrictArea}, + interest::{AreaOfInterestSelector, CapSelector, DelegateTo, RestrictArea}, proto::{ data_model::{Path, PathExt}, grouping::{Area, Range3d}, - keys::NamespaceKind, + keys::{NamespaceKind, UserId}, meadowcap::AccessMode, }, session::{intents::Completion, SessionMode}, store::traits::{EntryOrigin, StoreEvent}, }; -use tracing::info; +use proptest::{collection::vec, prelude::Strategy, sample::select}; +use test_strategy::proptest; +use testresult::TestResult; +use tracing::{error, info}; /// Spawn an iroh node in a separate thread and tokio runtime, and return /// the address and client. @@ -41,8 +49,220 @@ async fn spawn_node() -> (NodeAddr, Iroh) { receiver.await.unwrap() } +#[derive(Debug, Clone)] +enum Operation { + Write(String, String), +} + +fn simple_key() -> impl Strategy { + select(&["alpha", "beta", "gamma"]).prop_map(str::to_string) +} + +fn simple_value() -> impl Strategy { + select(&["red", "blue", "green"]).prop_map(str::to_string) +} + +fn simple_op() -> impl Strategy { + (simple_key(), simple_value()).prop_map(|(key, value)| Operation::Write(key, value)) +} + +fn role() -> impl Strategy { + select(&[Peer::X, Peer::Y]) +} + +#[derive(Debug, Eq, PartialEq, Clone, Copy, Ord, PartialOrd)] +enum Peer { + X, + Y, +} + +#[proptest] +fn test_get_many_weird_result( + #[strategy(vec((role(), vec(simple_op(), 0..20)), 0..20))] rounds: Vec<(Peer, Vec)>, +) { + iroh_test::logging::setup_multithreaded(); + + let res = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .unwrap() + .block_on(async { + let mut simulated_entries: BTreeMap<(Peer, String), String> = BTreeMap::new(); + + let (addr_x, iroh_x) = spawn_node().await; + let (addr_y, iroh_y) = spawn_node().await; + let node_id_x = addr_x.node_id; + let node_id_y = addr_y.node_id; + iroh_x.net().add_node_addr(addr_y.clone()).await?; + 
iroh_y.net().add_node_addr(addr_x.clone()).await?; + let user_x = iroh_x.spaces().create_user().await?; + let user_y = iroh_y.spaces().create_user().await?; + info!( + "X is node {} user {}", + node_id_x.fmt_short(), + user_x.fmt_short() + ); + info!( + "Y is node {} user {}", + node_id_y.fmt_short(), + user_y.fmt_short() + ); + let space_x = iroh_x.spaces().create(NamespaceKind::Owned, user_x).await?; + + let ticket = space_x + .share(user_y, AccessMode::Write, RestrictArea::None) + .await?; + + // give betty access + let (space_y, syncs) = iroh_y + .spaces() + .import_and_sync(ticket, SessionMode::ReconcileOnce) + .await?; + + let mut completions = syncs.complete_all().await; + assert_eq!(completions.len(), 1); + let completion = completions.remove(&node_id_x).unwrap(); + assert!(completion.is_ok()); + assert_eq!(completion.unwrap(), Completion::Complete); + + let count = rounds.len(); + for (i, (peer, round)) in rounds.into_iter().enumerate() { + let i = i + 1; + let (space, user) = match peer { + Peer::X => (&space_x, user_x), + Peer::Y => (&space_y, user_y), + }; + info!(active=?peer, "[{i}/{count}] round start"); + + for Operation::Write(key, value) in round { + info!(?key, ?value, "[{i}/{count}] write"); + space + .insert_bytes( + EntryForm::new(user, Path::from_bytes(&[key.as_bytes()])?), + value.clone().into_bytes(), + ) + .await?; + simulated_entries.insert((peer, key), value); + } + + // We sync in both directions. This will only create a single session under the hood. + // Awaiting both intents ensures that the sync completed on both sides. + // Alternatively, we could sync from one side only, the result must be the same, however we miss + // an event in the client currently to know when the betty peer (accepting peer) has finished. + let fut_x = async { + space_x + .sync_once(node_id_y, AreaOfInterestSelector::Widest) + .await? 
+ .complete() + .await?; + anyhow::Ok(()) + }; + let fut_y = async { + space_y + .sync_once(node_id_x, AreaOfInterestSelector::Widest) + .await? + .complete() + .await?; + anyhow::Ok(()) + }; + let fut = async { tokio::try_join!(fut_x, fut_y) }; + tokio::time::timeout(Duration::from_secs(10), fut).await??; + + info!("[{i}/{count}] sync complete"); + + let map_x = space_to_map(&space_x, &iroh_x, user_x, user_y).await?; + let map_y = space_to_map(&space_y, &iroh_y, user_x, user_y).await?; + ensure!( + map_x == map_y, + "states out of sync:\n{map_x:#?}\n !=\n{map_y:#?}" + ); + + ensure!( + map_x == map_y, + "states out of sync:\n{map_x:#?}\n !=\n{map_y:#?}" + ); + ensure!( + simulated_entries == map_x, + "alfie in unexpected state:\n{simulated_entries:#?}\n !=\n{map_x:#?}" + ); + // follows transitively, but still + ensure!( + simulated_entries == map_y, + "betty in unexpected state:\n{simulated_entries:#?}\n !=\n{map_y:#?}" + ); + } + + info!("completed {count} rounds successfully"); + + tokio::try_join!(iroh_x.shutdown(false), iroh_y.shutdown(false))?; + + Ok(()) + }); + if let Err(err) = &res { + error!(?err, "FAILED"); + } + res.map_err(AnyhowStdErr)?; +} + +async fn space_to_map( + space: &Space, + node: &Iroh, + user_x: UserId, + user_y: UserId, +) -> anyhow::Result> { + let role_lookup = BTreeMap::from([(user_x, Peer::X), (user_y, Peer::Y)]); + let entries = space + .get_many(Range3d::new_full()) + .await? 
+ .try_collect::<_, _, Vec<_>>() + .await?; + let mut map: BTreeMap<(Peer, String), String> = BTreeMap::new(); + for auth_entry in entries { + let (entry, auth) = auth_entry.into_parts(); + let key_component = entry + .path() + .get_component(0) + .ok_or_else(|| anyhow::anyhow!("path component missing"))?; + let key = String::from_utf8(key_component.to_vec())?; + + let value = node.blobs().read_to_bytes(entry.payload_digest().0).await?; + + let user = auth.capability.receiver(); + let peer = role_lookup + .get(user) + .ok_or_else(|| anyhow::anyhow!("foreign write?"))?; + + map.insert((*peer, key), String::from_utf8_lossy(&value).to_string()); + } + + Ok(map) +} + +#[derive(Debug)] +struct AnyhowStdErr(anyhow::Error); + +impl std::fmt::Display for AnyhowStdErr { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.0.fmt(f) + } +} + +impl std::error::Error for AnyhowStdErr { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + self.0.source() + } + + fn description(&self) -> &str { + "description() is deprecated; use Display" + } + + fn cause(&self) -> Option<&dyn std::error::Error> { + self.source() + } +} + #[tokio::test] -async fn spaces_smoke() -> Result<()> { +async fn spaces_smoke() -> TestResult { iroh_test::logging::setup_multithreaded(); let (alfie_addr, alfie) = spawn_node().await; let (betty_addr, betty) = spawn_node().await; @@ -138,7 +358,7 @@ async fn spaces_smoke() -> Result<()> { } #[tokio::test] -async fn spaces_subscription() -> Result<()> { +async fn spaces_subscription() -> TestResult { iroh_test::logging::setup_multithreaded(); let (alfie_addr, alfie) = spawn_node().await; let (betty_addr, betty) = spawn_node().await; @@ -224,7 +444,7 @@ async fn spaces_subscription() -> Result<()> { #[tokio::test] async fn test_restricted_area() -> testresult::TestResult { iroh_test::logging::setup_multithreaded(); - const TIMEOUT: std::time::Duration = std::time::Duration::from_secs(2); + const TIMEOUT: Duration = 
Duration::from_secs(2); let (alfie_addr, alfie) = spawn_node().await; let (betty_addr, betty) = spawn_node().await; info!("alfie is {}", alfie_addr.node_id.fmt_short()); From ee2c062b32537833c1450d8c898c77191e9c4509 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Kr=C3=BCger?= Date: Thu, 31 Oct 2024 15:03:55 +0100 Subject: [PATCH 197/198] feat: persistent store impl using `willow_store` and `redb` (#2809) At the moment, this replaces the memory store in `willow` with the tree in `willow-store` + its `MemStore`. Next steps: - [x] Clean up - [x] Move `willow-store`/`willow-rs` glue into one sane place: - [x] conversions: `BlobSeq` <-> `Path`, `AuthorisedEntry` <-> `StoredAuthorizedEntry`, `QueryRange3d` <-> `Range3d` - [x] definitions: `StoredAuthorizedEntry`, `WillowParams` - [x] Fix spelling (Authori**s**ed vs. Authori**z**ed is inconsistent at the moment) - [x] Make use of `NamespaceStore` again - [x] Create a store based on `willow_store::RedbBlobStore` instead (this way it's persistent) - [x] Think about undoing changes to the iroh-willow memory store? Or perhaps make it use redb in memory mode? TBD. 
- [x] Subscriptions and subscription resumption - ~~Need access to lower-level details of willow-store to be able to use the trick of using `NodeId`s for `progress_id`s.~~ Went with the old "live subscriptions only" thing again --- Cargo.lock | 95 ++- iroh-net/src/relay/server/streams.rs | 1 + iroh-willow/Cargo.toml | 7 +- .../proptest-regressions/store/glue.txt | 7 + iroh-willow/src/engine/actor.rs | 14 +- iroh-willow/src/net.rs | 2 +- iroh-willow/src/proto/keys.rs | 23 + iroh-willow/src/proto/wgps/fingerprint.rs | 73 +- iroh-willow/src/session/reconciler.rs | 2 +- iroh-willow/src/store.rs | 4 +- iroh-willow/src/store/auth.rs | 10 +- iroh-willow/src/store/memory.rs | 116 +-- iroh-willow/src/store/persistent.rs | 755 ++++++++++++++++++ iroh-willow/src/store/persistent/tables.rs | 215 +++++ iroh-willow/src/store/traits.rs | 108 ++- iroh-willow/src/store/willow_store_glue.rs | 256 ++++++ iroh-willow/tests/basic.rs | 35 + iroh/src/node/builder.rs | 55 +- iroh/src/util/path.rs | 3 + iroh/tests/spaces.rs | 30 +- 20 files changed, 1618 insertions(+), 193 deletions(-) create mode 100644 iroh-willow/proptest-regressions/store/glue.txt create mode 100644 iroh-willow/src/store/persistent.rs create mode 100644 iroh-willow/src/store/persistent/tables.rs create mode 100644 iroh-willow/src/store/willow_store_glue.rs diff --git a/Cargo.lock b/Cargo.lock index 169259b7f6..2a160631ae 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -51,7 +51,7 @@ dependencies = [ "cfg-if", "once_cell", "version_check", - "zerocopy 0.7.35", + "zerocopy", ] [[package]] @@ -490,6 +490,19 @@ dependencies = [ "wyz", ] +[[package]] +name = "blake3" +version = "1.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9ec96fe9a81b5e365f9db71fe00edc4fe4ca2cc7dcb7861f0603012a7caa210" +dependencies = [ + "arrayref", + "arrayvec", + "cc", + "cfg-if", + "constant_time_eq", +] + [[package]] name = "block-buffer" version = "0.10.4" @@ -900,9 +913,9 @@ dependencies = [ [[package]] name = 
"critical-section" -version = "1.1.2" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7059fff8937831a9ae6f0fe4d658ffabf58f2ca96aa9dec1c889f936f705f216" +checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" [[package]] name = "crossbeam-channel" @@ -1632,9 +1645,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -1657,9 +1670,9 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" @@ -1674,9 +1687,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" @@ -1708,9 +1721,9 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", @@ -1719,15 +1732,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-timer" @@ -1737,9 +1750,9 @@ checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-channel", "futures-core", @@ -3029,6 +3042,7 @@ dependencies = [ "curve25519-dalek", "derive_more", "ed25519-dalek", + "either", "futures-buffered", "futures-concurrency", "futures-lite 2.3.0", @@ -3036,6 +3050,7 @@ dependencies = [ "genawaiter", "hex", "iroh-base", + "iroh-blake3", "iroh-blobs", "iroh-io", "iroh-metrics", @@ -3048,6 +3063,7 @@ dependencies = [ "rand_chacha", "rand_core", "redb 2.1.1", + "self_cell", "serde", "sha2", "strum 0.26.3", @@ -3063,7 +3079,9 @@ dependencies = [ "ufotofu", "willow-data-model", "willow-encoding", - "zerocopy 0.8.0-alpha.17", + "willow-store", + "zerocopy", + "zerocopy-derive", ] [[package]] @@ -6543,6 +6561,24 @@ dependencies = [ "ufotofu", ] +[[package]] +name = "willow-store" +version = "0.1.0" +source = "git+https://github.com/n0-computer/willow-store.git?branch=matheus23/redb-ref#3a29c592fe071ff87fa763bd4650bfed789a53e6" +dependencies = [ + "anyhow", + "blake3", + "genawaiter", + "hex", + "itertools 0.13.0", + "redb 2.1.1", + "ref-cast", + "self_cell", + "smallvec", + 
"tracing", + "zerocopy", +] + [[package]] name = "winapi" version = "0.3.9" @@ -6977,16 +7013,8 @@ version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ - "zerocopy-derive 0.7.35", -] - -[[package]] -name = "zerocopy" -version = "0.8.0-alpha.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da056c7307048e30bce8d625c6f0633366d31f1086b3c87ed9b1f18fa1081cb1" -dependencies = [ - "zerocopy-derive 0.8.0-alpha.17", + "byteorder", + "zerocopy-derive", ] [[package]] @@ -7000,17 +7028,6 @@ dependencies = [ "syn 2.0.72", ] -[[package]] -name = "zerocopy-derive" -version = "0.8.0-alpha.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9eb22123403bf9c05af423e2ced336a5fc2853df9179b42bea8144d6bf497a57" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.72", -] - [[package]] name = "zeroize" version = "1.8.1" diff --git a/iroh-net/src/relay/server/streams.rs b/iroh-net/src/relay/server/streams.rs index 096f397c93..ac52f75b72 100644 --- a/iroh-net/src/relay/server/streams.rs +++ b/iroh-net/src/relay/server/streams.rs @@ -91,6 +91,7 @@ pub enum MaybeTlsStream { Plain(tokio::net::TcpStream), /// A Tls wrapped [`tokio::net::TcpStream`] Tls(tokio_rustls::server::TlsStream), + /// For testing #[cfg(test)] Test(tokio::io::DuplexStream), } diff --git a/iroh-willow/Cargo.toml b/iroh-willow/Cargo.toml index 8b10107711..5fdabf0780 100644 --- a/iroh-willow/Cargo.toml +++ b/iroh-willow/Cargo.toml @@ -20,6 +20,7 @@ bytes = { version = "1.4", features = ["serde"] } curve25519-dalek = { version = "4.1.3", features = [ "digest", "rand_core", "serde", ] } derive_more = { version = "=1.0.0-beta.7", features = [ "debug", "deref", "display", "from", "try_into", "into", "as_ref", "try_from", ] } ed25519-dalek = { version = "2.0.0", features = ["serde", "rand_core"] } +either = "1.13.0" futures-buffered = 
"0.2.6" futures-concurrency = "7.6.0" futures-lite = "2.3.0" @@ -27,6 +28,7 @@ futures-util = "0.3.30" genawaiter = "0.99.1" hex = "0.4.3" iroh-base = { version = "0.24.0", path = "../iroh-base" } +iroh-blake3 = "1.4.5" iroh-blobs = { version = "0.24.0", path = "../iroh-blobs" } iroh-io = { version = "0.6.0", features = ["stats"] } iroh-metrics = { version = "0.24.0", path = "../iroh-metrics", optional = true } @@ -36,6 +38,7 @@ postcard = { version = "1", default-features = false, features = [ "alloc", "use rand = "0.8.5" rand_core = "0.6.4" redb = { version = "2.0.0" } +self_cell = "1.0.4" serde = { version = "1.0.164", features = ["derive"] } sha2 = "0.10.8" strum = { version = "0.26", features = ["derive"] } @@ -48,7 +51,9 @@ tracing = "0.1" ufotofu = { version = "0.4.1", features = ["std"] } willow-data-model = "0.1.0" willow-encoding = "0.1.0" -zerocopy = { version = "0.8.0-alpha.9", features = ["derive"] } +willow-store = { git = "https://github.com/n0-computer/willow-store.git", branch = "matheus23/redb-ref" } +zerocopy = { version = "0.7", features = ["derive"] } +zerocopy-derive = "0.7" [dev-dependencies] iroh-test = { path = "../iroh-test" } diff --git a/iroh-willow/proptest-regressions/store/glue.txt b/iroh-willow/proptest-regressions/store/glue.txt new file mode 100644 index 0000000000..947d76dd0a --- /dev/null +++ b/iroh-willow/proptest-regressions/store/glue.txt @@ -0,0 +1,7 @@ +# Seeds for failure cases proptest has generated in the past. It is +# automatically read and these particular cases re-run before any +# novel cases are generated. +# +# It is recommended to check this file in to source control so that +# everyone who runs the test benefits from these saved cases. 
+cc fce9116f574b43cdad834dcc3e61661a64a69c6b465cc1e453d7bd9875397b24 # shrinks to input = _PropStoredTimestampOrdMatchesU64OrdArgs { num: 11844874737747751936, other: 1 } diff --git a/iroh-willow/src/engine/actor.rs b/iroh-willow/src/engine/actor.rs index f72862ca40..202193773c 100644 --- a/iroh-willow/src/engine/actor.rs +++ b/iroh-willow/src/engine/actor.rs @@ -464,10 +464,16 @@ impl Actor { Err(err) => reply.send(Err(err)).await.map_err(send_reply_error), Ok(snapshot) => { self.tasks.spawn_local(async move { - let iter = snapshot.get_authorised_entries(namespace, &range); - for entry in iter { - if reply.send(entry).await.is_err() { - break; + match snapshot.get_authorised_entries(namespace, &range) { + Ok(iter) => { + for entry in iter { + if reply.send(entry).await.is_err() { + break; + } + } + } + Err(err) => { + let _ = reply.send(Err(err)).await; } } }); diff --git a/iroh-willow/src/net.rs b/iroh-willow/src/net.rs index 45603d6e09..11bf554a42 100644 --- a/iroh-willow/src/net.rs +++ b/iroh-willow/src/net.rs @@ -10,7 +10,7 @@ use iroh_net::endpoint::{ Connection, ConnectionError, ReadError, ReadExactError, RecvStream, SendStream, VarInt, }; use tokio::io::{AsyncReadExt, AsyncWriteExt}; -use tracing::{debug, trace, warn}; +use tracing::{debug, trace}; use crate::{ proto::wgps::{ diff --git a/iroh-willow/src/proto/keys.rs b/iroh-willow/src/proto/keys.rs index a51c4579d2..a8d303edaf 100644 --- a/iroh-willow/src/proto/keys.rs +++ b/iroh-willow/src/proto/keys.rs @@ -12,6 +12,9 @@ use ed25519_dalek::{SignatureError, Signer, SigningKey, Verifier, VerifyingKey}; use iroh_base::base32; use rand_core::CryptoRngCore; use serde::{Deserialize, Serialize}; +use willow_store::FixedSize; +use willow_store::IsLowerBound; +use willow_store::LowerBound; use super::meadowcap::IsCommunal; @@ -417,9 +420,29 @@ impl std::hash::Hash for UserSignature { AsRef, Serialize, Deserialize, + zerocopy_derive::FromBytes, + zerocopy_derive::AsBytes, + zerocopy_derive::FromZeroes, )] 
+#[repr(transparent)] pub struct UserId([u8; 32]); +impl LowerBound for UserId { + fn min_value() -> Self { + Self([0u8; 32]) + } +} + +impl IsLowerBound for UserId { + fn is_min_value(&self) -> bool { + *self == Self::min_value() + } +} + +impl FixedSize for UserId { + const SIZE: usize = std::mem::size_of::(); +} + bytestring!(UserId, PUBLIC_KEY_LENGTH); impl UserId { diff --git a/iroh-willow/src/proto/wgps/fingerprint.rs b/iroh-willow/src/proto/wgps/fingerprint.rs index d5358e2ae0..d62751432b 100644 --- a/iroh-willow/src/proto/wgps/fingerprint.rs +++ b/iroh-willow/src/proto/wgps/fingerprint.rs @@ -1,13 +1,77 @@ use std::fmt; -use iroh_blobs::Hash; use serde::{Deserialize, Serialize}; +use willow_store::{FixedSize, LiftingCommutativeMonoid, PointRef}; -use crate::proto::data_model::{Entry, EntryExt}; +use crate::{ + proto::data_model::Entry, + store::willow_store_glue::{ + path_to_blobseq, IrohWillowParams, StoredAuthorisedEntry, StoredTimestamp, + }, +}; -#[derive(Default, Serialize, Deserialize, Eq, PartialEq, Clone, Copy)] +#[derive( + Default, + Serialize, + Deserialize, + Eq, + PartialEq, + Clone, + Copy, + zerocopy_derive::FromBytes, + zerocopy_derive::AsBytes, + zerocopy_derive::FromZeroes, +)] +#[repr(transparent)] pub struct Fingerprint(pub [u8; 32]); +impl Fingerprint { + pub(crate) fn lift_stored_entry( + key: &PointRef, + payload_digest: &[u8; 32], + payload_size: u64, + ) -> Self { + let mut hasher = iroh_blake3::Hasher::default(); + hasher.update(key.as_slice()); + hasher.update(payload_digest); + hasher.update(&payload_size.to_le_bytes()); + Self(*hasher.finalize().as_bytes()) + } + + pub fn lift_entry(entry: &Entry) -> Self { + let point = willow_store::Point::::new( + entry.subspace_id(), + &StoredTimestamp::new(entry.timestamp()), + &path_to_blobseq(entry.path()), + ); + Self::lift_stored_entry( + &point, + entry.payload_digest().0.as_bytes(), + entry.payload_length(), + ) + } +} + +impl FixedSize for Fingerprint { + const SIZE: usize = 
std::mem::size_of::(); +} + +impl LiftingCommutativeMonoid, StoredAuthorisedEntry> for Fingerprint { + fn neutral() -> Self { + Self([0u8; 32]) + } + + fn lift(key: &PointRef, value: &StoredAuthorisedEntry) -> Self { + Self::lift_stored_entry(key, &value.payload_digest, value.payload_size) + } + + fn combine(&self, other: &Self) -> Self { + let mut slf = self.clone(); + slf ^= *other; + slf + } +} + impl fmt::Debug for Fingerprint { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "Fingerprint({})", iroh_base::base32::fmt_short(self.0)) @@ -17,8 +81,7 @@ impl fmt::Debug for Fingerprint { impl Fingerprint { pub fn add_entry(&mut self, entry: &Entry) { // TODO: Don't allocate - let encoded = entry.encode_to_vec(); - let next = Fingerprint(*Hash::new(&encoded).as_bytes()); + let next = Self::lift_entry(entry); *self ^= next; } diff --git a/iroh-willow/src/session/reconciler.rs b/iroh-willow/src/session/reconciler.rs index 695dec1c73..db7c15de89 100644 --- a/iroh-willow/src/session/reconciler.rs +++ b/iroh-willow/src/session/reconciler.rs @@ -546,7 +546,7 @@ impl Target { shared .store .entries() - .get_authorised_entries(self.namespace(), range) + .get_authorised_entries(self.namespace(), range)? .peekable(), ) }; diff --git a/iroh-willow/src/store.rs b/iroh-willow/src/store.rs index e729fd6e82..b3828d03bd 100644 --- a/iroh-willow/src/store.rs +++ b/iroh-willow/src/store.rs @@ -27,7 +27,9 @@ pub(crate) use self::traits::EntryOrigin; pub(crate) mod auth; pub mod memory; +pub mod persistent; pub mod traits; +pub(crate) mod willow_store_glue; /// Storage for the Willow engine. /// @@ -84,7 +86,7 @@ impl Store { }; let secret_key = self .secrets() - .get_user(&user_id) + .get_user(&user_id)? 
.context("Missing user keypair")?; // TODO(frando): This should use `authorisation_token_unchecked` if we uphold the invariant diff --git a/iroh-willow/src/store/auth.rs b/iroh-willow/src/store/auth.rs index 84b3c74d2a..11f01ec2fc 100644 --- a/iroh-willow/src/store/auth.rs +++ b/iroh-willow/src/store/auth.rs @@ -66,7 +66,7 @@ impl Auth { // Only allow importing caps we can use. // TODO: Is this what we want? let user_id = cap.receiver(); - if !self.secrets.has_user(&user_id) { + if !self.secrets.has_user(&user_id)? { return Err(AuthError::MissingUserSecret(user_id)); } self.caps.insert(cap)?; @@ -147,7 +147,7 @@ impl Auth { } else { let namespace_secret = self .secrets - .get_namespace(&namespace_key) + .get_namespace(&namespace_key)? .ok_or(AuthError::MissingNamespaceSecret(namespace_key))?; let read_cap = McCapability::new_owned( namespace_key, @@ -174,7 +174,7 @@ impl Auth { } else { let namespace_secret = self .secrets - .get_namespace(&namespace_key) + .get_namespace(&namespace_key)? .ok_or(AuthError::MissingNamespaceSecret(namespace_key))?; McCapability::new_owned( namespace_key, @@ -224,7 +224,7 @@ impl Auth { let user_id = read_cap.receiver(); let user_secret = self .secrets - .get_user(user_id) + .get_user(user_id)? .ok_or(AuthError::MissingUserSecret(*user_id))?; let area = restrict_area.or_default(read_cap.granted_area()); let new_read_cap = read_cap.delegate(&user_secret, &to, &area)?; @@ -255,7 +255,7 @@ impl Auth { let cap = self.get_write_cap(from)?.ok_or(AuthError::NoCapability)?; let user_secret = self .secrets - .get_user(cap.receiver()) + .get_user(cap.receiver())? 
.ok_or(AuthError::MissingUserSecret(*cap.receiver()))?; let area = restrict_area.or_default(cap.granted_area()); let new_cap = cap.delegate(&user_secret, &to, &area)?; diff --git a/iroh-willow/src/store/memory.rs b/iroh-willow/src/store/memory.rs index ce36738973..c609a9d1de 100644 --- a/iroh-willow/src/store/memory.rs +++ b/iroh-willow/src/store/memory.rs @@ -20,13 +20,12 @@ use crate::proto::grouping::Area; use crate::{ interest::{CapSelector, CapabilityPack}, proto::{ - data_model::{AuthorisedEntry, Entry, EntryExt, Path, SubspaceId, WriteCapability}, - grouping::{Range, Range3d, RangeEnd}, + data_model::{AuthorisedEntry, Path, SubspaceId, WriteCapability}, + grouping::Range3d, keys::{NamespaceId, NamespaceSecretKey, UserId, UserSecretKey}, meadowcap::{self, is_wider_than, ReadAuthorisation}, - wgps::Fingerprint, }, - store::traits::{self, RangeSplit, SplitAction, SplitOpts}, + store::traits, }; use super::traits::{StoreEvent, SubscribeParams}; @@ -94,12 +93,12 @@ impl traits::SecretStorage for Rc> { Ok(()) } - fn get_user(&self, id: &UserId) -> Option { - self.borrow().user.get(id).cloned() + fn get_user(&self, id: &UserId) -> Result> { + Ok(self.borrow().user.get(id).cloned()) } - fn get_namespace(&self, id: &NamespaceId) -> Option { - self.borrow().namespace.get(id).cloned() + fn get_namespace(&self, id: &NamespaceId) -> Result> { + Ok(self.borrow().namespace.get(id).cloned()) } } @@ -116,97 +115,14 @@ pub struct NamespaceStore { // impl + 'static> ReadonlyStore for T { impl traits::EntryReader for Rc> { - fn fingerprint(&self, namespace: NamespaceId, range: &Range3d) -> Result { - let mut fingerprint = Fingerprint::default(); - for entry in self.get_entries(namespace, range) { - let entry = entry?; - fingerprint.add_entry(&entry); - } - Ok(fingerprint) - } - - fn split_range( - &self, - namespace: NamespaceId, - range: &Range3d, - config: &SplitOpts, - ) -> Result>> { - let count = self.get_entries(namespace, range).count(); - if count <= config.max_set_size 
{ - return Ok( - vec![Ok((range.clone(), SplitAction::SendEntries(count as u64)))].into_iter(), - ); - } - let mut entries: Vec = self - .get_entries(namespace, range) - .filter_map(|e| e.ok()) - .collect(); - - entries.sort_by(|e1, e2| e1.as_sortable_tuple().cmp(&e2.as_sortable_tuple())); - - let split_index = count / 2; - let mid = entries.get(split_index).expect("not empty"); - let mut ranges = vec![]; - // split in two halves by subspace - if *mid.subspace_id() != range.subspaces().start { - ranges.push(Range3d::new( - Range::new_closed(range.subspaces().start, *mid.subspace_id()).unwrap(), - range.paths().clone(), - *range.times(), - )); - ranges.push(Range3d::new( - Range::new(*mid.subspace_id(), range.subspaces().end), - range.paths().clone(), - *range.times(), - )); - } - // split by path - else if *mid.path() != range.paths().start { - ranges.push(Range3d::new( - *range.subspaces(), - Range::new( - range.paths().start.clone(), - RangeEnd::Closed(mid.path().clone()), - ), - *range.times(), - )); - ranges.push(Range3d::new( - *range.subspaces(), - Range::new(mid.path().clone(), range.paths().end.clone()), - *range.times(), - )); - // split by time - } else { - ranges.push(Range3d::new( - *range.subspaces(), - range.paths().clone(), - Range::new(range.times().start, RangeEnd::Closed(mid.timestamp())), - )); - ranges.push(Range3d::new( - *range.subspaces(), - range.paths().clone(), - Range::new(mid.timestamp(), range.times().end), - )); - } - let mut out = vec![]; - for range in ranges { - let fingerprint = self.fingerprint(namespace, &range)?; - out.push(Ok((range, SplitAction::SendFingerprint(fingerprint)))); - } - Ok(out.into_iter()) - } - - fn count(&self, namespace: NamespaceId, range: &Range3d) -> Result { - Ok(self.get_entries(namespace, range).count() as u64) - } - fn get_authorised_entries<'a>( &'a self, namespace: NamespaceId, range: &Range3d, - ) -> impl Iterator> + 'a { + ) -> Result> + 'a> { let slf = self.borrow(); - slf.stores + Ok(slf + .stores 
.get(&namespace) .map(|s| &s.entries) .into_iter() @@ -214,7 +130,7 @@ impl traits::EntryReader for Rc> { .filter(|entry| range.includes_entry(entry.entry())) .map(|e| anyhow::Result::Ok(e.clone())) .collect::>() - .into_iter() + .into_iter()) } fn get_entry( @@ -410,7 +326,7 @@ impl Stream for EventStream { // TODO: This would be quite a bit more efficient if we filtered the waker with a closure // that is set from the last poll, to not wake everyone for each new event. #[derive(Debug)] -struct EventQueue { +pub(crate) struct EventQueue { events: VecDeque, offset: u64, wakers: VecDeque, @@ -435,7 +351,7 @@ impl Default for EventQueue { } impl EventQueue { - fn insert(&mut self, f: impl FnOnce(u64) -> T) { + pub(crate) fn insert(&mut self, f: impl FnOnce(u64) -> T) { let progress_id = self.next_progress_id(); let event = f(progress_id); self.events.push_back(event); @@ -444,16 +360,16 @@ impl EventQueue { } } - fn next_progress_id(&self) -> u64 { + pub(crate) fn next_progress_id(&self) -> u64 { self.offset + self.events.len() as u64 } - fn get(&self, progress_id: u64) -> Option<&T> { + pub(crate) fn get(&self, progress_id: u64) -> Option<&T> { let index = progress_id.checked_sub(self.offset)?; self.events.get(index as usize) } - fn poll_next( + pub(crate) fn poll_next( &mut self, progress_id: u64, filter: impl Fn(&T) -> bool, diff --git a/iroh-willow/src/store/persistent.rs b/iroh-willow/src/store/persistent.rs new file mode 100644 index 0000000000..f7f44ef773 --- /dev/null +++ b/iroh-willow/src/store/persistent.rs @@ -0,0 +1,755 @@ +use anyhow::Result; +use ed25519_dalek::ed25519; +use futures_util::Stream; +use redb::{Database, ReadableTable}; +use std::{ + cell::{Ref, RefCell, RefMut}, + collections::HashMap, + ops::DerefMut, + path::PathBuf, + pin::Pin, + rc::{Rc, Weak}, + task::{ready, Context, Poll}, + time::Duration, +}; +use willow_data_model::SubspaceId as _; +use willow_store::{QueryRange, QueryRange3d}; + +use crate::{ + interest::{CapSelector, 
CapabilityPack}, + proto::{ + data_model::{ + AuthorisationToken, AuthorisedEntry, NamespaceId, Path, PathExt as _, SubspaceId, + WriteCapability, + }, + grouping::{Area, Range3d}, + keys::{NamespaceSecretKey, UserId, UserSecretKey, UserSignature}, + meadowcap, + wgps::Fingerprint, + }, + store::willow_store_glue::{ + path_to_blobseq, to_range3d, StoredAuthorisedEntry, StoredTimestamp, + }, +}; + +use super::{ + memory, + traits::{self, SplitAction, StoreEvent, SubscribeParams}, + willow_store_glue::{to_query, IrohWillowParams}, +}; + +mod tables; + +const MAX_COMMIT_DELAY: Duration = Duration::from_millis(500); + +#[derive(derive_more::Debug, Clone)] +pub struct Store { + payloads: PS, + willow: Rc, +} + +impl Store { + pub fn new(db_path: PathBuf, payload_store: PS) -> Result { + Ok(Self { + payloads: payload_store, + willow: Rc::new(WillowStore::persistent(db_path)?), + }) + } + + pub fn new_memory(payload_store: PS) -> Result { + Ok(Self { + payloads: payload_store, + willow: Rc::new(WillowStore::memory()?), + }) + } +} + +#[derive(Debug)] +pub struct WillowStore { + db: Db, + namespace_events: RefCell>>, +} + +#[derive(derive_more::Debug)] +struct Db { + #[debug("redb::Database")] + redb: redb::Database, + tx: RefCell, +} + +#[derive(derive_more::Debug, Default)] +enum CurrentTransaction { + #[default] + None, + Write(#[debug("tables::OpenWrite")] tables::OpenWrite), + Read(#[debug("tables::OpenRead")] tables::OpenRead), +} + +impl WillowStore { + pub fn memory() -> Result { + let db = Database::builder().create_with_backend(redb::backends::InMemoryBackend::new())?; + Self::new_impl(db) + } + + /// Create or open a store from a `path` to a database file. + /// + /// The file will be created if it does not exist, otherwise it will be opened. 
+ pub fn persistent(path: impl AsRef) -> Result { + let db = Database::create(&path.as_ref())?; + Self::new_impl(db) + } + + fn new_impl(db: Database) -> Result { + // Setup all tables + let write_tx = db.begin_write()?; + let _ = tables::Tables::new(&write_tx)?; + write_tx.commit()?; + + Ok(Self { + db: Db { + redb: db, + tx: Default::default(), + }, + namespace_events: Default::default(), + }) + } + + pub fn snapshot(&self) -> Result { + Ok(WillowSnapshot(Rc::new(self.db.snapshot_owned()?))) + } +} + +impl Db { + /// Flush the current transaction, if any. + /// + /// This is the cheapest way to ensure that the data is persisted. + fn flush(&self) -> Result<()> { + if let CurrentTransaction::Write(w) = std::mem::take(self.tx.borrow_mut().deref_mut()) { + w.commit()?; + } + Ok(()) + } + + /// Get a read-only snapshot of the database. + /// + /// This has the side effect of committing any open write transaction, + /// so it can be used as a way to ensure that the data is persisted. + fn snapshot(&self) -> Result> { + let mut guard = self.tx.borrow_mut(); + let tables = match std::mem::take(guard.deref_mut()) { + CurrentTransaction::None => { + let tx = self.redb.begin_read()?; + tables::OpenRead::new(&tx)? + } + CurrentTransaction::Write(w) => { + w.commit()?; + let tx = self.redb.begin_read()?; + tables::OpenRead::new(&tx)? + } + CurrentTransaction::Read(tables) => tables, + }; + *guard = CurrentTransaction::Read(tables); + drop(guard); + Ok(Ref::map(self.tx.borrow(), |tx| match tx { + CurrentTransaction::Read(ref tables) => tables, + _ => unreachable!(), + })) + } + + /// Get an owned read-only snapshot of the database. + /// + /// This will open a new read transaction. The read transaction won't be reused for other + /// reads. + /// + /// This has the side effect of committing any open write transaction, + /// so it can be used as a way to ensure that the data is persisted. 
+ fn snapshot_owned(&self) -> Result { + // make sure the current transaction is committed + self.flush()?; + let tx = self.redb.begin_read()?; + let tables = tables::OpenRead::new(&tx)?; + Ok(tables) + } + + /// Get access to the tables to read from them. + /// + /// The underlying transaction is a write transaction, but with a non-mut + /// reference to the tables you can not write. + /// + /// There is no guarantee that this will be an independent transaction. + /// You just get readonly access to the current state of the database. + /// + /// As such, there is also no guarantee that the data you see is + /// already persisted. + fn tables(&self) -> Result> { + let mut guard = self.tx.borrow_mut(); + let tables = match std::mem::take(guard.deref_mut()) { + CurrentTransaction::None | CurrentTransaction::Read(_) => { + let tx = self.redb.begin_write()?; + tables::OpenWrite::new(tx)? + } + CurrentTransaction::Write(w) => { + if w.since.elapsed() > MAX_COMMIT_DELAY { + tracing::debug!("committing transaction because it's too old"); + w.commit()?; + let tx = self.redb.begin_write()?; + tables::OpenWrite::new(tx)? 
+ } else { + w + } + } + }; + *guard = CurrentTransaction::Write(tables); + Ok(RefMut::map(guard, |tx| match tx { + CurrentTransaction::Write(ref mut tables) => tables, + _ => unreachable!(), + })) + } +} + +impl traits::Storage for Store { + type Entries = Rc; + type Secrets = Rc; + type Payloads = PS; + type Caps = Rc; + + fn entries(&self) -> &Self::Entries { + &self.willow + } + + fn secrets(&self) -> &Self::Secrets { + &self.willow + } + + fn payloads(&self) -> &Self::Payloads { + &self.payloads + } + + fn caps(&self) -> &Self::Caps { + &self.willow + } +} + +#[derive(derive_more::Debug, Clone)] +pub struct WillowSnapshot(#[debug(skip)] Rc); + +impl WillowSnapshot { + fn split_range_owned( + self, + namespace: NamespaceId, + range: &Range3d, + config: &traits::SplitOpts, + ) -> Result>> { + let max_set_size = config.max_set_size as u64; + let split_factor = config.split_factor as u64; + + let count = traits::EntryReader::count(&self, namespace, range)?; + if count <= max_set_size { + return Ok(either::Left( + Some(Ok((range.clone(), SplitAction::SendEntries(count)))).into_iter(), + )); + } + + let node_id = self + .0 + .as_ref() + .namespace_nodes + .get(namespace.as_bytes())? 
+ .expect("node must be set if count > 0 (checked above)"); + let ns_node = willow_store::Node::::from(node_id.value()); + + Ok(either::Right( + ns_node + .split_range_owned(to_query(&range), split_factor, self.clone()) + .map({ + let ns_node = ns_node.clone(); + move |result| { + let (range, count) = result?; + if count <= max_set_size { + Ok((to_range3d(range)?, traits::SplitAction::SendEntries(count))) + } else { + let fingerprint = ns_node.range_summary(&range, &self)?; + Ok(( + to_range3d(range)?, + traits::SplitAction::SendFingerprint(fingerprint), + )) + } + } + }), + )) + } + + fn get_authorised_entries_owned( + self, + namespace: NamespaceId, + range: &Range3d, + ) -> Result>> { + let clone = Rc::clone(&self.0); + let read = self.0.as_ref(); + let Some(node_id) = read.namespace_nodes.get(namespace.as_bytes())? else { + return Ok(either::Left(std::iter::empty())); + }; + let ns_node = willow_store::Node::::from(node_id.value()); + Ok(either::Right( + ns_node + .query(&to_query(range), &read.node_store) + .map(move |result| { + let (point, stored_entry) = result?; + let id = stored_entry.authorisation_token_id; + let auth_token = get_entry_auth_token(id, &clone.auth_tokens) + .inspect_err(|e| tracing::error!(%e, "Database inconsistent, failed to fetch auth token"))?; + stored_entry.into_authorised_entry(namespace, &point, auth_token) + }) + .collect::>() + .into_iter(), + )) + } +} + +impl willow_store::BlobStoreRead for WillowSnapshot { + fn peek(&self, id: willow_store::NodeId, f: impl Fn(&[u8]) -> T) -> Result { + self.0.node_store.peek(id, f) + } +} + +impl traits::EntryReader for WillowSnapshot { + fn fingerprint(&self, namespace: NamespaceId, range: &Range3d) -> Result { + let read = self.0.as_ref(); + let Some(node_id) = read.namespace_nodes.get(namespace.as_bytes())? 
else { + return Ok(Fingerprint::default()); + }; + let ns_node = willow_store::Node::::from(node_id.value()); + ns_node.range_summary(&to_query(range), &read.node_store) + } + + fn count(&self, namespace: NamespaceId, range: &Range3d) -> Result { + let read = self.0.as_ref(); + let Some(node_id) = read.namespace_nodes.get(namespace.as_bytes())? else { + return Ok(0); + }; + let ns_node = willow_store::Node::::from(node_id.value()); + ns_node.range_count(&to_query(range), &read.node_store) + } + + fn split_range( + &self, + namespace: NamespaceId, + range: &Range3d, + config: &traits::SplitOpts, + ) -> Result>> { + self.clone().split_range_owned(namespace, range, config) + } + + fn get_entry( + &self, + namespace: NamespaceId, + subspace: SubspaceId, + path: &Path, + ) -> Result> { + let read = self.0.as_ref(); + let Some(node_id) = read.namespace_nodes.get(namespace.as_bytes())? else { + return Ok(None); + }; + let ns_node = willow_store::Node::::from(node_id.value()); + let blobseq = path_to_blobseq(path); + let end = blobseq.immediate_successor(); + let Some(result) = ns_node + .query_ordered( + &QueryRange3d { + x: QueryRange::new(subspace, subspace.successor()), + y: QueryRange::all(), + z: QueryRange::new(blobseq, Some(end)), + }, + willow_store::SortOrder::YZX, + &read.node_store, + ) + .last() + else { + return Ok(None); + }; + + let (point, stored_entry) = result?; + let id = stored_entry.authorisation_token_id; + let auth_token = get_entry_auth_token(id, &read.auth_tokens)?; + let entry = stored_entry.into_authorised_entry(namespace, &point, auth_token.clone())?; + Ok(Some(entry)) + } + + fn get_authorised_entries<'a>( + &'a self, + namespace: NamespaceId, + range: &Range3d, + ) -> Result> + 'a> { + self.clone().get_authorised_entries_owned(namespace, range) + } +} + +impl traits::EntryStorage for Rc { + type Reader = Self; + type Snapshot = WillowSnapshot; + + fn reader(&self) -> Self::Reader { + Rc::clone(self) + } + + fn snapshot(&self) -> Result { + 
Ok(WillowSnapshot(Rc::new(self.db.snapshot_owned()?))) + } + + fn ingest_entry( + &self, + entry: &crate::proto::data_model::AuthorisedEntry, + origin: super::EntryOrigin, + ) -> Result { + let namespace = *entry.entry().namespace_id(); + + let (insert_point, insert_entry) = StoredAuthorisedEntry::from_authorised_entry(entry); + + let mut events = self.namespace_events.borrow_mut(); + let ns_events = events.entry(namespace).or_default(); + + self.db.tables()?.modify(|write| { + // TODO(matheus23): need to get a progress_id here somehow. + // There's ideas to use the willow-store NodeId for that. + + let mut ns_node: willow_store::Node = write + .namespace_nodes + .get(namespace.as_bytes())? + .map_or(willow_store::NodeId::EMPTY, |guard| guard.value()) + .into(); + + // Enforce prefix deletion: + + let blobseq_start = path_to_blobseq(entry.entry().path()); + let blobseq_end = blobseq_start.subseq_successor(); + + let overwritten_range = QueryRange3d { + x: QueryRange::new( + *entry.entry().subspace_id(), + entry.entry().subspace_id().successor(), + ), + y: QueryRange::new( + StoredTimestamp::new(0), + Some(StoredTimestamp::new(entry.entry().timestamp())), + ), + z: QueryRange::new(blobseq_start, blobseq_end), + }; + + let prune_candidates = ns_node + .query(&overwritten_range, &write.node_store) + .collect::, _>>()?; + + for (prune_pos, prune_candidate) in prune_candidates { + let pruned_token_id = prune_candidate.authorisation_token_id; + let auth_token = get_entry_auth_token(pruned_token_id, &write.auth_tokens)?; + let pruned = + prune_candidate.into_authorised_entry(namespace, &prune_pos, auth_token)?; // fairly inefficient + if entry.entry().is_newer_than(pruned.entry()) { + // TODO(matheus23): Don't *actually* delete here? (depending on a potential traceless bit) + // There was some idea along the lines of "mark as deleted" by storing the identifier for the deletion. 
+ ns_node.delete(&prune_pos, &mut write.node_store)?; + ns_events.insert(move |id| { + StoreEvent::Pruned( + id, + traits::PruneEvent { + pruned, + by: entry.clone(), + }, + ) + }); + // Decrease auth token refcount to allow eventually cleaning up the token + remove_entry_auth_token(write, pruned_token_id)?; + } + } + + tracing::debug!( + subspace = %entry.entry().subspace_id().fmt_short(), + path = %entry.entry().path().fmt_utf8(), + "ingest entry" + ); + + // Insert auth token & entry: + + add_entry_auth_token(entry.token(), write)?; + + let _replaced = ns_node.insert(&insert_point, &insert_entry, &mut write.node_store)?; + + ns_events.insert(|id| StoreEvent::Ingested(id, entry.clone(), origin)); + + write + .namespace_nodes + .insert(namespace.to_bytes(), ns_node.id())?; + + Ok(()) + })?; + + Ok(true) + } + + fn subscribe_area( + &self, + namespace: NamespaceId, + area: Area, + params: traits::SubscribeParams, + ) -> impl Stream + Unpin + 'static { + let namespaces = &mut self.namespace_events.borrow_mut(); + let ns_events = namespaces.entry(namespace).or_default(); + let progress_id = ns_events.next_progress_id(); + EventStream { + area, + params, + namespace, + progress_id, + store: Rc::downgrade(self), + } + } + + fn resume_subscription( + &self, + progress_id: u64, + namespace: NamespaceId, + area: Area, + params: traits::SubscribeParams, + ) -> impl Stream + Unpin + 'static { + EventStream { + area, + params, + progress_id, + namespace, + store: Rc::downgrade(self), + } + } +} + +impl traits::EntryReader for Rc { + fn fingerprint(&self, namespace: NamespaceId, range: &Range3d) -> Result { + self.snapshot()?.fingerprint(namespace, range) + } + + fn count(&self, namespace: NamespaceId, range: &Range3d) -> Result { + self.snapshot()?.count(namespace, range) + } + + fn split_range( + &self, + namespace: NamespaceId, + range: &Range3d, + config: &traits::SplitOpts, + ) -> Result>> { + self.snapshot()?.split_range_owned(namespace, range, config) + } + + fn 
get_entry( + &self, + namespace: NamespaceId, + subspace: SubspaceId, + path: &Path, + ) -> Result> { + self.snapshot()?.get_entry(namespace, subspace, path) + } + + fn get_authorised_entries<'a>( + &'a self, + namespace: NamespaceId, + range: &Range3d, + ) -> Result> + 'a> { + self.snapshot()? + .get_authorised_entries_owned(namespace, range) + } +} + +impl traits::SecretStorage for Rc { + fn insert(&self, secret: meadowcap::SecretKey) -> Result<(), traits::SecretStoreError> { + self.db + .tables()? + .modify(|write| { + match secret { + meadowcap::SecretKey::User(user) => write + .user_secrets + .insert(user.public_key().as_bytes(), user.to_bytes())?, + meadowcap::SecretKey::Namespace(namespace) => write + .namespace_secrets + .insert(namespace.public_key().as_bytes(), namespace.to_bytes())?, + }; + Ok(()) + }) + .map_err(traits::SecretStoreError::from) + } + + fn get_user(&self, id: &UserId) -> Result> { + let tables = self.db.tables()?; + let user = tables.read().user_secrets.get(id.as_bytes())?; + Ok(user.map(|usr| UserSecretKey::from_bytes(&usr.value()))) + } + + fn get_namespace(&self, id: &NamespaceId) -> Result> { + let tables = self.db.tables()?; + let namespace = tables.read().namespace_secrets.get(id.as_bytes())?; + Ok(namespace.map(|ns| NamespaceSecretKey::from_bytes(&ns.value()))) + } +} + +impl traits::CapsStorage for Rc { + fn insert(&self, cap: CapabilityPack) -> Result<()> { + self.db.tables()?.modify(|write| { + let namespace_id = cap.namespace().to_bytes(); + match cap { + CapabilityPack::Read(r) => { + write.read_caps.insert(namespace_id, tables::ReadCap(r))? + } + CapabilityPack::Write(w) => { + write.write_caps.insert(namespace_id, tables::WriteCap(w))? + } + }; + Ok(()) + }) + } + + fn list_read_caps( + &self, + namespace: Option, + ) -> Result + '_> { + Ok(self + .db + .snapshot()? + .read_caps + .range(namespace.unwrap_or_default().to_bytes()..)? 
+ .flat_map(|result| match result { + Err(_) => either::Left(std::iter::empty()), + Ok((_key_guard, multimap_val)) => either::Right( + multimap_val + .into_iter() + .filter_map(|result| result.ok().map(|val| val.value().0)), + ), + })) + } + + fn list_write_caps( + &self, + namespace: Option, + ) -> Result + '_> { + Ok(self + .db + .snapshot()? + .write_caps + .range(namespace.unwrap_or_default().to_bytes()..)? + .flat_map(|result| match result { + Err(_) => either::Left(std::iter::empty()), + Ok((_key_guard, multimap_val)) => either::Right( + multimap_val + .into_iter() + .filter_map(|result| result.ok().map(|val| val.value().0)), + ), + })) + } + + fn get_write_cap(&self, selector: &CapSelector) -> Result> { + Ok(self + .list_write_caps(Some(selector.namespace_id))? + .find(|cap| selector.is_covered_by(cap))) + } + + fn get_read_cap(&self, selector: &CapSelector) -> Result> { + Ok(self + .list_read_caps(Some(selector.namespace_id))? + .find(|cap| selector.is_covered_by(cap.read_cap()))) + } +} + +fn add_entry_auth_token( + token: &AuthorisationToken, + write: &mut tables::Tables<'_>, +) -> Result<[u8; 64]> { + let cap_sig_bytes = token.signature.to_bytes(); + write + .auth_tokens + .insert(cap_sig_bytes, tables::WriteCap(token.capability.clone()))?; + let refcount = write + .auth_token_refcount + .get(&cap_sig_bytes)? + .map_or(1, |rc| rc.value() + 1); + write.auth_token_refcount.insert(cap_sig_bytes, refcount)?; + Ok(cap_sig_bytes) +} + +fn get_entry_auth_token( + key: ed25519::SignatureBytes, + auth_tokens: &impl redb::ReadableTable, +) -> Result { + let capability = auth_tokens + .get(key)? + .ok_or_else(|| { + anyhow::anyhow!("couldn't find authorisation token id (database inconsistent)") + })? 
+ .value() + .0; + Ok(AuthorisationToken { + capability, + signature: UserSignature::from_bytes(key), + }) +} + +fn remove_entry_auth_token( + write: &mut tables::Tables<'_>, + key: ed25519::SignatureBytes, +) -> Result> { + let Some(refcount) = write.auth_token_refcount.get(&key)?.map(|v| v.value()) else { + return Ok(None); + }; + debug_assert_ne!(refcount, 0); + let new_refcount = refcount - 1; + if new_refcount == 0 { + let capability = write + .auth_tokens + .remove(&key)? + .ok_or_else(|| anyhow::anyhow!("inconsistent database state"))? + .value() + .0; + write.auth_token_refcount.remove(&key)?; + Ok(Some(AuthorisationToken { + capability, + signature: UserSignature::from_bytes(key), + })) + } else { + Ok(None) + } +} + +/// Stream of events from a store subscription. +/// +/// We have weak pointer to the entry store and thus the EventQueue. +/// Once the store is dropped, the EventQueue wakes all streams a last time in its drop impl, +/// which then makes the stream return none because Weak::upgrade returns None. 
+#[derive(Debug)] +struct EventStream { + progress_id: u64, + store: Weak, + namespace: NamespaceId, + area: Area, + params: SubscribeParams, +} + +impl Stream for EventStream { + type Item = StoreEvent; + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let Some(inner) = self.store.upgrade() else { + return Poll::Ready(None); + }; + let mut inner_mut = inner.namespace_events.borrow_mut(); + let events = inner_mut.entry(self.namespace).or_default(); + let res = ready!(events.poll_next( + self.progress_id, + |e| e.matches(self.namespace, &self.area, &self.params), + cx, + )); + drop(inner_mut); + drop(inner); + Poll::Ready(match res { + None => None, + Some((next_id, event)) => { + self.progress_id = next_id; + Some(event) + } + }) + } +} diff --git a/iroh-willow/src/store/persistent/tables.rs b/iroh-willow/src/store/persistent/tables.rs new file mode 100644 index 0000000000..ae2054905e --- /dev/null +++ b/iroh-willow/src/store/persistent/tables.rs @@ -0,0 +1,215 @@ +use std::time::Instant; + +use anyhow::Result; +use ed25519_dalek::ed25519; +use redb::{ + MultimapTable, MultimapTableDefinition, ReadOnlyMultimapTable, ReadOnlyTable, ReadTransaction, + Table, TableDefinition, WriteTransaction, +}; +use ufotofu::sync::{consumer::IntoVec, producer::FromSlice}; +use willow_encoding::sync::{RelativeDecodable, RelativeEncodable}; + +use crate::proto::{ + grouping::Area, + meadowcap::{serde_encoding::SerdeReadAuthorisation, McCapability, ReadAuthorisation}, +}; + +// These consts are here so we don't accidentally break the schema! 
+pub type NamespaceId = [u8; 32]; +pub type UserId = [u8; 32]; + +pub const NAMESPACE_NODES: TableDefinition = + TableDefinition::new("namespace-nodes-0"); + +pub const AUTH_TOKENS: TableDefinition = + TableDefinition::new("auth-tokens-0"); +pub const AUTH_TOKEN_REFCOUNT: TableDefinition = + TableDefinition::new("auth-token-refcounts-0"); + +pub const USER_SECRETS: TableDefinition = TableDefinition::new("user-secrets-0"); +pub const NAMESPACE_SECRETS: TableDefinition = + TableDefinition::new("namespaces-secrets-0"); + +pub const READ_CAPS: MultimapTableDefinition = + MultimapTableDefinition::new("read-caps-0"); +pub const WRITE_CAPS: MultimapTableDefinition = + MultimapTableDefinition::new("write-caps-0"); + +self_cell::self_cell! { + struct OpenWriteInner { + owner: WriteTransaction, + #[covariant] + dependent: Tables, + } +} + +#[derive(derive_more::Debug)] +pub struct OpenWrite { + #[debug("OpenWriteInner")] + inner: OpenWriteInner, + pub since: Instant, +} + +impl OpenWrite { + pub fn new(tx: WriteTransaction) -> Result { + Ok(Self { + inner: OpenWriteInner::try_new(tx, |tx| Tables::new(tx))?, + since: Instant::now(), + }) + } + + pub fn read(&self) -> &Tables { + self.inner.borrow_dependent() + } + + pub fn modify(&mut self, f: impl FnOnce(&mut Tables) -> Result) -> Result { + self.inner.with_dependent_mut(|_, t| f(t)) + } + + pub fn commit(self) -> Result<()> { + self.inner + .into_owner() + .commit() + .map_err(anyhow::Error::from) + } +} + +pub struct Tables<'tx> { + pub namespace_nodes: Table<'tx, NamespaceId, willow_store::NodeId>, + pub auth_tokens: Table<'tx, ed25519::SignatureBytes, WriteCap>, + pub auth_token_refcount: Table<'tx, ed25519::SignatureBytes, u64>, + pub user_secrets: Table<'tx, UserId, [u8; 32]>, + pub namespace_secrets: Table<'tx, NamespaceId, [u8; 32]>, + pub read_caps: MultimapTable<'tx, NamespaceId, ReadCap>, + pub write_caps: MultimapTable<'tx, NamespaceId, WriteCap>, + pub node_store: willow_store::Tables<'tx>, +} + +impl<'tx> 
Tables<'tx> { + pub fn new(tx: &'tx WriteTransaction) -> Result { + Ok(Self { + namespace_nodes: tx.open_table(NAMESPACE_NODES)?, + auth_tokens: tx.open_table(AUTH_TOKENS)?, + auth_token_refcount: tx.open_table(AUTH_TOKEN_REFCOUNT)?, + user_secrets: tx.open_table(USER_SECRETS)?, + namespace_secrets: tx.open_table(NAMESPACE_SECRETS)?, + read_caps: tx.open_multimap_table(READ_CAPS)?, + write_caps: tx.open_multimap_table(WRITE_CAPS)?, + node_store: willow_store::Tables::open(tx)?, + }) + } +} + +pub struct OpenRead { + pub namespace_nodes: ReadOnlyTable, + pub auth_tokens: ReadOnlyTable, + pub read_caps: ReadOnlyMultimapTable, + pub write_caps: ReadOnlyMultimapTable, + pub node_store: willow_store::Snapshot, +} + +impl OpenRead { + pub fn new(tx: &ReadTransaction) -> Result { + Ok(Self { + namespace_nodes: tx.open_table(NAMESPACE_NODES)?, + auth_tokens: tx.open_table(AUTH_TOKENS)?, + read_caps: tx.open_multimap_table(READ_CAPS)?, + write_caps: tx.open_multimap_table(WRITE_CAPS)?, + node_store: willow_store::Snapshot::open(&tx)?, + }) + } +} + +#[derive(Debug)] +pub struct WriteCap(pub McCapability); + +impl redb::Key for WriteCap { + fn compare(data1: &[u8], data2: &[u8]) -> std::cmp::Ordering { + data1.cmp(data2) + } +} + +impl redb::Value for WriteCap { + type SelfType<'a> = Self + where + Self: 'a; + + type AsBytes<'a> = Vec + where + Self: 'a; + + fn fixed_width() -> Option { + None + } + + fn from_bytes<'a>(data: &'a [u8]) -> Self::SelfType<'a> + where + Self: 'a, + { + let capability = + McCapability::relative_decode(&Area::new_full(), &mut FromSlice::new(data)).unwrap(); + WriteCap(capability) + } + + fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> Self::AsBytes<'a> + where + Self: 'a, + Self: 'b, + { + let mut consumer = IntoVec::new(); + value + .0 + .relative_encode(&Area::new_full(), &mut consumer) + .unwrap_or_else(|e| match e {}); // infallible + consumer.into_vec() + } + + fn type_name() -> redb::TypeName { + redb::TypeName::new("WriteCap") + } 
+} + +#[derive(Debug)] +#[repr(transparent)] +pub struct ReadCap(pub ReadAuthorisation); + +impl redb::Key for ReadCap { + fn compare(data1: &[u8], data2: &[u8]) -> std::cmp::Ordering { + data1.cmp(data2) + } +} + +impl redb::Value for ReadCap { + type SelfType<'a> = Self + where + Self: 'a; + + type AsBytes<'a> = Vec + where + Self: 'a; + + fn fixed_width() -> Option { + None + } + + fn from_bytes<'a>(data: &'a [u8]) -> Self::SelfType<'a> + where + Self: 'a, + { + let capability: SerdeReadAuthorisation = postcard::from_bytes(data).unwrap(); + ReadCap(capability.0) + } + + fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> Self::AsBytes<'a> + where + Self: 'a, + Self: 'b, + { + // TODO(matheus23): Fewer clones. + postcard::to_stdvec(&SerdeReadAuthorisation(value.0.clone())).unwrap() + } + + fn type_name() -> redb::TypeName { + redb::TypeName::new("ReadCap") + } +} diff --git a/iroh-willow/src/store/traits.rs b/iroh-willow/src/store/traits.rs index ebcea1a1b7..57706b0049 100644 --- a/iroh-willow/src/store/traits.rs +++ b/iroh-willow/src/store/traits.rs @@ -5,12 +5,14 @@ use std::fmt::Debug; use anyhow::Result; use futures_lite::Stream; use serde::{Deserialize, Serialize}; +use willow_data_model::grouping::{Range, RangeEnd}; use crate::{ interest::{CapSelector, CapabilityPack}, proto::{ data_model::{ - self, AuthorisedEntry, Entry, NamespaceId, Path, SubspaceId, WriteCapability, + self, AuthorisedEntry, Entry, EntryExt as _, NamespaceId, Path, SubspaceId, + WriteCapability, }, grouping::{Area, Range3d}, keys::{NamespaceSecretKey, NamespaceSignature, UserId, UserSecretKey, UserSignature}, @@ -36,15 +38,15 @@ pub trait Storage: Debug + Clone + 'static { /// Storage for user and namespace secrets. 
pub trait SecretStorage: Debug + Clone + 'static { fn insert(&self, secret: meadowcap::SecretKey) -> Result<(), SecretStoreError>; - fn get_user(&self, id: &UserId) -> Option; - fn get_namespace(&self, id: &NamespaceId) -> Option; + fn get_user(&self, id: &UserId) -> Result>; + fn get_namespace(&self, id: &NamespaceId) -> Result>; - fn has_user(&self, id: &UserId) -> bool { - self.get_user(id).is_some() + fn has_user(&self, id: &UserId) -> Result { + Ok(self.get_user(id)?.is_some()) } - fn has_namespace(&self, id: &UserId) -> bool { - self.get_user(id).is_some() + fn has_namespace(&self, id: &UserId) -> Result { + Ok(self.get_user(id)?.is_some()) } fn insert_user(&self, secret: UserSecretKey) -> Result { @@ -63,7 +65,7 @@ pub trait SecretStorage: Debug + Clone + 'static { fn sign_user(&self, id: &UserId, message: &[u8]) -> Result { Ok(self - .get_user(id) + .get_user(id)? .ok_or(SecretStoreError::MissingKey)? .sign(message)) } @@ -73,7 +75,7 @@ pub trait SecretStorage: Debug + Clone + 'static { message: &[u8], ) -> Result { Ok(self - .get_namespace(id) + .get_namespace(id)? .ok_or(SecretStoreError::MissingKey)? .sign(message)) } @@ -117,16 +119,89 @@ pub trait EntryStorage: EntryReader + Clone + Debug + 'static { /// Read-only interface to [`EntryStorage`]. pub trait EntryReader: Debug + 'static { - fn fingerprint(&self, namespace: NamespaceId, range: &Range3d) -> Result; + fn fingerprint(&self, namespace: NamespaceId, range: &Range3d) -> Result { + let mut fingerprint = Fingerprint::default(); + for entry in self.get_entries(namespace, range)? { + let entry = entry?; + fingerprint.add_entry(&entry); + } + Ok(fingerprint) + } fn split_range( &self, namespace: NamespaceId, range: &Range3d, config: &SplitOpts, - ) -> Result>>; + ) -> Result>> { + let count = self.count(namespace, range)? 
as usize; + if count <= config.max_set_size { + return Ok( + vec![Ok((range.clone(), SplitAction::SendEntries(count as u64)))].into_iter(), + ); + } + let mut entries: Vec = self + .get_entries(namespace, range)? + .filter_map(|e| e.ok()) + .collect(); + + entries.sort_by(|e1, e2| e1.as_sortable_tuple().cmp(&e2.as_sortable_tuple())); + + let split_index = count / 2; + let mid = entries.get(split_index).expect("not empty"); + let mut ranges = vec![]; + // split in two halves by subspace + if *mid.subspace_id() != range.subspaces().start { + ranges.push(Range3d::new( + Range::new_closed(range.subspaces().start, *mid.subspace_id()).unwrap(), + range.paths().clone(), + *range.times(), + )); + ranges.push(Range3d::new( + Range::new(*mid.subspace_id(), range.subspaces().end), + range.paths().clone(), + *range.times(), + )); + } + // split by path + else if *mid.path() != range.paths().start { + ranges.push(Range3d::new( + *range.subspaces(), + Range::new( + range.paths().start.clone(), + RangeEnd::Closed(mid.path().clone()), + ), + *range.times(), + )); + ranges.push(Range3d::new( + *range.subspaces(), + Range::new(mid.path().clone(), range.paths().end.clone()), + *range.times(), + )); + // split by time + } else { + ranges.push(Range3d::new( + *range.subspaces(), + range.paths().clone(), + Range::new(range.times().start, RangeEnd::Closed(mid.timestamp())), + )); + ranges.push(Range3d::new( + *range.subspaces(), + range.paths().clone(), + Range::new(mid.timestamp(), range.times().end), + )); + } + let mut out = vec![]; + for range in ranges { + let fingerprint = self.fingerprint(namespace, &range)?; + out.push(Ok((range, SplitAction::SendFingerprint(fingerprint)))); + } + Ok(out.into_iter()) + } - fn count(&self, namespace: NamespaceId, range: &Range3d) -> Result; + fn count(&self, namespace: NamespaceId, range: &Range3d) -> Result { + Ok(self.get_entries(namespace, range)?.count() as u64) + } fn get_entry( &self, @@ -139,15 +214,16 @@ pub trait EntryReader: Debug + 
'static { &'a self, namespace: NamespaceId, range: &Range3d, - ) -> impl Iterator> + 'a; + ) -> Result> + 'a>; fn get_entries( &self, namespace: NamespaceId, range: &Range3d, - ) -> impl Iterator> { - self.get_authorised_entries(namespace, range) - .map(|e| e.map(|e| e.into_parts().0)) + ) -> Result>> { + Ok(self + .get_authorised_entries(namespace, range)? + .map(|e| e.map(|e| e.into_parts().0))) } } diff --git a/iroh-willow/src/store/willow_store_glue.rs b/iroh-willow/src/store/willow_store_glue.rs new file mode 100644 index 0000000000..d556ce3483 --- /dev/null +++ b/iroh-willow/src/store/willow_store_glue.rs @@ -0,0 +1,256 @@ +//! Code required for willow-rs and willow-store to interface together. + +use std::fmt::Display; + +use anyhow::Result; +use ed25519_dalek::ed25519; +use iroh_blobs::Hash; +use willow_data_model::grouping::{Range, RangeEnd}; +use willow_store::{ + BlobSeq, BlobSeqRef, FixedSize, IsLowerBound, KeyParams, LowerBound, Point, QueryRange, + QueryRange3d, TreeParams, +}; + +use crate::proto::{ + data_model::{ + AuthorisationToken, AuthorisedEntry, Component, Entry, NamespaceId, Path, PayloadDigest, + SubspaceId, Timestamp, + }, + grouping::Range3d, + wgps::Fingerprint, +}; + +#[derive( + Debug, + Clone, + Copy, + PartialEq, + Eq, + zerocopy_derive::FromBytes, + zerocopy_derive::AsBytes, + zerocopy_derive::FromZeroes, +)] +#[repr(packed)] +pub(crate) struct StoredAuthorisedEntry { + pub(crate) authorisation_token_id: ed25519::SignatureBytes, + pub(crate) payload_digest: [u8; 32], + pub(crate) payload_size: u64, +} + +impl FixedSize for StoredAuthorisedEntry { + const SIZE: usize = std::mem::size_of::(); +} + +impl StoredAuthorisedEntry { + pub fn from_authorised_entry(entry: &AuthorisedEntry) -> (Point, Self) { + let point = willow_store::Point::::new( + entry.entry().subspace_id(), + &StoredTimestamp::new(entry.entry().timestamp()), + &path_to_blobseq(entry.entry().path()), + ); + let entry = Self { + authorisation_token_id: 
entry.token().signature.to_bytes(), + payload_digest: *entry.entry().payload_digest().0.as_bytes(), + payload_size: entry.entry().payload_length(), + }; + (point, entry) + } + + pub fn into_authorised_entry( + self, + namespace: NamespaceId, + key: &Point, + auth_token: AuthorisationToken, + ) -> Result { + Ok(AuthorisedEntry::new( + self.into_entry(namespace, key)?, + auth_token, + )?) + } + + pub fn into_entry( + self, + namespace: NamespaceId, + key: &Point, + ) -> Result { + let subspace = key.x(); + let timestamp = key.y(); + let blobseq = key.z().to_owned(); + let path = blobseq_to_path(&blobseq)?; + Ok(Entry::new( + namespace, + *subspace, + path, + timestamp.timestamp(), + self.payload_size, + PayloadDigest(Hash::from_bytes(self.payload_digest)), + )) + } +} + +/// A newtype around memory that represents a timestamp. +/// +/// This newtype is needed to avoid alignment issues. +#[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + zerocopy_derive::FromBytes, + zerocopy_derive::AsBytes, + zerocopy_derive::FromZeroes, +)] +#[repr(packed)] +pub(crate) struct StoredTimestamp([u8; 8]); + +impl LowerBound for StoredTimestamp { + fn min_value() -> Self { + Self([0u8; 8]) + } +} + +impl IsLowerBound for StoredTimestamp { + fn is_min_value(&self) -> bool { + self.0 == [0u8; 8] + } +} + +impl FixedSize for StoredTimestamp { + const SIZE: usize = std::mem::size_of::(); +} + +impl Display for StoredTimestamp { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.timestamp().fmt(f) + } +} + +// The `StoredTimestamp` needs to be big-endian so the derived +// `Ord` instance on the inner [u8; 8] matches the ord instance +// of the equivalent u64. +// See also the associated proptest in this module. 
+impl StoredTimestamp { + pub(crate) fn new(ts: Timestamp) -> Self { + Self(ts.to_be_bytes()) + } + + pub(crate) fn timestamp(&self) -> Timestamp { + u64::from_be_bytes(self.0) + } +} + +#[derive(Debug, Default, Clone, Copy, Ord, PartialOrd, PartialEq, Eq)] +pub(crate) struct IrohWillowParams; + +impl TreeParams for IrohWillowParams { + type V = StoredAuthorisedEntry; + type M = Fingerprint; +} + +impl KeyParams for IrohWillowParams { + type X = SubspaceId; + type Y = StoredTimestamp; + type ZOwned = BlobSeq; + type Z = BlobSeqRef; +} + +pub(crate) fn path_to_blobseq(path: &Path) -> BlobSeq { + let path_bytes = path + .components() + .map(|component| component.to_vec()) + .collect::>(); + + BlobSeq::from(path_bytes) +} + +pub(crate) fn blobseq_to_path(blobseq: &BlobSeq) -> Result { + let components = blobseq + .components() + .map(|c| { + Component::new(c) + .ok_or_else(|| anyhow::anyhow!("Path component exceeded length restriction")) + }) + .collect::>>()?; + let total_length = components.iter().map(|c| c.len()).sum::(); + let path = Path::new_from_iter(total_length, &mut components.into_iter())?; + Ok(path) +} + +pub(crate) fn to_query(range3d: &Range3d) -> QueryRange3d { + let path_start = path_to_blobseq(&range3d.paths().start); + let path_end = match &range3d.paths().end { + RangeEnd::Closed(end) => Some(path_to_blobseq(end)), + RangeEnd::Open => None, + }; + QueryRange3d { + x: to_query_range(range3d.subspaces()), + y: to_query_range(&map_range(range3d.times(), |ts| StoredTimestamp::new(*ts))), + z: QueryRange::new(path_start, path_end), + } +} + +pub(crate) fn to_query_range(range: &Range) -> QueryRange { + QueryRange::new( + range.start.clone(), + match &range.end { + RangeEnd::Closed(end) => Some(end.clone()), + RangeEnd::Open => None, + }, + ) +} + +pub(crate) fn to_range3d(query_range3d: QueryRange3d) -> Result { + let path_max = match query_range3d.z.max { + Some(max) => RangeEnd::Closed(blobseq_to_path(&max)?), + None => RangeEnd::Open, + }; + 
Ok(Range3d::new( + to_range(query_range3d.x), + Range { + start: blobseq_to_path(&query_range3d.z.min)?, + end: path_max, + }, + Range { + start: query_range3d.y.min.timestamp(), + end: query_range3d + .y + .max + .map_or(RangeEnd::Open, |ts| RangeEnd::Closed(ts.timestamp())), + }, + )) +} + +fn to_range(qr: QueryRange) -> Range { + Range { + start: qr.min, + end: qr.max.map_or(RangeEnd::Open, RangeEnd::Closed), + } +} + +pub(crate) fn map_range(range: &Range, f: impl Fn(&S) -> T) -> Range { + Range { + start: f(&range.start), + end: match &range.end { + RangeEnd::Closed(end) => RangeEnd::Closed(f(end)), + RangeEnd::Open => RangeEnd::Open, + }, + } +} + +#[cfg(test)] +mod tests { + use proptest::prop_assert_eq; + use test_strategy::proptest; + + use super::StoredTimestamp; + + #[proptest] + fn prop_stored_timestamp_ord_matches_u64_ord(num: u64, other: u64) { + let expected = num.cmp(&other); + let actual = StoredTimestamp::new(num).cmp(&StoredTimestamp::new(other)); + prop_assert_eq!(expected, actual); + } +} diff --git a/iroh-willow/tests/basic.rs b/iroh-willow/tests/basic.rs index 8c11dcb5d1..9bf92cba89 100644 --- a/iroh-willow/tests/basic.rs +++ b/iroh-willow/tests/basic.rs @@ -7,6 +7,7 @@ use futures_lite::StreamExt; use iroh_blobs::store::{Map, MapEntry}; use iroh_io::AsyncSliceReaderExt; +use iroh_net::key::SecretKey; use iroh_willow::{ form::EntryForm, interest::{CapSelector, DelegateTo, Interests, IntoAreaOfInterest, RestrictArea}, @@ -279,6 +280,40 @@ async fn peer_manager_twoway_loop() -> Result<()> { Ok(()) } +/// Regression test. Used to fail due to redb's slices being unaligned, +/// and previously timestamps being represented as u64, thus failing to +/// zerocopy-deserialize. 
+#[tokio::test(flavor = "multi_thread")] +async fn read_back_write() -> Result<()> { + iroh_test::logging::setup_multithreaded(); + let mut rng = create_rng("read_back_write"); + + let alfie = Peer::spawn(SecretKey::generate_with_rng(&mut rng), Default::default()).await?; + + let user_alfie = alfie.create_user().await?; + let namespace_id = alfie + .create_namespace(NamespaceKind::Owned, user_alfie) + .await?; + + for i in 0u64..2 { + let path = Path::from_bytes(&[b"foo", &i.to_le_bytes()])?; + let entry = EntryForm::new_bytes(namespace_id, path, "foo"); + alfie.insert_entry(entry, user_alfie).await?; + } + + let entries: Vec<_> = alfie + .get_entries(namespace_id, Range3d::new_full()) + .await? + .try_collect() + .await?; + + println!("{entries:#?}"); + + assert_eq!(entries.len(), 2); + + Ok(()) +} + #[tokio::test(flavor = "multi_thread")] async fn owned_namespace_subspace_write_sync() -> Result<()> { iroh_test::logging::setup_multithreaded(); diff --git a/iroh/src/node/builder.rs b/iroh/src/node/builder.rs index cf00bffe4d..8a3891cea2 100644 --- a/iroh/src/node/builder.rs +++ b/iroh/src/node/builder.rs @@ -85,8 +85,10 @@ pub enum SpacesStorage { /// In-memory storage. Memory, /// File-based persistent storage. - #[allow(unused)] Persistent(PathBuf), + /// (test only) persistent storage with in-memory redb backend. + #[cfg(feature = "test-utils")] + PersistentTest, } /// Builder for the [`Node`]. @@ -335,6 +337,7 @@ where format!("Failed to load blobs database from {}", blob_dir.display()) })?; let docs_storage = DocsStorage::Persistent(IrohPaths::DocsDatabase.with_root(root)); + let spaces_storage = SpacesStorage::Persistent(IrohPaths::SpacesDatabase.with_root(root)); let secret_key_path = IrohPaths::SecretKey.with_root(root); let secret_key = load_secret_key(secret_key_path).await?; @@ -352,8 +355,7 @@ where dns_resolver: self.dns_resolver, gc_policy: self.gc_policy, docs_storage, - // TODO: Switch to SpacesStorage::Persistent once we have a store. 
- spaces_storage: SpacesStorage::Disabled, + spaces_storage, node_discovery: self.node_discovery, #[cfg(any(test, feature = "test-utils"))] insecure_skip_relay_cert_verify: false, @@ -362,6 +364,19 @@ where }) } + /// Enables the persistent store, but with an in-memory backend. + /// + /// There's significant differences between the naive spaces in-memory store + /// and the persistent store, even if it's using the in-memory backend, + /// making it useful to test these two implementations against each other. + #[cfg(feature = "test-utils")] + pub fn enable_spaces_persist_test_mode(mut self, test_mode: bool) -> Self { + if test_mode == true { + self.spaces_storage = SpacesStorage::PersistentTest; + } + self + } + /// Configure rpc endpoint. pub fn rpc_endpoint(self, value: IrohServerEndpoint, rpc_addr: Option) -> Self { Self { @@ -661,11 +676,27 @@ where ) .await?; + let blobs_store = self.blobs_store.clone(); let willow = match self.spaces_storage { SpacesStorage::Disabled => None, SpacesStorage::Memory => { - let blobs_store = self.blobs_store.clone(); - let create_store = move || iroh_willow::store::memory::Store::new(blobs_store); + let create_store = move || { + // iroh_willow::store::memory::Store::new(blobs_store) + iroh_willow::store::persistent::Store::new_memory(blobs_store) + .expect("couldn't initialize store") + }; + let engine = iroh_willow::Engine::spawn( + endpoint.clone(), + create_store, + iroh_willow::engine::AcceptOpts::default(), + ); + Some(engine) + } + SpacesStorage::Persistent(path) => { + let create_store = move || { + iroh_willow::store::persistent::Store::new(path, blobs_store) + .expect("failed to spawn persistent store") // TODO(matheus23): introduce fallibility? 
+ }; let engine = iroh_willow::Engine::spawn( endpoint.clone(), create_store, @@ -673,8 +704,18 @@ where ); Some(engine) } - SpacesStorage::Persistent(_) => { - unimplemented!("persistent storage for willow is not yet implemented") + #[cfg(feature = "test-utils")] + SpacesStorage::PersistentTest => { + let create_store = move || { + iroh_willow::store::persistent::Store::new_memory(blobs_store) + .expect("couldn't initialize store") + }; + let engine = iroh_willow::Engine::spawn( + endpoint.clone(), + create_store, + iroh_willow::engine::AcceptOpts::default(), + ); + Some(engine) } }; // Spawn the willow engine. diff --git a/iroh/src/util/path.rs b/iroh/src/util/path.rs index f7ee91af40..8d6aa706f4 100644 --- a/iroh/src/util/path.rs +++ b/iroh/src/util/path.rs @@ -15,6 +15,9 @@ pub enum IrohPaths { /// Path to the [iroh-docs document database](iroh_docs::store::fs::Store) #[strum(serialize = "docs.redb")] DocsDatabase, + /// Path to the iroh-willow database + #[strum(serialize = "spaces.redb")] + SpacesDatabase, /// Path to the console state #[strum(serialize = "console")] Console, diff --git a/iroh/tests/spaces.rs b/iroh/tests/spaces.rs index d7becda3f5..cc41160363 100644 --- a/iroh/tests/spaces.rs +++ b/iroh/tests/spaces.rs @@ -25,7 +25,7 @@ use tracing::{error, info}; /// Spawn an iroh node in a separate thread and tokio runtime, and return /// the address and client. 
-async fn spawn_node() -> (NodeAddr, Iroh) { +async fn spawn_node(persist_test_mode: bool) -> (NodeAddr, Iroh) { let (sender, receiver) = tokio::sync::oneshot::channel(); std::thread::spawn(move || { let runtime = tokio::runtime::Builder::new_multi_thread() @@ -34,6 +34,7 @@ async fn spawn_node() -> (NodeAddr, Iroh) { runtime.block_on(async move { let secret_key = SecretKey::generate(); let node = iroh::node::Builder::default() + .enable_spaces_persist_test_mode(persist_test_mode) .secret_key(secret_key) .relay_mode(iroh_net::relay::RelayMode::Disabled) .node_discovery(iroh::node::DiscoveryConfig::None) @@ -76,8 +77,11 @@ enum Peer { Y, } -#[proptest] -fn test_get_many_weird_result( +#[proptest(cases = 32)] +fn prop_sync_simulation_matches_model( + // these bools govern whether to use the memory store or the persistent store with an in-memory backend. + x_is_persist: bool, + y_is_persist: bool, #[strategy(vec((role(), vec(simple_op(), 0..20)), 0..20))] rounds: Vec<(Peer, Vec)>, ) { iroh_test::logging::setup_multithreaded(); @@ -89,8 +93,8 @@ fn test_get_many_weird_result( .block_on(async { let mut simulated_entries: BTreeMap<(Peer, String), String> = BTreeMap::new(); - let (addr_x, iroh_x) = spawn_node().await; - let (addr_y, iroh_y) = spawn_node().await; + let (addr_x, iroh_x) = spawn_node(x_is_persist).await; + let (addr_y, iroh_y) = spawn_node(y_is_persist).await; let node_id_x = addr_x.node_id; let node_id_y = addr_y.node_id; iroh_x.net().add_node_addr(addr_y.clone()).await?; @@ -166,7 +170,7 @@ fn test_get_many_weird_result( anyhow::Ok(()) }; let fut = async { tokio::try_join!(fut_x, fut_y) }; - tokio::time::timeout(Duration::from_secs(10), fut).await??; + tokio::time::timeout(Duration::from_secs(40), fut).await??; info!("[{i}/{count}] sync complete"); @@ -264,8 +268,8 @@ impl std::error::Error for AnyhowStdErr { #[tokio::test] async fn spaces_smoke() -> TestResult { iroh_test::logging::setup_multithreaded(); - let (alfie_addr, alfie) = spawn_node().await; - 
let (betty_addr, betty) = spawn_node().await; + let (alfie_addr, alfie) = spawn_node(false).await; + let (betty_addr, betty) = spawn_node(false).await; info!("alfie is {}", alfie_addr.node_id.fmt_short()); info!("betty is {}", betty_addr.node_id.fmt_short()); @@ -360,8 +364,8 @@ async fn spaces_smoke() -> TestResult { #[tokio::test] async fn spaces_subscription() -> TestResult { iroh_test::logging::setup_multithreaded(); - let (alfie_addr, alfie) = spawn_node().await; - let (betty_addr, betty) = spawn_node().await; + let (alfie_addr, alfie) = spawn_node(false).await; + let (betty_addr, betty) = spawn_node(false).await; info!("alfie is {}", alfie_addr.node_id.fmt_short()); info!("betty is {}", betty_addr.node_id.fmt_short()); @@ -444,9 +448,9 @@ async fn spaces_subscription() -> TestResult { #[tokio::test] async fn test_restricted_area() -> testresult::TestResult { iroh_test::logging::setup_multithreaded(); - const TIMEOUT: Duration = Duration::from_secs(2); - let (alfie_addr, alfie) = spawn_node().await; - let (betty_addr, betty) = spawn_node().await; + const TIMEOUT: Duration = Duration::from_secs(20); + let (alfie_addr, alfie) = spawn_node(false).await; + let (betty_addr, betty) = spawn_node(false).await; info!("alfie is {}", alfie_addr.node_id.fmt_short()); info!("betty is {}", betty_addr.node_id.fmt_short()); let alfie_user = alfie.spaces().create_user().await?; From 8fd40a59b6fa664cc4b2f47d65be748950619eea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Kr=C3=BCger?= Date: Thu, 31 Oct 2024 15:29:32 +0100 Subject: [PATCH 198/198] Fix compilation after merge --- Cargo.lock | 2 +- iroh-willow/Cargo.toml | 12 +++++------ iroh-willow/src/engine.rs | 4 ++-- iroh-willow/src/engine/peer_manager.rs | 2 +- iroh/src/node.rs | 30 -------------------------- iroh/src/node/builder.rs | 2 +- iroh/src/node/protocol.rs | 8 +++++++ 7 files changed, 19 insertions(+), 41 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ee611751d2..72fba41a2b 100644 --- a/Cargo.lock +++ 
b/Cargo.lock @@ -2986,7 +2986,7 @@ dependencies = [ [[package]] name = "iroh-willow" -version = "0.24.0" +version = "0.27.0" dependencies = [ "anyhow", "bytes", diff --git a/iroh-willow/Cargo.toml b/iroh-willow/Cargo.toml index 5fdabf0780..eaede55d8d 100644 --- a/iroh-willow/Cargo.toml +++ b/iroh-willow/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "iroh-willow" -version = "0.24.0" +version = "0.27.0" edition = "2021" readme = "README.md" description = "willow protocol implementation for iroh" @@ -18,7 +18,7 @@ workspace = true anyhow = "1" bytes = { version = "1.4", features = ["serde"] } curve25519-dalek = { version = "4.1.3", features = [ "digest", "rand_core", "serde", ] } -derive_more = { version = "=1.0.0-beta.7", features = [ "debug", "deref", "display", "from", "try_into", "into", "as_ref", "try_from", ] } +derive_more = { version = "1.0.0", features = [ "debug", "deref", "display", "from", "try_into", "into", "as_ref", "try_from", ] } ed25519-dalek = { version = "2.0.0", features = ["serde", "rand_core"] } either = "1.13.0" futures-buffered = "0.2.6" @@ -27,12 +27,12 @@ futures-lite = "2.3.0" futures-util = "0.3.30" genawaiter = "0.99.1" hex = "0.4.3" -iroh-base = { version = "0.24.0", path = "../iroh-base" } +iroh-base = { version = "0.27.0", path = "../iroh-base" } iroh-blake3 = "1.4.5" -iroh-blobs = { version = "0.24.0", path = "../iroh-blobs" } +iroh-blobs = { version = "0.27.0", path = "../iroh-blobs" } iroh-io = { version = "0.6.0", features = ["stats"] } -iroh-metrics = { version = "0.24.0", path = "../iroh-metrics", optional = true } -iroh-net = { version = "0.24.0", path = "../iroh-net" } +iroh-metrics = { version = "0.27.0", path = "../iroh-metrics", optional = true } +iroh-net = { version = "0.27.0", path = "../iroh-net" } meadowcap = "0.1.0" postcard = { version = "1", default-features = false, features = [ "alloc", "use-std", "experimental-derive", ] } rand = "0.8.5" diff --git a/iroh-willow/src/engine.rs b/iroh-willow/src/engine.rs index 
4f506620bc..816f95546c 100644 --- a/iroh-willow/src/engine.rs +++ b/iroh-willow/src/engine.rs @@ -123,14 +123,14 @@ impl Engine { /// /// This will try to close all connections gracefully for up to 10 seconds, /// and abort them otherwise. - pub async fn shutdown(mut self) -> Result<()> { + pub async fn shutdown(&self) -> Result<()> { debug!("shutdown engine"); let (reply, reply_rx) = oneshot::channel(); self.peer_manager_inbox .send(peer_manager::Input::Shutdown { reply }) .await?; reply_rx.await?; - let res = (&mut self.peer_manager_task).await; + let res = self.peer_manager_task.clone().await; match res { Err(err) => error!(?err, "peer manager task panicked"), Ok(Err(err)) => error!(?err, "peer manager task failed"), diff --git a/iroh-willow/src/engine/peer_manager.rs b/iroh-willow/src/engine/peer_manager.rs index 8b43bd45cd..50cac9860a 100644 --- a/iroh-willow/src/engine/peer_manager.rs +++ b/iroh-willow/src/engine/peer_manager.rs @@ -304,7 +304,7 @@ impl PeerManager { let fut = async move { debug!("connecting"); let conn = tokio::select! { - res = endpoint.connect_by_node_id(peer, ALPN) => res, + res = endpoint.connect(peer, ALPN) => res, _ = cancel_dial.cancelled() => { debug!("dial cancelled during dial"); return Err(ConnectionError::LocallyClosed.into()); diff --git a/iroh/src/node.rs b/iroh/src/node.rs index 1125336d35..1f1b233b3f 100644 --- a/iroh/src/node.rs +++ b/iroh/src/node.rs @@ -464,30 +464,6 @@ impl NodeInner { async fn shutdown(&self, protocols: Arc) { let error_code = Closed::ProviderTerminating; - // Shutdown future for the docs engine, if enabled. - let docs_shutdown = { - let docs = self.docs.clone(); - async move { - if let Some(docs) = docs { - docs.shutdown().await - } else { - Ok(()) - } - } - }; - - // Shutdown willow gracefully. 
- let spaces_shutdown = { - let engine = self.willow.clone(); - async move { - if let Some(engine) = engine { - if let Err(error) = engine.shutdown().await { - warn!(?error, "Error while shutting down willow"); - } - } - } - }; - // We ignore all errors during shutdown. let _ = tokio::join!( // Close the endpoint. @@ -497,12 +473,6 @@ impl NodeInner { self.endpoint .clone() .close(error_code.into(), error_code.reason()), - // Shutdown docs engine. - docs_shutdown, - // Shutdown spaces engine. - spaces_shutdown, - // Shutdown blobs store engine. - self.db.shutdown(), // Shutdown protocol handlers. protocols.shutdown(), ); diff --git a/iroh/src/node/builder.rs b/iroh/src/node/builder.rs index fa110fc278..0daa3a02ee 100644 --- a/iroh/src/node/builder.rs +++ b/iroh/src/node/builder.rs @@ -918,7 +918,7 @@ impl ProtocolBuilder { } if let Some(engine) = self.inner.willow.clone() { - self = self.accept(iroh_willow::ALPN, Arc::new(engine)); + self = self.accept(iroh_willow::ALPN.to_vec(), Arc::new(engine)); } self diff --git a/iroh/src/node/protocol.rs b/iroh/src/node/protocol.rs index d4ca2e62b7..c6a17b5c35 100644 --- a/iroh/src/node/protocol.rs +++ b/iroh/src/node/protocol.rs @@ -348,4 +348,12 @@ impl ProtocolHandler for iroh_willow::Engine { fn accept(self: Arc, conn: Connecting) -> BoxedFuture> { Box::pin(async move { self.handle_connection(conn.await?).await }) } + + fn shutdown(self: Arc) -> BoxedFuture<()> { + Box::pin(async move { + if let Err(e) = (&**self).shutdown().await { + tracing::error!(?e, "Error while shutting down willow engine"); + } + }) + } }